parent 7731f7db7d
commit c63422f78b
@@ -0,0 +1,7 @@
from swarms.models.bing_chat import BingChat

# Initialize the BingChat model (a wrapper around EdgeGPT)
bing = BingChat(cookies_path="./cookies.json")
task = "generate topics for PositiveMed.com: 1. Monitor Health Trends: Scan Google Alerts, authoritative health websites, and social media for emerging health, wellness, and medical discussions. 2. Keyword Research: Utilize tools like SEMrush to identify keywords with moderate to high search volume and low competition. Focus on long-tail, conversational keywords. 3. Analyze Site Data: Review PositiveMed's analytics to pinpoint popular articles and areas lacking recent content. 4. Crowdsourcing: Gather topic suggestions from the brand's audience and internal team, ensuring alignment with PositiveMed's mission. 5. Topic Evaluation: Assess topics for audience relevance, uniqueness, brand fit, current relevance, and SEO potential. 6. Tone and Style: Ensure topics can be approached with an educational, empowering, and ethical tone, in line with the brand's voice. Use this framework to generate a list of potential topics that cater to PositiveMed's audience while staying true to its brand ethos. Find trending topics for slowing and reversing aging. Think step by step and go into as much detail as possible."
response = bing(task)
print(response)
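Note: the cookies_path file is an exported browser-cookie dump for bing.com. A minimal sanity-check sketch, assuming the EdgeGPT convention of a JSON array of cookie objects with "name" and "value" keys (the exact export format depends on the browser extension used, so treat the field names as assumptions):

import json

# Assumption: cookies.json is a JSON array of {"name": ..., "value": ...} objects
with open("./cookies.json") as f:
    cookies = json.load(f)
assert isinstance(cookies, list), "expected a list of cookie objects"
print(f"Loaded {len(cookies)} cookies")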
@@ -0,0 +1,29 @@
import os
import sys

from dotenv import load_dotenv

# Make the repository root importable before pulling in the swarms modules
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_dir)

from swarms.models.revgptV1 import RevChatGPTModelv1
from swarms.models.revgptV4 import RevChatGPTModelv4

load_dotenv()

config = {
    "model": os.getenv("REVGPT_MODEL"),
    "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
    "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
    "PUID": os.getenv("REVGPT_PUID"),
    "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")],
}

# For the v1 model
model = RevChatGPTModelv1(access_token=os.getenv("ACCESS_TOKEN"), **config)

# For the v4 model
# model = RevChatGPTModelv4(access_token=os.getenv("ACCESS_TOKEN"), **config)

task = "Write a cli snake game"
response = model.run(task)
print(response)
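The config block above is driven entirely by environment variables loaded from a .env file. A minimal pre-flight sketch that fails fast if any of them is unset (the variable names are taken from the code above; which ones are actually required depends on the RevChatGPT setup):

import os

REQUIRED_VARS = [
    "ACCESS_TOKEN",
    "REVGPT_MODEL",
    "REVGPT_PLUGIN_IDS",
    "REVGPT_DISABLE_HISTORY",
    "REVGPT_PUID",
    "REVGPT_UNVERIFIED_PLUGIN_DOMAINS",
]

# Fail fast with a clear message instead of passing None values into the model
missing = [name for name in REQUIRED_VARS if os.getenv(name) is None]
if missing:
    raise SystemExit("Missing environment variables: " + ", ".join(missing))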
@@ -0,0 +1,55 @@
import openai
import ray
import uvicorn
from vllm import LLM, SamplingParams
from vllm.entrypoints import api_server as vllm_api_server
from vllm.entrypoints.openai import api_server as openai_api_server
import sky  # SkyPilot's Python package is imported as `sky`


class VLLMModel:
    def __init__(self, model_name="facebook/opt-125m", tensor_parallel_size=1):
        self.model_name = model_name
        self.tensor_parallel_size = tensor_parallel_size
        self.model = LLM(model_name, tensor_parallel_size=tensor_parallel_size)
        self.temperature = 1.0
        self.max_tokens = None
        self.sampling_params = SamplingParams(temperature=self.temperature)

    def _rebuild_sampling_params(self):
        # Keep temperature and max_tokens together in a single SamplingParams
        # object so changing one setting never discards the other
        if self.max_tokens is None:
            self.sampling_params = SamplingParams(temperature=self.temperature)
        else:
            self.sampling_params = SamplingParams(
                temperature=self.temperature, max_tokens=self.max_tokens
            )

    def generate_text(self, prompt: str) -> str:
        output = self.model.generate([prompt], self.sampling_params)
        return output[0].outputs[0].text

    def set_temperature(self, value: float):
        self.temperature = value
        self._rebuild_sampling_params()

    def set_max_tokens(self, value: int):
        self.max_tokens = value
        self._rebuild_sampling_params()

    def offline_batched_inference(self, prompts: list) -> list:
        outputs = self.model.generate(prompts, self.sampling_params)
        return [output.outputs[0].text for output in outputs]

    def start_api_server(self):
        # Serve vLLM's plain HTTP inference API
        uvicorn.run(vllm_api_server.app, host="0.0.0.0", port=8000)

    def start_openai_compatible_server(self):
        # Serve vLLM's OpenAI-compatible API
        uvicorn.run(openai_api_server.app, host="0.0.0.0", port=8000)

    def query_openai_compatible_server(self, prompt: str):
        # Uses the pre-1.0 `openai` client interface (openai.Completion)
        openai.api_key = "EMPTY"
        openai.api_base = "http://localhost:8000/v1"
        completion = openai.Completion.create(model=self.model_name, prompt=prompt)
        return completion

    def distributed_inference(self, prompt: str):
        # Start Ray explicitly so the rebuilt engine attaches its
        # tensor-parallel workers to this Ray cluster
        ray.init(ignore_reinit_error=True)
        self.model = LLM(self.model_name, tensor_parallel_size=self.tensor_parallel_size)
        output = self.model.generate(prompt, self.sampling_params)
        ray.shutdown()
        return output[0].outputs[0].text

    def run_on_cloud_with_skypilot(self, yaml_file):
        # Launch the task described by a SkyPilot YAML spec
        task = sky.Task.from_yaml(yaml_file)
        sky.launch(task)
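A minimal offline-usage sketch of the wrapper above (assumes a local GPU and the default facebook/opt-125m weights; the server, distributed, and SkyPilot paths each need their respective infrastructure configured):

model = VLLMModel(model_name="facebook/opt-125m")
model.set_temperature(0.7)
model.set_max_tokens(64)

# Single-prompt and batched offline inference
print(model.generate_text("The capital of France is"))
print(model.offline_batched_inference(["Hello,", "The meaning of life is"]))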