[CLEANUP][Playground]

pull/439/head
Kye 9 months ago
parent 2f88e92930
commit 022b39e8f6

@ -1,8 +1,7 @@
-from swarms.models.anthropic import Anthropic
+from swarms.models import Anthropic
model = Anthropic(anthropic_api_key="")
task = "What is quantum field theory? What are 3 books on the field?"
print(model(task))
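The same example with the key read from the environment instead of being hard-coded; a minimal sketch, assuming an ANTHROPIC_API_KEY environment variable is set:

import os

from swarms.models import Anthropic

# Assumed environment variable name; avoids committing a key in the playground script.
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

task = "What is quantum field theory? What are 3 books on the field?"
print(model(task))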

@ -1,4 +1,4 @@
-from swarms.models.azure_openai_llm import AzureOpenAI
+from swarms.models import AzureOpenAI
# Initialize Azure OpenAI
model = AzureOpenAI()
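A hedged configuration sketch for the Azure example: the environment variable names below are the standard Azure OpenAI ones, and it is an assumption that the swarms wrapper reads them; adjust to whatever the wrapper actually expects.

import os

from swarms.models import AzureOpenAI

# Assumption: the wrapper picks up the standard Azure OpenAI environment variables.
os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-key>")
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://<your-resource>.openai.azure.com/")

# Initialize Azure OpenAI and run a quick prompt
model = AzureOpenAI()
print(model("Summarize quantum field theory in one sentence."))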

@ -1,35 +0,0 @@
import os
from swarms.models import OpenAIChat
from swarms.models.bing_chat import BingChat
from swarms.tools.autogpt import EdgeGPTTool, tool
from swarms.workers.worker import Worker
api_key = os.getenv("OPENAI_API_KEY")
# Initialize the EdgeGPT model
edgegpt = BingChat(cookies_path="./cookies.txt")

@tool
def edgegpt(task: str = None):
    """A tool to run inference on the EdgeGPT model"""
    return EdgeGPTTool.run(task)
# Initialize the language model;
# it can be swapped out for Anthropic, Hugging Face models such as Mistral, etc. (see the sketch after this snippet)
llm = OpenAIChat(
    openai_api_key=api_key,
    temperature=0.5,
)

# Initialize the Worker with the custom tool
worker = Worker(
    llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]
)
# Use the worker to process a task
task = "Hello, my name is ChatGPT"
response = worker.run(task)
print(response)
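As the comment in the snippet notes, the language model can be swapped out. A minimal sketch that reuses the @tool-decorated edgegpt function above but drives the Worker with the Anthropic wrapper instead of OpenAIChat; the ANTHROPIC_API_KEY variable name is an assumption:

import os

from swarms.models import Anthropic
from swarms.workers.worker import Worker

# Swap OpenAIChat for Anthropic (assumes ANTHROPIC_API_KEY is set in the environment).
llm = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

# Reuses the edgegpt tool defined in the snippet above.
worker = Worker(
    llm=llm, ai_name="EdgeGPT Worker", external_tools=[edgegpt]
)
print(worker.run("Hello, my name is ChatGPT"))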

@ -1,24 +0,0 @@
from swarms.models.bioclip import BioClip
clip = BioClip(
    "hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
)
labels = [
    "adenocarcinoma histopathology",
    "brain MRI",
    "covid line chart",
    "squamous cell carcinoma histopathology",
    "immunohistochemistry histopathology",
    "bone X-ray",
    "chest X-ray",
    "pie chart",
    "hematoxylin and eosin histopathology",
]
result = clip("swarms.jpeg", labels)
metadata = {
    "filename": "images/.jpg".split("/")[-1],
    "top_probs": result,
}
clip.plot_image_with_metadata("swarms.jpeg", metadata)

@ -1,7 +0,0 @@
from swarms.models.biogpt import BioGPTWrapper
model = BioGPTWrapper()
out = model("The patient has a fever")
print(out)

@ -1,4 +1,4 @@
-from swarms.models.cohere_chat import Cohere
+from swarms.models import Cohere
cohere = Cohere(model="command-light", cohere_api_key="")

@ -1,89 +0,0 @@
from vllm import LLM
from swarms import AbstractLLM, Agent, ChromaDB
# Define a vLLM-backed LLM wrapper
class vLLMLM(AbstractLLM):
    """
    This class represents a variant of the Language Model (LLM) called vLLMLM.
    It extends the AbstractLLM class and provides additional functionality.

    Args:
        model_name (str): The name of the LLM model to use. Defaults to "facebook/opt-13b".
        tensor_parallel_size (int): The size of the tensor parallelism. Defaults to 4.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        model_name (str): The name of the LLM model.
        tensor_parallel_size (int): The size of the tensor parallelism.
        llm (LLM): An instance of the LLM class.

    Methods:
        run(task: str, *args, **kwargs): Runs the LLM model to generate output for the given task.
    """

    def __init__(
        self,
        model_name: str = "facebook/opt-13b",
        tensor_parallel_size: int = 4,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.model_name = model_name
        self.tensor_parallel_size = tensor_parallel_size
        self.llm = LLM(
            model=self.model_name,
            tensor_parallel_size=self.tensor_parallel_size,
        )

    def run(self, task: str, *args, **kwargs):
        """
        Runs the LLM model to generate output for the given task.

        Args:
            task (str): The task for which to generate output.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            The generated output for the given task.
        """
        return self.llm.generate(task)


# Initializing the vLLMLM model with the model name and tensor parallel size
model = vLLMLM(
    "facebook/opt-13b",
    tensor_parallel_size=4,
)
# Defining the task
task = "What are the symptoms of COVID-19?"
# Running the model on the specified task
out = model.run(task)
# Integrate the model into an Agent with long-term memory
agent = Agent(
    agent_name="Doctor agent",
    agent_description=(
        "This agent provides information about COVID-19 symptoms."
    ),
    llm=model,
    max_loops="auto",
    autosave=True,
    verbose=True,
    long_term_memory=ChromaDB(
        metric="cosine",
        n_results=3,
        output_dir="results",
        docs_folder="docs",
    ),
    stopping_condition="finish",
)
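The snippet above constructs the agent but never invokes it. A minimal usage sketch, assuming Agent.run accepts a plain task string the way Worker.run does in the earlier example:

# Hypothetical follow-up: run the agent on the same task and print the reply.
agent_response = agent.run(task)
print(agent_response)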

@ -1,6 +0,0 @@
from swarms.models import Dalle3
dalle3 = Dalle3(openai_api_key="")
task = "A painting of a dog"
image_url = dalle3(task)
print(image_url)
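A hedged follow-up for the DALL·E 3 example, assuming the returned value is a plain image URL, that downloads the generated image with requests:

import requests

# Assumption: `image_url` is a direct HTTP(S) link returned by the Dalle3 wrapper.
resp = requests.get(image_url, timeout=30)
resp.raise_for_status()
with open("dog_painting.png", "wb") as f:
    f.write(resp.content)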

@ -1,5 +0,0 @@
from swarms.models.fastvit import FastViT
fastvit = FastViT()
result = fastvit(img="images/swarms.jpeg", confidence_threshold=0.5)

@ -1,7 +0,0 @@
from swarms.models import JinaEmbeddings
model = JinaEmbeddings()
embeddings = model("Encode this text")
print(embeddings)
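A small follow-up sketch that compares two texts by cosine similarity with numpy; it assumes the wrapper returns a flat numeric vector per input text, which is an assumption about the return type rather than a documented guarantee:

import numpy as np

from swarms.models import JinaEmbeddings

model = JinaEmbeddings()

# Assumption: each call returns a 1-D numeric embedding vector.
a = np.asarray(model("Encode this text"), dtype=float)
b = np.asarray(model("Encode this other text"), dtype=float)

cosine = float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
print(f"cosine similarity: {cosine:.3f}")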

@ -1,5 +1,5 @@
-from swarms.models.palm import PALM
-palm = PALM()
-out = palm("path/to/image.png")
+from swarms.models import Palm
+palm = Palm()
+out = palm("what's your name")

@ -1,12 +0,0 @@
from swarms import RoboflowMultiModal
# Initialize the model
model = RoboflowMultiModal(
    api_key="api",
    project_id="your project id",
    hosted=False,
)

# Run the model on an image
out = model("img.png")

@ -1,5 +0,0 @@
from swarms.models.yi_200k import Yi200k
model = Yi200k()
out = model("What is the weather like today?")