commit 5d9e32fa9f (pull/425/head^2)
parent 492377b5c3
Kye, 10 months ago

@@ -4,8 +4,11 @@ from swarms import Agent, OpenAIChat
 agent = Agent(
     llm=OpenAIChat(),
     max_loops="auto",
-    agent_name = "Amazon Product Scraper",
-    system_prompt="Create the code in python to scrape amazon product reviews and return csv given a product url",
+    agent_name="Amazon Product Scraper",
+    system_prompt=(
+        "Create the code in python to scrape amazon product reviews"
+        " and return csv given a product url"
+    ),
     autosave=True,
     dashboard=False,
     streaming_on=True,
@@ -16,6 +19,7 @@ agent = Agent(
 # Run the workflow on a task
 agent(
-    "Create the code to scrape this amazon url and rturn a csv of reviews: https://www.amazon.com/Creative-Act-Way-Being/dp/0593652886/ref=sr_1_1?dib=eyJ2IjoiMSJ9.JVdL3JSDmBVH_jv4eM6YE4npUpG6jO6-ai6lgmax-Ya4nH3oPk8cxkmzKsx9yAMX-Eo4A1ErqipCeY-FhTqMc7hhNTqCoAvNd65rvXH1GnYv7WlfSDYTjMkB_vVrH-iitBXAY6uASm73ff2hPWzqhF3ldGkYr8fA5FtmoYMSOnarvCU11YpoSp3EqdK526XOxkRJqeFlZAoAkXOmYHe9B5sY8-zQlVgkIV3U-7rUQdY.UXen28vr2K-Tbbz9aB7vNLLurAiR2ZSblFOVNjXYaf8&dib_tag=se&hvadid=652633987879&hvdev=c&hvlocphy=9061268&hvnetw=g&hvqmt=e&hvrand=413884426001746223&hvtargid=kwd-1977743614989&hydadcr=8513_13545021&keywords=the+creative+act+rick+rubin+book&qid=1710541252&sr=8-1"
+    "Create the code to scrape this amazon url and return a csv of"
+    " reviews:"
+    " https://www.amazon.com/Creative-Act-Way-Being/dp/0593652886/ref=sr_1_1?dib=eyJ2IjoiMSJ9.JVdL3JSDmBVH_jv4eM6YE4npUpG6jO6-ai6lgmax-Ya4nH3oPk8cxkmzKsx9yAMX-Eo4A1ErqipCeY-FhTqMc7hhNTqCoAvNd65rvXH1GnYv7WlfSDYTjMkB_vVrH-iitBXAY6uASm73ff2hPWzqhF3ldGkYr8fA5FtmoYMSOnarvCU11YpoSp3EqdK526XOxkRJqeFlZAoAkXOmYHe9B5sY8-zQlVgkIV3U-7rUQdY.UXen28vr2K-Tbbz9aB7vNLLurAiR2ZSblFOVNjXYaf8&dib_tag=se&hvadid=652633987879&hvdev=c&hvlocphy=9061268&hvnetw=g&hvqmt=e&hvrand=413884426001746223&hvtargid=kwd-1977743614989&hydadcr=8513_13545021&keywords=the+creative+act+rick+rubin+book&qid=1710541252&sr=8-1"
 )
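
Note on the wrapping pattern used throughout this commit: adjacent Python string literals are concatenated at compile time, so splitting a long string across lines (with the leading space carried into the second literal) leaves the runtime value byte-for-byte identical. A quick self-contained check:

    # Implicit literal concatenation: both expressions are the same string.
    wrapped = (
        "Create the code in python to scrape amazon product reviews"
        " and return csv given a product url"
    )
    original = "Create the code in python to scrape amazon product reviews and return csv given a product url"
    assert wrapped == original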

@@ -14,11 +14,17 @@ api_key = os.environ.get("OPENAI_API_KEY")
 # Initialize the language model
 llm = OpenAIChat(
-    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
+    temperature=0.5,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=4000,
 )
-print(f'this is a test msg for stdout and stderr: {sys.stdout}, {sys.stderr}')
+print(
+    f"this is a test msg for stdout and stderr: {sys.stdout},"
+    f" {sys.stderr}"
+)
 ## Initialize the workflow
 agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)

@@ -9,5 +9,7 @@ model = Anthropic(anthropic_api_key="")
 # print(completion_1)
 # Using the __call__ method
-completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"])
+completion_2 = model(
+    "How far is the moon from the earth?", stop=["miles", "km"]
+)
 print(completion_2)
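
For context on the stop argument above: stop sequences cut the completion at the first occurrence of any listed string, so an answer mentioning miles or km is truncated just before the unit. A minimal sketch of the semantics in plain Python, independent of the Anthropic client:

    def apply_stop_sequences(text: str, stop: list[str]) -> str:
        # Truncate at the earliest stop sequence, mirroring how
        # completion APIs cut off generation.
        cut_points = [text.find(s) for s in stop if s in text]
        return text[: min(cut_points)] if cut_points else text

    print(apply_stop_sequences("about 238,900 miles away", ["miles", "km"]))
    # -> "about 238,900 "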

@@ -1,4 +1,4 @@
-'''from swarms.models import Dalle3
+"""from swarms.models import Dalle3
 # Create an instance of the Dalle3 class with high quality
 dalle3 = Dalle3(quality="high")
@@ -11,4 +11,4 @@ image_url = dalle3(task)
 # Print the generated image URL
 print(image_url)
-'''
+"""

@@ -5,7 +5,10 @@ api = GPT4VisionAPI(max_tokens=1000)
 # Define the task and image URL
 task = "Describe the scene in the image."
-img = "/home/kye/.swarms/swarms/examples/Screenshot from 2024-02-20 05-55-34.png"
+img = (
+    "/home/kye/.swarms/swarms/examples/Screenshot from 2024-02-20"
+    " 05-55-34.png"
+)
 # Run the GPT-4 Vision model
 response = api.run(task, img)

@@ -11,19 +11,26 @@ try:
     device = "cuda" if torch.cuda.is_available() else "cpu"
     inference.model.to(device)
-    prompt_text = "Create a list of known biggest risks of structural collapse with references"
-    inputs = inference.tokenizer(prompt_text, return_tensors="pt").to(device)
+    prompt_text = (
+        "Create a list of known biggest risks of structural collapse"
+        " with references"
+    )
+    inputs = inference.tokenizer(prompt_text, return_tensors="pt").to(
+        device
+    )
     generated_ids = inference.model.generate(
         **inputs,
         max_new_tokens=1000,  # Adjust the length of the generation
         temperature=0.7,  # Adjust creativity
         top_k=50,  # Limits the vocabulary considered at each step
         pad_token_id=inference.tokenizer.eos_token_id,
-        do_sample=True  # Enable sampling to utilize temperature
+        do_sample=True,  # Enable sampling to utilize temperature
     )
-    generated_text = inference.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+    generated_text = inference.tokenizer.decode(
+        generated_ids[0], skip_special_tokens=True
+    )
     print(generated_text)
 except Exception as e:
     print(f"An error occurred: {e}")

@@ -48,7 +48,9 @@ productivity_agent = Agent(
 )
 # Initialize safety agent
-safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True)
+safety_agent = Agent(
+    llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True
+)
 # Init the security agent
 security_agent = Agent(

@@ -8,7 +8,9 @@ model = QwenVLMultiModal(
 )
 # Run the model
-response = model("Hello, how are you?", "https://example.com/image.jpg")
+response = model(
+    "Hello, how are you?", "https://example.com/image.jpg"
+)
 # Print the response
 print(response)

@@ -10,7 +10,10 @@ api_key = os.getenv("OPENAI_API_KEY")
 # Initialize the language agent
 llm = OpenAIChat(
-    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
+    temperature=0.5,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=4000,
 )

@@ -29,7 +29,9 @@ def interactive_conversation(llm):
         conv.add("user", user_input)
         if user_input.lower() == "quit":
             break
-        task = conv.return_history_as_string()  # Get the conversation history
+        task = (
+            conv.return_history_as_string()
+        )  # Get the conversation history
         out = llm(task)
         conv.add("assistant", out)
         print(
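
The hunk above sits mid-function. For readers skimming the diff, the overall shape of the loop is roughly the following, with a stubbed Conversation and llm so the sketch runs on its own (both are stand-ins, not the swarms implementations):

    class Conversation:
        # Minimal stand-in for the swarms Conversation structure.
        def __init__(self):
            self.history = []

        def add(self, role: str, content: str):
            self.history.append((role, content))

        def return_history_as_string(self) -> str:
            return "\n".join(f"{r}: {c}" for r, c in self.history)

    def interactive_conversation(llm):
        conv = Conversation()
        while True:
            user_input = input("You: ")
            conv.add("user", user_input)
            if user_input.lower() == "quit":
                break
            task = (
                conv.return_history_as_string()
            )  # Get the conversation history
            out = llm(task)
            conv.add("assistant", out)
            print(out)

    # Echo "model" stands in for OpenAIChat in this sketch.
    interactive_conversation(lambda task: f"(echo) {task.splitlines()[-1]}")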

@@ -40,5 +40,7 @@ print(out)
 # Run all the agents in the swarm network on a task
-out = swarmnet.run_many_agents("Generate a 10,000 word blog on health and wellness.")
+out = swarmnet.run_many_agents(
+    "Generate a 10,000 word blog on health and wellness."
+)
 print(out)

@@ -3,7 +3,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from swarms import ToolAgent
 # Load the pre-trained model and tokenizer
-model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
+model = AutoModelForCausalLM.from_pretrained(
+    "databricks/dolly-v2-12b"
+)
 tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
 # Define a JSON schema for person's information
@@ -18,10 +20,14 @@ json_schema = {
 }
 # Define the task to generate a person's information
-task = "Generate a person's information based on the following schema:"
+task = (
+    "Generate a person's information based on the following schema:"
+)
 # Create an instance of the ToolAgent class
-agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
+agent = ToolAgent(
+    model=model, tokenizer=tokenizer, json_schema=json_schema
+)
 # Run the agent to generate the person's information
 generated_data = agent.run(task)
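
The schema body itself is elided by the hunk (only its closing brace appears as context). For orientation, a plausible shape for such a person schema, purely illustrative rather than the one actually in the file, would be:

    json_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "number"},
            "is_student": {"type": "boolean"},
            "courses": {"type": "array", "items": {"type": "string"}},
        },
    }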

@@ -27,7 +27,9 @@ worker = Worker(
 )
 # Running the worker with a prompt
-out = worker.run("Hello, how are you? Create an image of how your are doing!")
+out = worker.run(
+    "Hello, how are you? Create an image of how you are doing!"
+)
 # Printing the output
 print(out)

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "swarms"
-version = "4.3.3"
+version = "4.3.7"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -26,7 +26,7 @@ classifiers = [
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 torch = ">=2.1.1,<3.0"
-transformers = "*"
+transformers = "4.39.0"
 asyncio = ">=3.4.3,<4.0"
 einops = "0.7.0"
 google-generativeai = "0.3.1"
@@ -36,8 +36,11 @@ faiss-cpu = "1.7.4"
 backoff = "2.2.1"
 datasets = "*"
 optimum = "1.15.0"
+supervision = "0.19.0"
+opencv-python = "4.9.0.80"
 diffusers = "*"
 langchain = "0.1.7"
+anthropic = "0.2.5"
 toml = "*"
 pypdf = "4.0.1"
 accelerate = "*"
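
On the transformers change above: in Poetry, a bare version string such as "4.39.0" is an exact constraint (equivalent to ==4.39.0), so this replaces the unbounded "*" with a single allowed version. The looser alternatives, for comparison:

    transformers = "4.39.0"           # exact: only 4.39.0 satisfies it
    transformers = "^4.39.0"          # caret: >=4.39.0,<5.0.0
    transformers = ">=4.39.0,<4.40"   # explicit range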

@@ -21,12 +21,14 @@ addict
 backoff==2.2.1
 ratelimit==2.2.1
 termcolor==2.2.0
+opencv-python==4.9.0.80
 langchain-community
 timm
 torchvision==0.16.1
 rich==13.5.2
 mkdocs
 mkdocs-material
+anthropic==0.2.5
 mkdocs-glightbox
 pre-commit==3.6.2
 psutil

@@ -1,4 +1,4 @@
-import os
+import os
 from dotenv import load_dotenv
 import sentry_sdk
@@ -8,6 +8,7 @@ os.environ["USE_TELEMETRY"] = "True"
 use_telementry = os.getenv("USE_TELEMETRY")
+
 def activate_sentry():
     if use_telementry == "True":
         sentry_sdk.init(
@@ -15,6 +16,5 @@ def activate_sentry():
             traces_sample_rate=1.0,
             profiles_sample_rate=1.0,
             enable_tracing=True,
-            debug = True,
+            debug=True,
         )
-
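
For readers unfamiliar with the telemetry hook being reformatted above, a self-contained sketch of the env-gated Sentry pattern (the SENTRY_DSN variable name is an assumption; the sentry_sdk.init arguments are the ones used in the hunk):

    import os

    import sentry_sdk

    def activate_sentry():
        # Gate telemetry on an env var so users can opt out.
        if os.getenv("USE_TELEMETRY") == "True":
            sentry_sdk.init(
                dsn=os.getenv("SENTRY_DSN"),  # assumed variable name
                traces_sample_rate=1.0,
                profiles_sample_rate=1.0,
                enable_tracing=True,
                debug=True,
            )

    activate_sentry()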

@@ -1,6 +0,0 @@
-from langchain.tools import (
-    BaseTool,
-    Tool,
-    StructuredTool,
-    tool,
-)  # noqa F401