parent 08169847f9
commit 393741b9aa
@@ -1,24 +0,0 @@
from swarms.workers import Worker
from swarms.agents.meta_prompter import MetaPrompterAgent
from swarms.models import OpenAI

# init the llm
llm = OpenAI()

# init the meta prompter agent that optimizes prompts
meta_optimizer = MetaPrompterAgent(llm=llm)

# init the worker agent
worker = Worker(llm)

# broad task to complete
task = "Create a feedforward in pytorch"

# optimize the prompt
optimized_prompt = meta_optimizer.run(task)

# run the optimized prompt with detailed instructions
result = worker.run(optimized_prompt)

# print the result
print(result)
@@ -1,9 +0,0 @@
# pip3 install exxa
from exa import Inference
from swarms.agents import OmniModalAgent

llm = Inference(model_id="mistralai/Mistral-7B-v0.1", quantize=True)

agent = OmniModalAgent(llm)

agent.run("Create a video of a swarm of fish")
@@ -1,9 +0,0 @@
from swarms.models import OpenAIChat
from swarms.agents import OmniModalAgent


llm = OpenAIChat(model_name="gpt-4")

agent = OmniModalAgent(llm)

agent.run("Create a video of a swarm of fish")