parent 4e757d5a7d  commit 9c75781cf6
@ -0,0 +1,82 @@
from swarms import Agent, Anthropic, tool

# Model
llm = Anthropic(
    temperature=0.1,
)


# Tools
@tool
def text_to_video(task: str):
    """
    Converts a given text task into an animated video.

    Args:
        task (str): The text task to be converted into a video.

    Returns:
        str: The path to the exported GIF file.
    """
    import torch
    from diffusers import (
        AnimateDiffPipeline,
        MotionAdapter,
        EulerDiscreteScheduler,
    )
    from diffusers.utils import export_to_gif
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    device = "cuda"
    dtype = torch.float16

    step = 4  # Options: [1, 2, 4, 8]
    repo = "ByteDance/AnimateDiff-Lightning"
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    base = (  # Choose your favorite base model.
        "emilianJR/epiCRealism"
    )

    # Load the AnimateDiff-Lightning motion adapter weights onto the GPU.
    adapter = MotionAdapter().to(device, dtype)
    adapter.load_state_dict(
        load_file(hf_hub_download(repo, ckpt), device=device)
    )
    # Build the AnimateDiff pipeline on the chosen base model and configure
    # the scheduler used with the Lightning checkpoints.
    pipe = AnimateDiffPipeline.from_pretrained(
        base, motion_adapter=adapter, torch_dtype=dtype
    ).to(device)
    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config,
        timestep_spacing="trailing",
        beta_schedule="linear",
    )

    # Generate the frames and export them as a GIF.
    output = pipe(
        prompt=task, guidance_scale=1.0, num_inference_steps=step
    )
    out = export_to_gif(output.frames[0], "animation.gif")
    return out


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[text_to_video],
)

# Run the agent
out = agent("Create a video of a girl coding AI wearing a hijab")
print(out)
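The tool above assumes a CUDA device and several heavy optional dependencies. A minimal pre-flight check along these lines (a sketch added for illustration, not part of the original commit) fails fast with a clearer message before the agent ever invokes the tool:

import importlib.util

import torch

# Fail early if the GPU or a required package is missing.
assert torch.cuda.is_available(), "text_to_video requires a CUDA-capable GPU"
for pkg in ("diffusers", "huggingface_hub", "safetensors"):
    assert (
        importlib.util.find_spec(pkg) is not None
    ), f"text_to_video requires the '{pkg}' package"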
@ -0,0 +1,10 @@
#!/bin/bash

# Create the new directory if it doesn't exist
sudo mkdir -p /artifacts_logs

# Find all .log files in the root directory and its subdirectories
find / -name "*.log" -print0 | while IFS= read -r -d '' file; do
    # Use sudo to move the file to the new directory
    sudo mv "$file" /artifacts_logs/
done
@ -0,0 +1,75 @@
from swarms.models.base_llm import AbstractLLM
from pydantic import BaseModel
from typing import List, Dict
import openai


class OpenRouterRequest(BaseModel):
    model: str
    messages: List[Dict[str, str]] = []


class OpenRouterChat(AbstractLLM):
    """
    A class representing an OpenRouter chat model.

    Args:
        model_name (str): The name of the OpenRouter model.
        base_url (str, optional): The base URL for the OpenRouter API. Defaults to "https://openrouter.ai/api/v1/chat/completions".
        openrouter_api_key (str, optional): The API key for accessing the OpenRouter API. Defaults to None.
        system_prompt (str, optional): The system prompt for the chat model. Defaults to None.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        model_name (str): The name of the OpenRouter model.
        base_url (str): The base URL for the OpenRouter API.
        openrouter_api_key (str): The API key for accessing the OpenRouter API.
        system_prompt (str): The system prompt for the chat model.

    Methods:
        run(task, *args, **kwargs): Runs the chat model with the given task.

    """

    def __init__(
        self,
        model_name: str,
        base_url: str = "https://openrouter.ai/api/v1/chat/completions",
        openrouter_api_key: str = None,
        system_prompt: str = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.model_name = model_name
        self.base_url = base_url
        self.openrouter_api_key = openrouter_api_key
        self.system_prompt = system_prompt

        # Route OpenAI SDK calls through the OpenRouter endpoint.
        openai.api_base = "https://openrouter.ai/api/v1"
        openai.api_key = openrouter_api_key

    def run(self, task: str, *args, **kwargs) -> str:
        """
        Runs the chat model with the given task.

        Args:
            task (str): The user's task for the chat model.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            str: The response generated by the chat model.

        """
        response = openai.ChatCompletion.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": task},
            ],
            *args,
            **kwargs,
        )
        return response.choices[0].message.content
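Example usage, as a minimal sketch (the model slug, API key, and prompt below are illustrative placeholders, not values taken from this commit):

if __name__ == "__main__":
    chat = OpenRouterChat(
        model_name="mistralai/mistral-7b-instruct",  # placeholder model slug
        openrouter_api_key="sk-or-...",  # placeholder key
        system_prompt="You are a concise, helpful assistant.",
    )
    # run() sends the system prompt plus the task and returns the reply text.
    print(chat.run("Explain what OpenRouter does in one sentence."))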