pull/53/head
parent 84adda80bf
commit 01e7e9b34c
@@ -0,0 +1 @@
[]
@@ -0,0 +1,9 @@
from swarms import AutoBot, AutoScaler

auto_scaler = AutoScaler()
auto_scaler.start()

for i in range(100):
    auto_scaler.add_task(f"Task {i}")
@ -1,14 +1,18 @@
|
|||||||
#swarms
|
#swarms
|
||||||
|
from swarms.orchestrator.autoscaler import AutoScaler
|
||||||
|
|
||||||
# worker
|
# worker
|
||||||
# from swarms.workers.worker_node import WorkerNode
|
from swarms.workers.worker_node import WorkerNode
|
||||||
|
from swarms.workers.autobot import AutoBot
|
||||||
|
|
||||||
#boss
|
#boss
|
||||||
# from swarms.boss.boss_node import BossNode
|
from swarms.boss.boss_node import BossNode
|
||||||
|
|
||||||
#models
|
#models
|
||||||
from swarms.agents.models.anthropic import Anthropic
|
from swarms.agents.models.anthropic import Anthropic
|
||||||
from swarms.agents.models.huggingface import HuggingFaceLLM
|
from swarms.agents.models.huggingface import HuggingFaceLLM
|
||||||
from swarms.agents.models.palm import GooglePalm
|
# from swarms.agents.models.palm import GooglePalm
|
||||||
from swarms.agents.models.petals import Petals
|
from swarms.agents.models.petals import Petals
|
||||||
# from swarms.agents.models.openai import OpenAI
|
from swarms.agents.models.openai import OpenAI
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,187 +1,189 @@
This revision comments out every line of the GooglePalm PaLM chat wrapper. In the commented-out copy, pydantic's root_validator is replaced by model_validator(mode="pre") and google.generativeai is imported as palm instead of genai. The previously active contents of the module were:

from __future__ import annotations

import logging
from swarms.utils.logger import logger
from typing import Any, Callable, Dict, List, Optional

from pydantic import BaseModel, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

import google.generativeai as genai


class GooglePalmError(Exception):
    """Error raised when there is an issue with the Google PaLM API."""


def _truncate_at_stop_tokens(
    text: str,
    stop: Optional[List[str]],
) -> str:
    """Truncates text at the earliest stop token found."""
    if stop is None:
        return text

    for stop_token in stop:
        stop_token_idx = text.find(stop_token)
        if stop_token_idx != -1:
            text = text[:stop_token_idx]
    return text


def _response_to_result(response: genai.types.ChatResponse, stop: Optional[List[str]]) -> Dict[str, Any]:
    """Convert a PaLM chat response to a result dictionary."""
    result = {
        "id": response.id,
        "created": response.created,
        "model": response.model,
        "usage": {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens,
        },
        "choices": [],
    }
    for choice in response.choices:
        result["choices"].append({
            "text": _truncate_at_stop_tokens(choice.text, stop),
            "index": choice.index,
            "finish_reason": choice.finish_reason,
        })
    return result


def _messages_to_prompt_dict(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Convert a list of message dictionaries to a prompt dictionary."""
    prompt = {"messages": []}
    for message in messages:
        prompt["messages"].append({
            "role": message["role"],
            "content": message["content"],
        })
    return prompt


def _create_retry_decorator() -> Callable[[Any], Any]:
    """Create a retry decorator with exponential backoff."""
    return retry(
        retry=retry_if_exception_type(GooglePalmError),
        stop=stop_after_attempt(5),
        wait=wait_exponential(multiplier=1, min=2, max=30),
        before_sleep=before_sleep_log(logger, logging.DEBUG),
        reraise=True,
    )


####################### => main class
class GooglePalm(BaseModel):
    """Wrapper around Google's PaLM Chat API."""

    client: Any  #: :meta private:
    model_name: str = "models/chat-bison-001"
    google_api_key: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    n: int = 1

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        # Same as before
        pass

    def chat_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = _create_retry_decorator()

        @retry_decorator
        def _chat_with_retry(**kwargs: Any) -> Any:
            return self.client.chat(**kwargs)

        return _chat_with_retry(**kwargs)

    async def achat_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the async completion call."""
        retry_decorator = _create_retry_decorator()

        @retry_decorator
        async def _achat_with_retry(**kwargs: Any) -> Any:
            return await self.client.chat_async(**kwargs)

        return await _achat_with_retry(**kwargs)

    def __call__(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = self.chat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
            **kwargs,
        )

        return _response_to_result(response, stop)

    def generate(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = self.chat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
            **kwargs,
        )

        return _response_to_result(response, stop)

    async def _agenerate(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = await self.achat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
        )

        return _response_to_result(response, stop)

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_name": self.model_name,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "n": self.n,
        }

    @property
    def _llm_type(self) -> str:
        return "google-palm-chat"
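The validate_environment body above is elided in the diff ("# Same as before"). For context only, a hypothetical validator along these lines would resolve the API key and bind the configured SDK module as the client that chat_with_retry calls. This is a sketch under assumptions: GooglePalmSketch and the GOOGLE_API_KEY variable name are illustrative, not taken from this PR.

import os
from typing import Any, Dict, Optional

import google.generativeai as genai
from pydantic import BaseModel, root_validator


class GooglePalmSketch(BaseModel):
    # Hypothetical sketch, not part of this diff.
    client: Any = None
    google_api_key: Optional[str] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        # Resolve the key from the field or the environment (the env var name
        # is an assumption), configure the SDK, and expose the module as the
        # client whose chat()/chat_async() methods the wrapper invokes.
        api_key = values.get("google_api_key") or os.environ.get("GOOGLE_API_KEY")
        genai.configure(api_key=api_key)
        values["client"] = genai
        return values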
@@ -0,0 +1,62 @@
import threading
import queue
from time import sleep
from swarms.workers.autobot import AutoBot

# TODO: Handle task assignment and task delegation
# TODO: User task => decomposed into very small sub tasks => sub tasks assigned to workers => workers complete and update the swarm, can ask for help from other agents.
# TODO: Missing: task assignment, task delegation, task completion, swarm-level communication with a vector db

class AutoScaler:
    def __init__(self,
                 initial_agents=10,
                 scale_up_factor=1,
                 idle_threshold=0.2,
                 busy_threshold=0.7
                 ):
        self.agents_pool = [AutoBot() for _ in range(initial_agents)]
        self.task_queue = queue.Queue()
        self.scale_up_factor = scale_up_factor
        self.idle_threshold = idle_threshold
        self.busy_threshold = busy_threshold
        self.lock = threading.Lock()

    def add_task(self, task):
        self.task_queue.put(task)

    def scale_up(self):
        with self.lock:
            new_agents_count = len(self.agents_pool) * self.scale_up_factor
            for _ in range(new_agents_count):
                self.agents_pool.append(AutoBot())

    def scale_down(self):
        with self.lock:
            if len(self.agents_pool) > 10:  # ensure a minimum of 10 agents
                del self.agents_pool[-1]  # remove the last agent

    def monitor_and_scale(self):
        while True:
            sleep(60)  # check once a minute
            pending_tasks = self.task_queue.qsize()
            active_agents = sum(1 for agent in self.agents_pool if agent.is_busy())

            if pending_tasks / len(self.agents_pool) > self.busy_threshold:
                self.scale_up()
            elif active_agents / len(self.agents_pool) < self.idle_threshold:
                self.scale_down()

    def start(self):
        monitor_thread = threading.Thread(target=self.monitor_and_scale)
        monitor_thread.start()

        while True:
            task = self.task_queue.get()
            if task:
                available_agent = next((agent for agent in self.agents_pool), None)
                if available_agent:
                    available_agent.run(task)
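A minimal usage sketch (an assumption, not part of the diff): start() blocks on task_queue.get(), so running it on a background thread lets the caller keep queueing work, which is what the example at the top of this PR intends.

import threading

from swarms import AutoScaler

scaler = AutoScaler(initial_agents=10)

# Run the dispatch loop in the background so the main thread can enqueue tasks.
threading.Thread(target=scaler.start, daemon=True).start()

for i in range(100):
    scaler.add_task(f"Task {i}")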
@ -0,0 +1,75 @@
|
|||||||
|
import faiss
|
||||||
|
from langchain.chat_models import ChatOpenAI
|
||||||
|
from langchain.docstore import InMemoryDocstore
|
||||||
|
from langchain.embeddings import OpenAIEmbeddings
|
||||||
|
from langchain.vectorstores import FAISS
|
||||||
|
from langchain_experimental.autonomous_agents import AutoGPT
|
||||||
|
|
||||||
|
from swarms.agents.tools.autogpt import (
|
||||||
|
DuckDuckGoSearchRun,
|
||||||
|
FileChatMessageHistory,
|
||||||
|
ReadFileTool,
|
||||||
|
WebpageQATool,
|
||||||
|
WriteFileTool,
|
||||||
|
load_qa_with_sources_chain,
|
||||||
|
process_csv,
|
||||||
|
# web_search,
|
||||||
|
query_website_tool
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
ROOT_DIR = "./data/"
|
||||||
|
|
||||||
|
|
||||||
|
class AutoBot:
|
||||||
|
def __init__(self,
|
||||||
|
model_name="gpt-4",
|
||||||
|
openai_api_key=None,
|
||||||
|
ai_name="Autobot Swarm Worker",
|
||||||
|
ai_role="Worker in a swarm",
|
||||||
|
# embedding_size=None,
|
||||||
|
# k=None,
|
||||||
|
temperature=0.5):
|
||||||
|
self.openai_api_key = openai_api_key
|
||||||
|
self.temperature = temperature
|
||||||
|
self.llm = ChatOpenAI(model_name=model_name,
|
||||||
|
openai_api_key=self.openai_api_key,
|
||||||
|
temperature=self.temperature)
|
||||||
|
|
||||||
|
self.ai_name = ai_name
|
||||||
|
self.ai_role = ai_role
|
||||||
|
|
||||||
|
# self.embedding_size = embedding_size
|
||||||
|
# # self.k = k
|
||||||
|
|
||||||
|
self.setup_tools()
|
||||||
|
self.setup_memory()
|
||||||
|
self.setup_agent()
|
||||||
|
|
||||||
|
def setup_tools(self):
|
||||||
|
self.tools = [
|
||||||
|
WriteFileTool(root_dir=ROOT_DIR),
|
||||||
|
ReadFileTool(root_dir=ROOT_DIR),
|
||||||
|
process_csv,
|
||||||
|
query_website_tool,
|
||||||
|
]
|
||||||
|
|
||||||
|
def setup_memory(self):
|
||||||
|
embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
|
||||||
|
embedding_size = 1536
|
||||||
|
index = faiss.IndexFlatL2(embedding_size)
|
||||||
|
self.vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
|
||||||
|
|
||||||
|
def setup_agent(self):
|
||||||
|
self.agent = AutoGPT.from_llm_and_tools(
|
||||||
|
ai_name=self.ai_name,
|
||||||
|
ai_role=self.ai_role,
|
||||||
|
tools=self.tools,
|
||||||
|
llm=self.llm,
|
||||||
|
memory=self.vectorstore.as_retriever(search_kwargs={"k": 8}),
|
||||||
|
)
|
||||||
|
|
||||||
|
def run(self, task):
|
||||||
|
result = self.agent.run([task])
|
||||||
|
return result
|
||||||
|
|
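Note that AutoScaler.monitor_and_scale calls agent.is_busy() on each worker, while AutoBot above only defines run. A hypothetical busy-flag wrapper (an illustration, not part of this diff; the class name TrackedAutoBot is invented) is one way the two could be reconciled:

class TrackedAutoBot(AutoBot):
    # Hypothetical sketch, not part of the diff: expose the is_busy() check
    # that AutoScaler.monitor_and_scale expects.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._busy = False

    def is_busy(self):
        return self._busy

    def run(self, task):
        self._busy = True
        try:
            return super().run(task)
        finally:
            self._busy = False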