Former-commit-id: 01e7e9b34c
group-chat
parent 9adf7b6084
commit d0d51a5975
@@ -0,0 +1 @@
[]
@@ -0,0 +1,9 @@
from swarms import AutoBot, AutoScaler

auto_scaler = AutoScaler()
auto_scaler.start()  # spawns the monitor thread and begins dispatching queued tasks

# Enqueue 100 placeholder tasks for the swarm to work through.
for i in range(100):
    auto_scaler.add_task(f"Task {i}")
@@ -1,14 +1,18 @@
# swarms
from swarms.orchestrator.autoscaler import AutoScaler

# worker
# from swarms.workers.worker_node import WorkerNode
from swarms.workers.worker_node import WorkerNode
from swarms.workers.autobot import AutoBot

# boss
# from swarms.boss.boss_node import BossNode
from swarms.boss.boss_node import BossNode

# models
from swarms.agents.models.anthropic import Anthropic
from swarms.agents.models.huggingface import HuggingFaceLLM
from swarms.agents.models.palm import GooglePalm
# from swarms.agents.models.palm import GooglePalm
from swarms.agents.models.petals import Petals
# from swarms.agents.models.openai import OpenAI
from swarms.agents.models.openai import OpenAI
@@ -1,187 +1,189 @@
from __future__ import annotations

import logging
from swarms.utils.logger import logger
from typing import Any, Callable, Dict, List, Optional

from pydantic import BaseModel, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

import google.generativeai as genai


class GooglePalmError(Exception):
    """Error raised when there is an issue with the Google PaLM API."""


def _truncate_at_stop_tokens(
    text: str,
    stop: Optional[List[str]],
) -> str:
    """Truncates text at the earliest stop token found."""
    if stop is None:
        return text

    for stop_token in stop:
        stop_token_idx = text.find(stop_token)
        if stop_token_idx != -1:
            text = text[:stop_token_idx]
    return text


def _response_to_result(response: genai.types.ChatResponse, stop: Optional[List[str]]) -> Dict[str, Any]:
    """Convert a PaLM chat response to a result dictionary."""
    result = {
        "id": response.id,
        "created": response.created,
        "model": response.model,
        "usage": {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens,
        },
        "choices": [],
    }
    for choice in response.choices:
        result["choices"].append({
            "text": _truncate_at_stop_tokens(choice.text, stop),
            "index": choice.index,
            "finish_reason": choice.finish_reason,
        })
    return result


def _messages_to_prompt_dict(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Convert a list of message dictionaries to a prompt dictionary."""
    prompt = {"messages": []}
    for message in messages:
        prompt["messages"].append({
            "role": message["role"],
            "content": message["content"],
        })
    return prompt


def _create_retry_decorator() -> Callable[[Any], Any]:
    """Create a retry decorator with exponential backoff."""
    return retry(
        retry=retry_if_exception_type(GooglePalmError),
        stop=stop_after_attempt(5),
        wait=wait_exponential(multiplier=1, min=2, max=30),
        before_sleep=before_sleep_log(logger, logging.DEBUG),
        reraise=True,
    )


####################### => main class
class GooglePalm(BaseModel):
    """Wrapper around Google's PaLM Chat API."""

    client: Any  #: :meta private:
    model_name: str = "models/chat-bison-001"
    google_api_key: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    n: int = 1

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        # Same as before
        pass

    def chat_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = _create_retry_decorator()

        @retry_decorator
        def _chat_with_retry(**kwargs: Any) -> Any:
            return self.client.chat(**kwargs)

        return _chat_with_retry(**kwargs)

    async def achat_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the async completion call."""
        retry_decorator = _create_retry_decorator()

        @retry_decorator
        async def _achat_with_retry(**kwargs: Any) -> Any:
            return await self.client.chat_async(**kwargs)

        return await _achat_with_retry(**kwargs)
# from __future__ import annotations

# import logging
# from swarms.utils.logger import logger
# from typing import Any, Callable, Dict, List, Optional

# from pydantic import BaseModel, model_validator
# from tenacity import (
#     before_sleep_log,
#     retry,
#     retry_if_exception_type,
#     stop_after_attempt,
#     wait_exponential,
# )

# import google.generativeai as palm


# class GooglePalmError(Exception):
#     """Error raised when there is an issue with the Google PaLM API."""


# def _truncate_at_stop_tokens(
#     text: str,
#     stop: Optional[List[str]],
# ) -> str:
#     """Truncates text at the earliest stop token found."""
#     if stop is None:
#         return text

#     for stop_token in stop:
#         stop_token_idx = text.find(stop_token)
#         if stop_token_idx != -1:
#             text = text[:stop_token_idx]
#     return text


# def _response_to_result(response: palm.types.ChatResponse, stop: Optional[List[str]]) -> Dict[str, Any]:
#     """Convert a PaLM chat response to a result dictionary."""
#     result = {
#         "id": response.id,
#         "created": response.created,
#         "model": response.model,
#         "usage": {
#             "prompt_tokens": response.usage.prompt_tokens,
#             "completion_tokens": response.usage.completion_tokens,
#             "total_tokens": response.usage.total_tokens,
#         },
#         "choices": [],
#     }
#     for choice in response.choices:
#         result["choices"].append({
#             "text": _truncate_at_stop_tokens(choice.text, stop),
#             "index": choice.index,
#             "finish_reason": choice.finish_reason,
#         })
#     return result


# def _messages_to_prompt_dict(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
#     """Convert a list of message dictionaries to a prompt dictionary."""
#     prompt = {"messages": []}
#     for message in messages:
#         prompt["messages"].append({
#             "role": message["role"],
#             "content": message["content"],
#         })
#     return prompt


# def _create_retry_decorator() -> Callable[[Any], Any]:
#     """Create a retry decorator with exponential backoff."""
#     return retry(
#         retry=retry_if_exception_type(GooglePalmError),
#         stop=stop_after_attempt(5),
#         wait=wait_exponential(multiplier=1, min=2, max=30),
#         before_sleep=before_sleep_log(logger, logging.DEBUG),
#         reraise=True,
#     )


# ####################### => main class
# class GooglePalm(BaseModel):
#     """Wrapper around Google's PaLM Chat API."""

#     client: Any  #: :meta private:
#     model_name: str = "models/chat-bison-001"
#     google_api_key: Optional[str] = None
#     temperature: Optional[float] = None
#     top_p: Optional[float] = None
#     top_k: Optional[int] = None
#     n: int = 1

#     @model_validator(mode="pre")
#     def validate_environment(cls, values: Dict) -> Dict:
#         # Same as before
#         pass

#     def chat_with_retry(self, **kwargs: Any) -> Any:
#         """Use tenacity to retry the completion call."""
#         retry_decorator = _create_retry_decorator()

#         @retry_decorator
#         def _chat_with_retry(**kwargs: Any) -> Any:
#             return self.client.chat(**kwargs)

#         return _chat_with_retry(**kwargs)

#     async def achat_with_retry(self, **kwargs: Any) -> Any:
#         """Use tenacity to retry the async completion call."""
#         retry_decorator = _create_retry_decorator()

#         @retry_decorator
#         async def _achat_with_retry(**kwargs: Any) -> Any:
#             return await self.client.chat_async(**kwargs)

#         return await _achat_with_retry(**kwargs)
    def __call__(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = self.chat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
            **kwargs,
        )

        return _response_to_result(response, stop)

    def generate(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = self.chat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
            **kwargs,
        )

        return _response_to_result(response, stop)

    async def _agenerate(
        self,
        messages: List[Dict[str, Any]],
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        prompt = _messages_to_prompt_dict(messages)

        response: genai.types.ChatResponse = await self.achat_with_retry(
            model=self.model_name,
            prompt=prompt,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            candidate_count=self.n,
        )

        return _response_to_result(response, stop)

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_name": self.model_name,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "n": self.n,
        }

    @property
    def _llm_type(self) -> str:
        return "google-palm-chat"
    # def __call__(
    #     self,
    #     messages: List[Dict[str, Any]],
    #     stop: Optional[List[str]] = None,
    #     **kwargs: Any,
    # ) -> Dict[str, Any]:
    #     prompt = _messages_to_prompt_dict(messages)

    #     response: palm.types.ChatResponse = self.chat_with_retry(
    #         model=self.model_name,
    #         prompt=prompt,
    #         temperature=self.temperature,
    #         top_p=self.top_p,
    #         top_k=self.top_k,
    #         candidate_count=self.n,
    #         **kwargs,
    #     )

    #     return _response_to_result(response, stop)

    # def generate(
    #     self,
    #     messages: List[Dict[str, Any]],
    #     stop: Optional[List[str]] = None,
    #     **kwargs: Any,
    # ) -> Dict[str, Any]:
    #     prompt = _messages_to_prompt_dict(messages)

    #     response: palm.types.ChatResponse = self.chat_with_retry(
    #         model=self.model_name,
    #         prompt=prompt,
    #         temperature=self.temperature,
    #         top_p=self.top_p,
    #         top_k=self.top_k,
    #         candidate_count=self.n,
    #         **kwargs,
    #     )

    #     return _response_to_result(response, stop)

    # async def _agenerate(
    #     self,
    #     messages: List[Dict[str, Any]],
    #     stop: Optional[List[str]] = None,
    #     **kwargs: Any,
    # ) -> Dict[str, Any]:
    #     prompt = _messages_to_prompt_dict(messages)

    #     response: palm.types.ChatResponse = await self.achat_with_retry(
    #         model=self.model_name,
    #         prompt=prompt,
    #         temperature=self.temperature,
    #         top_p=self.top_p,
    #         top_k=self.top_k,
    #         candidate_count=self.n,
    #     )

    #     return _response_to_result(response, stop)

    # @property
    # def _identifying_params(self) -> Dict[str, Any]:
    #     """Get the identifying parameters."""
    #     return {
    #         "model_name": self.model_name,
    #         "temperature": self.temperature,
    #         "top_p": self.top_p,
    #         "top_k": self.top_k,
    #         "n": self.n,
    #     }

    # @property
    # def _llm_type(self) -> str:
    #     return "google-palm-chat"
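For orientation, a minimal usage sketch of the GooglePalm wrapper above (illustrative only: it assumes validate_environment has been implemented so that client is bound to the PaLM SDK, and the API key, prompt, and stop token are placeholders):

# Illustrative sketch, not part of this commit.
palm_chat = GooglePalm(google_api_key="YOUR_API_KEY", temperature=0.7)
result = palm_chat(
    messages=[{"role": "user", "content": "Summarize what a swarm orchestrator does."}],
    stop=["\n\n"],
)
print(result["choices"][0]["text"])  # first candidate, truncated at the stop token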
@@ -0,0 +1,62 @@
import threading
import queue
from time import sleep
from swarms.workers.autobot import AutoBot


# TODO: Handle task assignment and task delegation.
# TODO: User task => decomposed into very small sub-tasks => sub-tasks assigned to workers =>
#       workers complete them and update the swarm; they can ask other agents for help.
# TODO: Missing: task assignment, task delegation, task completion, swarm-level communication with a vector DB.
class AutoScaler:
    def __init__(self,
                 initial_agents=10,
                 scale_up_factor=1,
                 idle_threshold=0.2,
                 busy_threshold=0.7):
        self.agents_pool = [AutoBot() for _ in range(initial_agents)]
        self.task_queue = queue.Queue()
        self.scale_up_factor = scale_up_factor
        self.idle_threshold = idle_threshold
        self.busy_threshold = busy_threshold  # fixed: was never stored, but monitor_and_scale reads it
        self.lock = threading.Lock()

    def add_task(self, task):
        self.task_queue.put(task)  # fixed: previously referenced a non-existent self.tasks_queue

    def scale_up(self):
        with self.lock:
            new_agents_count = int(len(self.agents_pool) * self.scale_up_factor)
            for _ in range(new_agents_count):
                self.agents_pool.append(AutoBot())

    def scale_down(self):
        with self.lock:
            if len(self.agents_pool) > 10:  # ensure a minimum of 10 agents
                del self.agents_pool[-1]  # remove the last agent

    def monitor_and_scale(self):
        while True:
            sleep(60)  # check once a minute
            pending_tasks = self.task_queue.qsize()
            active_agents = sum([1 for agent in self.agents_pool if agent.is_busy()])

            if pending_tasks / len(self.agents_pool) > self.busy_threshold:
                self.scale_up()
            elif active_agents / len(self.agents_pool) < self.idle_threshold:
                self.scale_down()

    def start(self):
        monitor_thread = threading.Thread(target=self.monitor_and_scale)
        monitor_thread.start()

        while True:
            task = self.task_queue.get()
            if task:
                available_agent = next((agent for agent in self.agents_pool), None)  # fixed: next() needs a default when the pool is empty
                if available_agent:
                    available_agent.run(task)
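Note that monitor_and_scale polls agent.is_busy(), which the AutoBot class introduced in this commit does not define. A minimal sketch of that hook, written as a hypothetical subclass (not part of this commit; the name and flag are illustrative):

# Hypothetical busy-flag hook that AutoScaler.monitor_and_scale relies on.
class BusyAwareAutoBot(AutoBot):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._busy = False

    def is_busy(self):
        return self._busy

    def run(self, task):
        # Mark the worker busy for the duration of the task.
        self._busy = True
        try:
            return super().run(task)
        finally:
            self._busy = False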
@@ -0,0 +1,75 @@
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT

from swarms.agents.tools.autogpt import (
    DuckDuckGoSearchRun,
    FileChatMessageHistory,
    ReadFileTool,
    WebpageQATool,
    WriteFileTool,
    load_qa_with_sources_chain,
    process_csv,
    # web_search,
    query_website_tool,
)


# Directory the file tools read from and write to.
ROOT_DIR = "./data/"


class AutoBot:
    def __init__(self,
                 model_name="gpt-4",
                 openai_api_key=None,
                 ai_name="Autobot Swarm Worker",
                 ai_role="Worker in a swarm",
                 # embedding_size=None,
                 # k=None,
                 temperature=0.5):
        self.openai_api_key = openai_api_key
        self.temperature = temperature
        self.llm = ChatOpenAI(model_name=model_name,
                              openai_api_key=self.openai_api_key,
                              temperature=self.temperature)

        self.ai_name = ai_name
        self.ai_role = ai_role

        # self.embedding_size = embedding_size
        # self.k = k

        self.setup_tools()
        self.setup_memory()
        self.setup_agent()

    def setup_tools(self):
        self.tools = [
            WriteFileTool(root_dir=ROOT_DIR),
            ReadFileTool(root_dir=ROOT_DIR),
            process_csv,
            query_website_tool,
        ]

    def setup_memory(self):
        # FAISS index over OpenAI embeddings serves as the agent's in-memory vector store.
        embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
        embedding_size = 1536
        index = faiss.IndexFlatL2(embedding_size)
        self.vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})

    def setup_agent(self):
        # AutoGPT agent wired to the tools, LLM, and retriever configured above.
        self.agent = AutoGPT.from_llm_and_tools(
            ai_name=self.ai_name,
            ai_role=self.ai_role,
            tools=self.tools,
            llm=self.llm,
            memory=self.vectorstore.as_retriever(search_kwargs={"k": 8}),
        )

    def run(self, task):
        result = self.agent.run([task])
        return result
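A minimal sketch of driving a single worker directly (illustrative: the API key and task string are placeholders, and a valid OpenAI key, network access, and the langchain/faiss dependencies are assumed):

# Illustrative sketch, not part of this commit.
worker = AutoBot(openai_api_key="YOUR_OPENAI_API_KEY", ai_name="Research Worker")
report = worker.run("Find three recent papers on swarm robotics and summarize them.")
print(report)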