bug fixes, module error: swarm. instead of swarms.

pull/64/head
Kye 1 year ago
parent 01cd02b5c2
commit dbcab9a06b

@@ -45,14 +45,18 @@ from swarms.workers import Worker
 from swarms.swarms import MultiAgentDebate, select_speaker
 from swarms.models import OpenAIChat
 
+api_key = "sk-"
+
 llm = OpenAIChat(
     model_name='gpt-4',
-    openai_api_key="api-key",
+    openai_api_key=api_key,
     temperature=0.5
 )
 
 node = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Optimus Prime",
     ai_role="Worker in a swarm",
     external_tools = None,
@@ -62,6 +66,7 @@ node = Worker(
 
 node2 = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Bumble Bee",
     ai_role="Worker in a swarm",
     external_tools = None,
@@ -71,6 +76,7 @@ node2 = Worker(
 
 node3 = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Bumble Bee",
     ai_role="Worker in a swarm",
     external_tools = None,
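
For reference, these three workers feed the debate loop whose output the next hunk iterates over (`for result in results:`). A minimal sketch of that wiring, continuing the snippet above; the exact `MultiAgentDebate` constructor and `run()` signature are assumptions, since this diff only shows the import.

```python
# Sketch only: assumes MultiAgentDebate takes the worker list plus the
# select_speaker function and that run(task) returns an iterable of results.
agents = [node, node2, node3]

debate = MultiAgentDebate(agents, select_speaker)

task = "What are the key risks of deploying an agent swarm in production?"
results = debate.run(task)

for result in results:
    print(result)
```
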
@@ -104,20 +110,20 @@ for result in results:
 - And, then place the openai api key in the Worker for the openai embedding model
 
 ```python
-from swarms.models import ChatOpenAI
-from swarms.workers import Worker
+from swarms.models import OpenAIChat
+from swarms import Worker
 
-llm = ChatOpenAI(
-    model_name='gpt-4',
-    openai_api_key="api-key",
-    temperature=0.5
+api_key = ""
+
+llm = OpenAIChat(
+    openai_api_key=api_key,
+    temperature=0.5,
 )
 
 node = Worker(
     llm=llm,
     ai_name="Optimus Prime",
-    #openai key for the embeddings
-    openai_api_key="sk-eee"
+    openai_api_key=api_key,
     ai_role="Worker in a swarm",
     external_tools=None,
     human_in_the_loop=False,
@@ -128,6 +134,7 @@ task = "What were the winning boston marathon times for the past 5 years (ending
 
 response = node.run(task)
 print(response)
 ```
 
 ------
@@ -139,8 +146,9 @@ print(response)
 from swarms.models import OpenAIChat
 from swarms.agents import OmniModalAgent
 
-llm = OpenAIChat(model_name="gpt-4")
+api_key = "SK-"
+
+llm = OpenAIChat(model_name="gpt-4", openai_api_key=api_key)
 
 agent = OmniModalAgent(llm)
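
The OmniModalAgent hunk stops at construction. A short usage sketch, assuming the agent exposes a `run(task)` entry point like the `Worker` above (not shown in this hunk):

```python
# Assumed usage; only the construction of `agent` appears in the diff above.
task = "Generate an image of a city skyline and describe it"
response = agent.run(task)
print(response)
```
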

@@ -1,15 +1,17 @@
 from swarms.models import OpenAIChat
 from swarms import Worker
 
+api_key = ""
+
 llm = OpenAIChat(
-    openai_api_key="Enter in your key",
+    openai_api_key=api_key,
     temperature=0.5,
 )
 
 node = Worker(
     llm=llm,
     ai_name="Optimus Prime",
-    openai_api_key="entter in your key",
+    openai_api_key=api_key,
     ai_role="Worker in a swarm",
     external_tools=None,
     human_in_the_loop=False,
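
The same fix is applied to the standalone example: a single `api_key` variable replaces the hard-coded placeholders. A slightly more defensive variant of the corrected snippet, reading the key from an environment variable instead of an empty string (the `OPENAI_API_KEY` name is conventional, not required by swarms):

```python
import os

from swarms.models import OpenAIChat
from swarms import Worker

# Pull the key from the environment rather than hard-coding it.
api_key = os.getenv("OPENAI_API_KEY", "")

llm = OpenAIChat(
    openai_api_key=api_key,
    temperature=0.5,
)

node = Worker(
    llm=llm,
    ai_name="Optimus Prime",
    openai_api_key=api_key,
    ai_role="Worker in a swarm",
    external_tools=None,
    human_in_the_loop=False,
)

response = node.run("Summarize the main changes in this release")
print(response)
```
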

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "swarms"
-version = "1.8.2"
+version = "1.9.1"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -38,6 +38,7 @@ duckduckgo-search = "*"
 faiss-cpu = "*"
 diffusers = "*"
 wget = "*"
+griptape = "*"
 httpx = "*"
 ggl = "*"
 beautifulsoup4 = "*"

@@ -25,7 +25,7 @@ chromadb
 open-interpreter
 tabulate
 colored
+griptape
 addict
 albumentations
 basicsr

@@ -12,11 +12,10 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 from swarms import workers
 from swarms.workers.worker import Worker
-from swarms.chunkers import chunkers
+# from swarms import chunkers
 from swarms import models
 from swarms import structs
 from swarms import swarms
-from swarms.swarms.orchestrate import Orchestrator
 from swarms import agents
 from swarms.logo import logo
 
 print(logo)
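
With `from swarms import chunkers` commented out, the chunkers are no longer re-exported by `import swarms` and would need to be imported from their module path directly. A sketch, assuming the markdown chunker lives in `swarms.chunkers.markdown` and inherits a griptape-style `chunk()` method returning artifacts with a `.value` field (neither detail is confirmed by this hunk):

```python
# Direct import now that the package-level re-export is disabled; the module
# path, the chunk() signature, and the .value attribute are assumptions.
from swarms.chunkers.markdown import MarkdownChunker

chunker = MarkdownChunker()
chunks = chunker.chunk("# Title\n\nSome markdown body text to split into chunks.")
for chunk in chunks:
    print(chunk.value)
```
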

@@ -3,7 +3,7 @@ from abc import ABC
 from typing import Optional
 
 from attr import define, field, Factory
 from griptape.artifacts import TextArtifact
-from swarm.chunkers.chunk_seperators import ChunkSeparator
+from swarms.chunkers.chunk_seperators import ChunkSeparator
 from griptape.tokenizers import OpenAiTokenizer

@@ -1,5 +1,5 @@
 from swarms.chunkers.base import BaseChunker
-from swarms.chunk_seperator import ChunkSeparator
+from swarms.chunkers.chunk_seperator import ChunkSeparator
 
 
 class MarkdownChunker(BaseChunker):

@@ -7,8 +7,11 @@
 import concurrent.futures
 import logging
 
+import faiss
+from swarms.embeddings.openai import OpenAIEmbeddings
 from swarms.swarms.swarms import HierarchicalSwarm
+from swarms.vectorstore.vectorstore import FAISS, InMemoryDocstore
 
 logging.basicConfig(
     level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
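
The new imports (`faiss`, `OpenAIEmbeddings`, `FAISS`, `InMemoryDocstore`) suggest an in-memory FAISS vector store being wired into this module. A minimal construction sketch, assuming these re-exports mirror the LangChain classes of the same names and that the OpenAI embedding dimension is 1536:

```python
# Sketch only: the actual wiring is outside this hunk.
embeddings = OpenAIEmbeddings(openai_api_key="sk-...")
embedding_size = 1536  # assumed dimension of OpenAI embeddings
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
```
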

@@ -10,3 +10,4 @@ from swarms.models.zephyr import Zephyr
 from swarms.models.idefics import Idefics
 from swarms.models.kosmos_two import Kosmos
 from swarms.models.vilt import Vilt
+# from swarms.models.fuyu import Fuyu

@@ -3,7 +3,15 @@ import os
 
 
 class Anthropic:
-    """Anthropic large language models."""
+    """
+    Anthropic large language models.
+
+    Args:
+
+    """
 
     def __init__(
         self,
@@ -41,12 +49,12 @@ class Anthropic:
             d["top_p"] = self.top_p
         return d
 
-    def generate(self, prompt, stop=None):
+    def run(self, task: str, stop=None):
         """Call out to Anthropic's completion endpoint."""
         stop = stop or []
         params = self._default_params()
         headers = {"Authorization": f"Bearer {self.anthropic_api_key}"}
-        data = {"prompt": prompt, "stop_sequences": stop, **params}
+        data = {"prompt": task, "stop_sequences": stop, **params}
         response = requests.post(
             f"{self.anthropic_api_url}/completions",
             headers=headers,
@@ -55,12 +63,12 @@ class Anthropic:
         )
         return response.json().get("completion")
 
-    def __call__(self, prompt, stop=None):
+    def __call__(self, task: str, stop=None):
         """Call out to Anthropic's completion endpoint."""
         stop = stop or []
         params = self._default_params()
         headers = {"Authorization": f"Bearer {self.anthropic_api_key}"}
-        data = {"prompt": prompt, "stop_sequences": stop, **params}
+        data = {"prompt": task, "stop_sequences": stop, **params}
         response = requests.post(
             f"{self.anthropic_api_url}/completions",
             headers=headers,
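
`generate()` is renamed to `run()`, and both `run()` and `__call__()` now take a `task` string. A short usage sketch of the renamed interface; the constructor argument name is an assumption, since only `self.anthropic_api_key` appears in this diff:

```python
# run()/__call__ signatures come from the hunks above; the constructor
# keyword is assumed.
model = Anthropic(anthropic_api_key="sk-ant-...")

completion = model.run("Write a haiku about agent swarms")
print(completion)

# __call__ behaves the same way and also accepts stop sequences.
print(model("Write a haiku about agent swarms", stop=["\n\n"]))
```
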

@@ -1,21 +1,22 @@
-import random
 import os
+import random
 from typing import Dict, Union
 
 import faiss
+from langchain.chains.qa_with_sources.loading import (
+    load_qa_with_sources_chain,
+)
 from langchain.docstore import InMemoryDocstore
 from langchain.embeddings import OpenAIEmbeddings
+from langchain.tools import ReadFileTool, WriteFileTool
 from langchain.tools.human.tool import HumanInputRun
 from langchain.vectorstores import FAISS
 from langchain_experimental.autonomous_agents import AutoGPT
 
 from swarms.agents.message import Message
 from swarms.tools.autogpt import (
-    ReadFileTool,
     WebpageQATool,
-    WriteFileTool,
     # compile,
-    load_qa_with_sources_chain,
     process_csv,
 )
 from swarms.utils.decorators import error_decorator, timing_decorator
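
`ReadFileTool`, `WriteFileTool`, and `load_qa_with_sources_chain` are now imported straight from LangChain rather than re-exported by `swarms.tools.autogpt`. For reference, a sketch of the AutoGPT-style tool list these imports typically feed; the `root_dir` value, the `ChatOpenAI` handle, and the `qa_chain` keyword are illustrative, not taken from this file:

```python
# Illustrative tool setup using the relocated imports; the Worker's real
# setup code is outside this hunk.
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-4", temperature=0.5)  # assumed LLM handle

tools = [
    WriteFileTool(root_dir="./data"),
    ReadFileTool(root_dir="./data"),
    process_csv,
    WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)),
]
```
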
