bug fixes; fix module import error: swarm. used instead of swarms.

Former-commit-id: dbcab9a06b
discord-bot-framework
Kye 1 year ago
parent 1c953863c1
commit 62f7ac9fa6

@@ -45,14 +45,18 @@ from swarms.workers import Worker
 from swarms.swarms import MultiAgentDebate, select_speaker
 from swarms.models import OpenAIChat
+api_key = "sk-"
 llm = OpenAIChat(
     model_name='gpt-4',
-    openai_api_key="api-key",
+    openai_api_key=api_key,
     temperature=0.5
 )
 node = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Optimus Prime",
     ai_role="Worker in a swarm",
     external_tools = None,
@@ -62,6 +66,7 @@ node = Worker(
 node2 = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Bumble Bee",
     ai_role="Worker in a swarm",
     external_tools = None,
@@ -71,6 +76,7 @@ node2 = Worker(
 node3 = Worker(
     llm=llm,
+    openai_api_key=api_key,
     ai_name="Bumble Bee",
     ai_role="Worker in a swarm",
     external_tools = None,
@@ -104,30 +110,31 @@ for result in results:
 - And, then place the openai api key in the Worker for the openai embedding model
 ```python
-from swarms.models import ChatOpenAI
-from swarms.workers import Worker
+from swarms.models import OpenAIChat
+from swarms import Worker
-llm = ChatOpenAI(
-    model_name='gpt-4',
-    openai_api_key="api-key",
-    temperature=0.5
+api_key = ""
+llm = OpenAIChat(
+    openai_api_key=api_key,
+    temperature=0.5,
 )
 node = Worker(
     llm=llm,
     ai_name="Optimus Prime",
-    #openai key for the embeddings
-    openai_api_key="sk-eee"
+    openai_api_key=api_key,
     ai_role="Worker in a swarm",
-    external_tools = None,
-    human_in_the_loop = False,
-    temperature = 0.5,
+    external_tools=None,
+    human_in_the_loop=False,
+    temperature=0.5,
 )
 task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
 response = node.run(task)
 print(response)
 ```
 ------
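Note on the hunk above: threading one `api_key` variable into both `OpenAIChat` and the `Worker` means the chat model and the embedding model share a single credential, so only one placeholder needs filling in before the README example runs.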
@@ -139,8 +146,9 @@ print(response)
 from swarms.models import OpenAIChat
 from swarms.agents import OmniModalAgent
+api_key = "SK-"
-llm = OpenAIChat(model_name="gpt-4")
+llm = OpenAIChat(model_name="gpt-4", openai_api_key=api_key)
 agent = OmniModalAgent(llm)
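The hunk ends before the agent is exercised. A hedged continuation, not part of the commit, assuming `OmniModalAgent` exposes the same `run(task)` interface as the `Worker` examples above:

```python
# Hypothetical continuation: run a multimodal task through the agent.
# Assumes OmniModalAgent exposes a run(task) method like Worker does.
response = agent.run("Generate a video of a swarm of fish swimming.")
print(response)
```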

@@ -1,15 +1,17 @@
 from swarms.models import OpenAIChat
 from swarms import Worker
+api_key = ""
 llm = OpenAIChat(
-    openai_api_key="Enter in your key",
+    openai_api_key=api_key,
     temperature=0.5,
 )
 node = Worker(
     llm=llm,
     ai_name="Optimus Prime",
-    openai_api_key="entter in your key",
+    openai_api_key=api_key,
     ai_role="Worker in a swarm",
     external_tools=None,
     human_in_the_loop=False,

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "swarms"
-version = "1.8.2"
+version = "1.9.1"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -38,6 +38,7 @@ duckduckgo-search = "*"
 faiss-cpu = "*"
 diffusers = "*"
 wget = "*"
+griptape = "*"
 httpx = "*"
 ggl = "*"
 beautifulsoup4 = "*"

@@ -25,7 +25,7 @@ chromadb
 open-interpreter
 tabulate
 colored
+griptape
 addict
 albumentations
 basicsr

@@ -12,11 +12,10 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 from swarms import workers
 from swarms.workers.worker import Worker
-from swarms.chunkers import chunkers
+# from swarms import chunkers
 from swarms import models
 from swarms import structs
 from swarms import swarms
 from swarms.swarms.orchestrate import Orchestrator
 from swarms import agents
 from swarms.logo import logo
 print(logo)

@@ -3,7 +3,7 @@ from abc import ABC
 from typing import Optional
 from attr import define, field, Factory
 from griptape.artifacts import TextArtifact
-from swarm.chunkers.chunk_seperators import ChunkSeparator
+from swarms.chunkers.chunk_seperators import ChunkSeparator
 from griptape.tokenizers import OpenAiTokenizer

@@ -1,5 +1,5 @@
 from swarms.chunkers.base import BaseChunker
-from swarms.chunk_seperator import ChunkSeparator
+from swarms.chunkers.chunk_seperator import ChunkSeparator
 class MarkdownChunker(BaseChunker):
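For context, a minimal sketch of what the corrected import path enables. Both the `swarms.chunkers.markdown` module path and the `chunk()` method are assumptions based on the griptape-style `BaseChunker` these classes inherit from; neither is shown in this diff:

```python
# Sketch only: exercise the fixed ChunkSeparator import chain.
# Module path and chunk() signature are assumed, not confirmed by the diff.
from swarms.chunkers.markdown import MarkdownChunker

chunker = MarkdownChunker()
chunks = chunker.chunk("# Heading\n\nSome markdown body text to split.")
print(len(chunks))
```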

@@ -7,8 +7,11 @@
 import concurrent.futures
 import logging
 import faiss
+from swarms.embeddings.openai import OpenAIEmbeddings
+from swarms.swarms.swarms import HierarchicalSwarm
+from swarms.vectorstore.vectorstore import FAISS, InMemoryDocstore
 logging.basicConfig(
     level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"

@@ -9,4 +9,5 @@ from swarms.models.zephyr import Zephyr
 # MultiModal Models
 from swarms.models.idefics import Idefics
 from swarms.models.kosmos_two import Kosmos
-from swarms.models.vilt import Vilt
+from swarms.models.vilt import Vilt
+# from swarms.models.fuyu import Fuyu

@@ -3,7 +3,15 @@ import os
 class Anthropic:
-    """Anthropic large language models."""
+    """
+    Anthropic large language models.
+    Args:
+    """
     def __init__(
         self,
@@ -41,12 +49,12 @@ class Anthropic:
         d["top_p"] = self.top_p
         return d
-    def generate(self, prompt, stop=None):
+    def run(self, task: str, stop=None):
         """Call out to Anthropic's completion endpoint."""
         stop = stop or []
         params = self._default_params()
         headers = {"Authorization": f"Bearer {self.anthropic_api_key}"}
-        data = {"prompt": prompt, "stop_sequences": stop, **params}
+        data = {"prompt": task, "stop_sequences": stop, **params}
         response = requests.post(
             f"{self.anthropic_api_url}/completions",
             headers=headers,
@@ -55,12 +63,12 @@
         )
         return response.json().get("completion")
-    def __call__(self, prompt, stop=None):
+    def __call__(self, task: str, stop=None):
         """Call out to Anthropic's completion endpoint."""
         stop = stop or []
         params = self._default_params()
         headers = {"Authorization": f"Bearer {self.anthropic_api_key}"}
-        data = {"prompt": prompt, "stop_sequences": stop, **params}
+        data = {"prompt": task, "stop_sequences": stop, **params}
         response = requests.post(
             f"{self.anthropic_api_url}/completions",
             headers=headers,
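Taken together, the two hunks above rename `generate` to `run` and make `__call__` mirror it. A minimal usage sketch, assuming the constructor accepts an `anthropic_api_key` argument and that the class lives at `swarms.models.anthropic` (neither detail is shown in this diff):

```python
# Sketch only: both entry points post to the same /completions endpoint.
from swarms.models.anthropic import Anthropic  # assumed module path

llm = Anthropic(anthropic_api_key="your-anthropic-key")  # assumed kwarg
print(llm.run("Write one sentence about agent swarms."))
print(llm("Write one sentence about agent swarms."))  # __call__ mirrors run
```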

@@ -1,21 +1,22 @@
-import random
 import os
+import random
 from typing import Dict, Union
 import faiss
-from langchain.chains.qa_with_sources.loading import (
-    load_qa_with_sources_chain,
-)
 from langchain.docstore import InMemoryDocstore
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.tools import ReadFileTool, WriteFileTool
 from langchain.tools.human.tool import HumanInputRun
 from langchain.vectorstores import FAISS
 from langchain_experimental.autonomous_agents import AutoGPT
 from swarms.agents.message import Message
 from swarms.tools.autogpt import (
     ReadFileTool,
     WebpageQATool,
     WriteFileTool,
+    # compile,
+    load_qa_with_sources_chain,
     process_csv,
 )
 from swarms.utils.decorators import error_decorator, timing_decorator
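After this change, `swarms.tools.autogpt` is the single import surface for the AutoGPT tooling instead of scattered `langchain` paths. A sketch of the consumer side, assuming `ReadFileTool` and `WriteFileTool` keep their LangChain no-argument constructors; `WebpageQATool` is omitted because its constructor arguments are not shown in this diff:

```python
# Sketch only: build a tool list from the consolidated import surface.
from swarms.tools.autogpt import ReadFileTool, WriteFileTool

tools = [
    ReadFileTool(),   # reads a file from disk (re-exported LangChain tool)
    WriteFileTool(),  # writes a file to disk (re-exported LangChain tool)
]
```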
