[TESTS][CLEANUP]

pull/334/head
Kye 1 year ago
parent fa49b347ab
commit 0d1fbb3e46

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "2.9.9"
+version = "2.3.0"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -5,7 +5,7 @@ import time
 from abc import ABC, abstractmethod
 from typing import List, Optional
-from swarms.utils.llm_metrcs_decorator import metrics_decorator
+from swarms.utils.llm_metrics_decorator import metrics_decorator
 
 
 def count_tokens(text: str) -> int:
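The hunk above fixes a typo in the module path (llm_metrcs_decorator → llm_metrics_decorator). The name suggests a timing wrapper around LLM calls; a minimal sketch of what such a decorator might look like, assuming it only measures wall-clock latency and a crude tokens-per-second figure (the real implementation in swarms.utils may track more, and its exact return value is not shown in this diff):

import time
from functools import wraps


def metrics_decorator_sketch(fn):
    # Hypothetical stand-in for metrics_decorator: time the wrapped LLM
    # call and report latency plus a rough throughput figure.
    @wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = fn(*args, **kwargs)
        elapsed = time.time() - start
        tokens = len(str(result).split())  # whitespace split as a rough token count
        print(f"latency={elapsed:.3f}s throughput={tokens / max(elapsed, 1e-9):.1f} tok/s")
        return result
    return wrapper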

@@ -187,7 +187,6 @@ class Conversation(BaseStructure):
         # Load the conversation history from a JSON file
         with open(filename, "r") as f:
             self.conversation_history = json.load(f)
-
     def search_keyword_in_conversation(self, keyword: str):
         """Search for a keyword in the conversation history
@@ -5,7 +5,7 @@ from swarms.utils.parse_code import (
 )
 from swarms.utils.pdf_to_text import pdf_to_text
 from swarms.utils.math_eval import math_eval
-from swarms.utils.llm_metrcs_decorator import metrics_decorator
+from swarms.utils.llm_metrics_decorator import metrics_decorator
 # from swarms.utils.phoenix_handler import phoenix_trace_decorator

@@ -1,36 +0,0 @@
-from unittest.mock import patch
-from swarms.swarms.god_mode import GodMode, LLM
-
-
-def test_godmode_initialization():
-    godmode = GodMode(llms=[LLM] * 5)
-    assert isinstance(godmode, GodMode)
-    assert len(godmode.llms) == 5
-
-
-def test_godmode_run(monkeypatch):
-    def mock_llm_run(self, task):
-        return "response"
-
-    monkeypatch.setattr(LLM, "run", mock_llm_run)
-    godmode = GodMode(llms=[LLM] * 5)
-    responses = godmode.run("task1")
-    assert len(responses) == 5
-    assert responses == [
-        "response",
-        "response",
-        "response",
-        "response",
-        "response",
-    ]
-
-
-@patch("builtins.print")
-def test_godmode_print_responses(mock_print, monkeypatch):
-    def mock_llm_run(self, task):
-        return "response"
-
-    monkeypatch.setattr(LLM, "run", mock_llm_run)
-    godmode = GodMode(llms=[LLM] * 5)
-    godmode.print_responses("task1")
-    assert mock_print.call_count == 1
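The deleted file above documents GodMode's contract: run fans one task out to every wrapped LLM and returns the responses in order, and print_responses issues a single print call. A minimal hedged sketch of that fan-out, with local stand-ins replacing the real swarms classes:

class FakeLLM:
    # Plays the role of the LLM class the deleted test monkeypatched.
    def run(self, task):
        return "response"


class GodModeSketch:
    # Hypothetical reimplementation of the behavior the deleted tests pinned down.
    def __init__(self, llms):
        self.llms = llms

    def run(self, task):
        # Fan the single task out to every model and keep the responses in order.
        return [llm.run(task) for llm in self.llms]


godmode = GodModeSketch(llms=[FakeLLM() for _ in range(5)])
assert godmode.run("task1") == ["response"] * 5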

@@ -6,8 +6,6 @@ from swarms.structs import Agent
 from swarms.models import OpenAIChat
 from swarms.swarms.multi_agent_collab import (
     MultiAgentCollaboration,
-    select_next_speaker_director,
-    select_speaker_round_table,
 )
 
 # Sample agents for testing
@@ -105,13 +103,6 @@ def test_set_interaction_rules(collaboration):
     assert collaboration.interaction_rules == rules
-
-
-def test_set_interaction_rules(collaboration):
-    rules = {"rule1": "action1", "rule2": "action2"}
-    collaboration.set_interaction_rules(rules)
-    assert hasattr(collaboration, "interaction_rules")
-    assert collaboration.interaction_rules == rules
 
 
 def test_repr(collaboration):
     repr_str = repr(collaboration)
     assert isinstance(repr_str, str)
@@ -145,16 +136,6 @@ def test_save(collaboration, tmp_path):
 # Add more tests here...
 
 
-# Example of parameterized test for different selection functions
-@pytest.mark.parametrize(
-    "selection_function",
-    [select_next_speaker_director, select_speaker_round_table],
-)
-def test_selection_functions(collaboration, selection_function):
-    collaboration.select_next_speaker = selection_function
-    assert callable(collaboration.select_next_speaker)
-
-
-# Add more parameterized tests for different scenarios...
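The removed parameterized test only asserted that a selection function is callable once assigned to collaboration.select_next_speaker. A hedged sketch of the shape such a selector plausibly has, assuming it maps a step counter and the agent list to the next speaker (the real signatures of select_next_speaker_director and select_speaker_round_table are not shown in this diff):

def round_table_selector(step, agents):
    # Round-robin: hand the floor to each agent in turn.
    return agents[step % len(agents)]


agents = ["planner", "coder", "critic"]
assert [round_table_selector(s, agents) for s in range(4)] == [
    "planner", "coder", "critic", "planner",
]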

@@ -1,5 +1,5 @@
 import time
-from swarms.utils.llm_metrcs_decorator import metrics_decorator
+from swarms.utils.llm_metrics_decorator import metrics_decorator
 
 
 def test_metrics_decorator():
