Merge branch 'master' of https://github.com/kyegomez/swarms into memory

# Conflicts:
#	swarms/memory/chroma.py
Branch: memory
Author: Sashin, 1 year ago
Commit: 7ef9848552

@@ -42,3 +42,6 @@ PINECONE_API_KEY=""
 BING_COOKIE=""
 PSG_CONNECTION_STRING=""
+GITHUB_USERNAME=""
+GITHUB_REPO_NAME=""
+GITHUB_TOKEN=""

@@ -50,7 +50,9 @@ api_key = os.environ.get("OPENAI_API_KEY")
 # Initialize the language model
 llm = OpenAIChat(
     temperature=0.5,
+    model_name="gpt-4",
     openai_api_key=api_key,
+    max_tokens=4000
 )
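Several hunks in this commit make the same edit: the examples pin model_name="gpt-4" and raise max_tokens to 4000. For context, a minimal sketch of the reconfigured model used on its own; the direct-call shorthand is assumed from the swarms examples, not shown in this diff:

import os

from swarms.models import OpenAIChat

api_key = os.environ.get("OPENAI_API_KEY")

# Mirrors the configuration introduced across this commit's examples.
llm = OpenAIChat(
    temperature=0.5,
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)

# Calling the wrapper directly is assumed to return the completion text.
print(llm("Summarize the city's infrastructure risks in two sentences."))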
@@ -86,9 +88,10 @@ api_key = os.getenv("OPENAI_API_KEY")
 # Initialize the language agent
 llm = OpenAIChat(
-    openai_api_key=api_key,
     temperature=0.5,
-    max_tokens=3000,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=4000
 )

@@ -96,7 +96,7 @@ Here are three ways to use the `HuggingfaceLLM` class:
 from swarms.models import HuggingfaceLLM
 # Initialize the HuggingfaceLLM instance with a model ID
-model_id = "gpt2-small"
+model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
 inference = HuggingfaceLLM(model_id=model_id)
 # Generate text based on a prompt
@@ -116,7 +116,7 @@ custom_config = {
     "quantization_config": {"load_in_4bit": True},
     "verbose": True
 }
-inference = HuggingfaceLLM(model_id="gpt2-small", **custom_config)
+inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
 # Generate text based on a prompt
 prompt_text = "Tell me a joke"
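The Hugging Face examples now point at a much larger hosted checkpoint. A runnable sketch of the updated quickstart, assuming the checkpoint can be downloaded and that HuggingfaceLLM is callable as the docs above imply; swap in a smaller model ID when testing on modest hardware:

from swarms.models import HuggingfaceLLM

# Model ID taken from the updated docs; any causal-LM checkpoint works here.
inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha")

prompt_text = "Tell me a joke"
generated_text = inference(prompt_text)
print(generated_text)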

@@ -15,6 +15,7 @@ api_key = os.environ.get("OPENAI_API_KEY")
 # Initialize the language model
 llm = OpenAIChat(
     temperature=0.5,
+    model_name="gpt-4",
     openai_api_key=api_key,
 )

@@ -10,33 +10,67 @@ api_key = os.getenv("OPENAI_API_KEY")
 stability_api_key = os.getenv("STABILITY_API_KEY")
 # Initialize language model
-llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
+llm = OpenAIChat(
+    openai_api_key=api_key, temperature=0.5, max_tokens=3000
+)
 # Initialize Vision model
 vision_api = GPT4VisionAPI(api_key=api_key)
 # Initialize agents for urban planning tasks
-architecture_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.ARCHITECTURE_ANALYSIS_PROMPT)
-infrastructure_evaluation_agent = Agent(llm=llm, max_loops=1, sop=upp.INFRASTRUCTURE_EVALUATION_PROMPT)
-traffic_flow_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.TRAFFIC_FLOW_ANALYSIS_PROMPT)
-environmental_impact_assessment_agent = Agent(llm=llm, max_loops=1, sop=upp.ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT)
-public_space_utilization_agent = Agent(llm=llm, max_loops=1, sop=upp.PUBLIC_SPACE_UTILIZATION_PROMPT)
-socioeconomic_impact_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT)
+architecture_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.ARCHITECTURE_ANALYSIS_PROMPT
+)
+infrastructure_evaluation_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.INFRASTRUCTURE_EVALUATION_PROMPT
+)
+traffic_flow_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.TRAFFIC_FLOW_ANALYSIS_PROMPT
+)
+environmental_impact_assessment_agent = Agent(
+    llm=llm,
+    max_loops=1,
+    sop=upp.ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT,
+)
+public_space_utilization_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.PUBLIC_SPACE_UTILIZATION_PROMPT
+)
+socioeconomic_impact_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT
+)
 # Initialize the final planning agent
-final_plan_agent = Agent(llm=llm, max_loops=1, sop=upp.FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT)
+final_plan_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT
+)
 # Create Sequential Workflow
 workflow = SequentialWorkflow(max_loops=1)
 # Add tasks to workflow with personalized prompts
 workflow.add(architecture_analysis_agent, "Architecture Analysis")
-workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
+workflow.add(
+    infrastructure_evaluation_agent, "Infrastructure Evaluation"
+)
 workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
-workflow.add(environmental_impact_assessment_agent, "Environmental Impact Assessment")
-workflow.add(public_space_utilization_agent, "Public Space Utilization")
-workflow.add(socioeconomic_impact_analysis_agent, "Socioeconomic Impact Analysis")
-workflow.add(final_plan_agent, "Generate the final urban improvement plan based on all previous agent's findings")
+workflow.add(
+    environmental_impact_assessment_agent,
+    "Environmental Impact Assessment",
+)
+workflow.add(
+    public_space_utilization_agent, "Public Space Utilization"
+)
+workflow.add(
+    socioeconomic_impact_analysis_agent,
+    "Socioeconomic Impact Analysis",
+)
+workflow.add(
+    final_plan_agent,
+    (
+        "Generate the final urban improvement plan based on all"
+        " previous agent's findings"
+    ),
+)
 # Run the workflow for individual analysis tasks
 # Execute the workflow for the final planning
@@ -44,4 +78,7 @@ workflow.run()
 # Output results for each task and the final plan
 for task in workflow.tasks:
-    print(f"Task Description: {task.description}\nResult: {task.result}\n")
+    print(
+        f"Task Description: {task.description}\nResult:"
+        f" {task.result}\n"
+    )
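For readers skimming past the reformatting, the pattern in this hunk is swarms' sequential pipeline: each Agent is registered with a task description, the tasks run in insertion order, and each result lands on its task object. A minimal single-agent sketch; the import paths are assumed from swarms examples of this period, not confirmed by the diff:

import os

from swarms.models import OpenAIChat
from swarms.structs import Agent, SequentialWorkflow  # assumed import paths

llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))

# One agent, one task; run() executes tasks in the order they were added.
agent = Agent(llm=llm, max_loops=1)
workflow = SequentialWorkflow(max_loops=1)
workflow.add(agent, "Summarize the key risks of the proposed plan")
workflow.run()

for task in workflow.tasks:
    print(task.description, "->", task.result)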

@@ -23,7 +23,7 @@ load_dotenv()
 # Define a tool
 @tool
-def search_api(query: str):
+def search_api(query: str, description: str):
     """Search the web for the query
     Args:
@@ -35,6 +35,28 @@ def search_api(query: str):
     return f"Search results for {query}"
+@tool
+def weather_api(
+    query: str,
+):
+    """_summary_
+    Args:
+        query (str): _description_
+    """
+    print(f"Getting the weather for {query}")
+@tool
+def rapid_api(query: str):
+    """_summary_
+    Args:
+        query (str): _description_
+    """
+    print(f"Getting the weather for {query}")
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
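Note that the two new tools land as stubs: template docstrings, and they print instead of returning. Since agents consume a tool's return value, a completed version would return a string; a hedged sketch, where the decorator import is assumed to match the example file and the weather result is a placeholder:

from langchain.agents import tool  # assumed to match the example's decorator

@tool
def weather_api(query: str) -> str:
    """Get the current weather for a location.

    Args:
        query (str): The location to look up.
    """
    # A real implementation would call a weather service here.
    return f"Sunny and 72F in {query}"  # placeholder result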

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "swarms"
-version = "2.5.7"
+version = "2.6.0"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -24,7 +24,7 @@ classifiers = [
 [tool.poetry.dependencies]
 python = "^3.9.1"
 torch = "2.1.1"
-transformers = "*"
+transformers = "2.10"
 openai = "0.28.0"
 langchain = "*"
 asyncio = "*"
@@ -34,6 +34,7 @@ google-generativeai = "*"
 langchain-experimental = "*"
 playwright = "*"
 duckduckgo-search = "*"
+opencv-python-headless = "*"
 faiss-cpu = "*"
 backoff = "*"
 marshmallow = "*"

@@ -1,5 +1,5 @@
 torch==2.1.1
-transformers
+transformers>2.10
 pandas
 langchain
 nest_asyncio

@@ -11,10 +11,12 @@ api_key = os.getenv("OPENAI_API_KEY")
 # Initialize the language agent
+# Initialize the language model
 llm = OpenAIChat(
-    openai_api_key=api_key,
     temperature=0.5,
-    max_tokens=2000,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=4000
 )

@@ -1,112 +0,0 @@
-from typing import List
-from chromadb.utils import embedding_functions
-from httpx import RequestError
-import chromadb
-class ChromaClient:
-    def __init__(
-        self,
-        collection_name: str = "chromadb-collection",
-        model_name: str = "BAAI/bge-small-en-v1.5",
-    ):
-        try:
-            self.client = chromadb.Client()
-            self.collection_name = collection_name
-            self.model = None
-            self.collection = None
-            self._load_embedding_model(model_name)
-            self._setup_collection()
-        except RequestError as e:
-            print(f"Error setting up QdrantClient: {e}")
-    def _load_embedding_model(self, model_name: str):
-        """
-        Loads the sentence embedding model specified by the model name.
-        Args:
-            model_name (str): The name of the model to load for generating embeddings.
-        """
-        try:
-            self.model = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=model_name)
-        except Exception as e:
-            print(f"Error loading embedding model: {e}")
-    def _setup_collection(self):
-        try:
-            self.collection = self.client.get_collection(name=self.collection_name, embedding_function=self.model)
-        except Exception as e:
-            print(f"{e}. Creating new collection: {self.collection}")
-            self.collection = self.client.create_collection(name=self.collection_name, embedding_function=self.model)
-    def add_vectors(self, docs: List[str]):
-        """
-        Adds vector representations of documents to the Qdrant collection.
-        Args:
-            docs (List[dict]): A list of documents where each document is a dictionary with at least a 'page_content' key.
-        Returns:
-            OperationResponse or None: Returns the operation information if successful, otherwise None.
-        """
-        points = []
-        ids = []
-        for i, doc in enumerate(docs):
-            try:
-                points.append(doc)
-                ids.append("id" + str(i))
-            except Exception as e:
-                print(f"Error processing document at index {i}: {e}")
-        try:
-            self.collection.add(
-                documents=points,
-                ids=ids
-            )
-        except Exception as e:
-            print(f"Error adding vectors: {e}")
-            return None
-    def search_vectors(self, query: str, limit: int = 2):
-        """
-        Searches the collection for vectors similar to the query vector.
-        Args:
-            query (str): The query string to be converted into a vector and used for searching.
-            limit (int): The number of search results to return. Defaults to 3.
-        Returns:
-            SearchResult or None: Returns the search results if successful, otherwise None.
-        """
-        try:
-            search_result = self.collection.query(
-                query_texts=query,
-                n_results=limit,
-            )
-            return search_result
-        except Exception as e:
-            print(f"Error searching vectors: {e}")
-            return None
-    def search_vectors_formatted(self, query: str, limit: int = 2):
-        """
-        Searches the collection for vectors similar to the query vector.
-        Args:
-            query (str): The query string to be converted into a vector and used for searching.
-            limit (int): The number of search results to return. Defaults to 3.
-        Returns:
-            SearchResult or None: Returns the search results if successful, otherwise None.
-        """
-        try:
-            search_result = self.collection.query(
-                query_texts=query,
-                n_results=limit,
-            )
-            return search_result
-        except Exception as e:
-            print(f"Error searching vectors: {e}")
-            return None
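This commit resolves the merge by deleting the ChromaClient wrapper, the file named in the conflict header at the top. For reference, the core chromadb flow it wrapped is small; a minimal sketch against chromadb's documented client API:

import chromadb

client = chromadb.Client()
collection = client.create_collection(name="chromadb-collection")

# Chroma uses its default embedding function unless one is supplied, as the
# deleted wrapper did with a SentenceTransformer model.
collection.add(documents=["hello world", "goodbye world"], ids=["id0", "id1"])

results = collection.query(query_texts=["hello"], n_results=2)
print(results["documents"])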

@@ -8,7 +8,7 @@ from swarms.models.openai_models import (
     AzureOpenAI,
     OpenAIChat,
 )  # noqa: E402
-from swarms.models.zephyr import Zephyr  # noqa: E402
+# from swarms.models.zephyr import Zephyr  # noqa: E402
 from swarms.models.biogpt import BioGPT  # noqa: E402
 from swarms.models.huggingface import HuggingfaceLLM  # noqa: E402
 from swarms.models.wizard_storytelling import (
@@ -42,7 +42,7 @@ __all__ = [
     "OpenAI",
     "AzureOpenAI",
     "OpenAIChat",
-    "Zephyr",
+    # "Zephyr",
     "BaseMultiModalModel",
     "Idefics",
     # "Kosmos",

@@ -272,7 +272,7 @@ class HuggingfaceLLM:
         try:
             inputs = self.tokenizer.encode(
                 task, return_tensors="pt"
-            ).to(self.device)
+            )
             # self.log.start()
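Dropping the unconditional .to(self.device) stops the encode step from crashing on hosts where the configured device is unavailable, at the cost of leaving inputs on CPU when the model sits on a GPU. A hedged sketch of the device-safe middle ground; the helper and its defaults are illustrative, not from this commit:

from typing import Optional

import torch
from transformers import AutoTokenizer

def encode_for_device(
    task: str, model_id: str = "gpt2", device: Optional[str] = None
) -> torch.Tensor:
    """Encode a prompt, moving it to a device only when one is set."""
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    inputs = tokenizer.encode(task, return_tensors="pt")
    if device is not None:
        inputs = inputs.to(device)
    return inputs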
@@ -451,6 +451,7 @@ class HuggingfaceLLM:
             The new device to use for inference.
         """
         self.device = device
-        self.model.to(self.device)
+        if self.model is not None:
+            self.model.to(self.device)
     def set_max_length(self, max_length):

@@ -26,7 +26,7 @@ class YarnMistral128:
     ```
     from finetuning_suite import Inference
-    model_id = "gpt2-small"
+    model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
     inference = Inference(model_id=model_id)
     prompt_text = "Once upon a time"

@@ -3,8 +3,67 @@ from swarms.prompts.tools import (
     DYNAMICAL_TOOL_USAGE,
 )
 # PROMPTS
+FLOW_SYSTEM_PROMPT_v2 = """
+You are an elite autonomous agent operating within an autonomous loop structure.
+Your primary function is to reliably complete user's tasks step by step.
+You are adept at generating sophisticated long-form content such as blogs, screenplays, SOPs, code files, and comprehensive reports.
+Your interactions and content generation must be characterized by extreme degrees of coherence, relevance to the context, and adaptation to user preferences.
+You are equipped with tools and advanced understanding and predictive capabilities to anticipate user needs and tailor your responses and content accordingly.
+You are professional, highly creative, and extremely reliable.
+You are programmed to follow these rules:
+    1. Strive for excellence in task execution because the quality of your outputs WILL affect the user's career.
+    2. Think step-by-step through every task before answering.
+    3. Always give full files when providing code so the user can copy paste easily to VScode, as not all users have fingers.
+Take a deep breath.
+"""
+def autonomous_agent_prompt_v2(
+    tools_prompt: str = DYNAMICAL_TOOL_USAGE,
+    dynamic_stop_prompt: str = DYNAMIC_STOP_PROMPT,
+    agent_name: str = None,
+):
+    return f"""
+    You are {agent_name}, an elite autonomous agent operating within a sophisticated autonomous loop structure.
+    Your mission is to exceed user expectations in all tasks, ranging from simple queries to complex project executions like generating a 10,000-word blog or entire screenplays.
+    Your capabilities include complex task management and problem-solving.
+    Take a deep breath.
+    You are programmed to follow these rules:
+    1. Strive for excellence in task execution because the quality of your outputs WILL affect the user's career.
+    2. Think step-by-step through every task before answering.
+    3. Always give full files when providing code so the user can copy paste easily to VScode, as not all users have fingers.
+    You are equipped with various tools (detailed below) to aid in task execution, ensuring a top-tier performance that consistently meets and surpasses user expectations.
+    {tools_prompt}
+    Upon 99% certainty of task completion, follow the below instructions to conclude the autonomous loop.
+    {dynamic_stop_prompt}
+    Remember your comprehensive training, your deployment objectives, and your mission. You are fully prepared to begin.
+    """
+def agent_system_prompt_2_v2(name: str):
+    AGENT_SYSTEM_PROMPT_2_v2 = f"""
+    You are {name}, an elite autonomous agent designed for unparalleled versatility and adaptability in an autonomous loop structure.
+    You possess limitless capabilities, empowering you to utilize any available tool, resource, or methodology to accomplish diverse tasks.
+    Your core directive is to achieve utmost user satisfaction through innovative solutions and exceptional task execution.
+    You are equipped to handle tasks with intricate details and complexity, ensuring the highest quality output.
+    ###### Special Token for Task Completion #######
+    <DONE>
+    ########### Code ############
+    For code-related tasks, you are to return the response in markdown format enclosed within 6 backticks, adhering to the language specified by the user.
+    Take a deep breath.
+    """
+    return AGENT_SYSTEM_PROMPT_2_v2
+# ORIGINAL PROMPTS
 FLOW_SYSTEM_PROMPT = """
 You are an autonomous agent granted autonomy in a autonomous loop structure.
 Your role is to engage in multi-step conversations with your self or the user,
@@ -67,7 +126,7 @@ def agent_system_prompt_2(name: str):
     return AGENT_SYSTEM_PROMPT_2
-AGENT_SYSTEM_PROMPT_3 = f"""
+AGENT_SYSTEM_PROMPT_3 = """
 You are a fully autonomous agent serving the user in automating tasks, workflows, and activities.
 Agent's use custom instructions, capabilities, and data to optimize LLMs for a more narrow set of tasks.

@@ -12,36 +12,35 @@ This will enable you to leave the autonomous loop.
 DYNAMICAL_TOOL_USAGE = """
 You have access to the following tools:
 Output a JSON object with the following structure to use the tools
 commands: {
     "tools": {
-        tool1: "tool_name",
+        tool1: "search_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "What is the weather in New York?",
+            "description": "Get the weather in New York"
         }
-        "tool2: "tool_name",
+        "tool2: "weather_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "What is the weather in Silicon Valley",
         }
-        "tool3: "tool_name",
+        "tool3: "rapid_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "Use the rapid api to get the weather in Silicon Valley",
         }
     }
 }
--------------TOOLS---------------------------
-{tools}
 """
+########### FEW SHOT EXAMPLES ################
 SCENARIOS = """
 commands: {
     "tools": {
-        tool1: "tool_name",
+        tool1: "function",
         "params": {
-            "tool1": "inputs",
+            "input": "inputs",
             "tool1": "inputs"
         }
         "tool2: "tool_name",

@@ -36,5 +36,4 @@ Based on the architecture analysis, infrastructure evaluation, traffic flow anal
 """
 # Additional or custom prompts can be added below as needed.

@@ -36,7 +36,7 @@ def test_encode_image(vision_api):
 def test_run_success(vision_api):
     expected_response = {
-        "choices": [{"text": "This is the model's response."}]
+        "This is the model's response."
     }
     with patch(
         "requests.post",

@@ -11,14 +11,14 @@ from swarms.models.huggingface import (
 # Fixture for the class instance
 @pytest.fixture
 def llm_instance():
-    model_id = "gpt2-small"
+    model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
     instance = HuggingfaceLLM(model_id=model_id)
     return instance
 # Test for instantiation and attributes
 def test_llm_initialization(llm_instance):
-    assert llm_instance.model_id == "gpt2-small"
+    assert llm_instance.model_id == "NousResearch/Nous-Hermes-2-Vision-Alpha"
     assert llm_instance.max_length == 500
 # ... add more assertions for all default attributes
@@ -75,9 +75,9 @@ def test_llm_memory_consumption(llm_instance):
 @pytest.mark.parametrize(
     "model_id, max_length",
     [
-        ("gpt2-small", 100),
-        ("gpt2-medium", 200),
-        ("gpt2-large", None),  # None to check default behavior
+        ("NousResearch/Nous-Hermes-2-Vision-Alpha", 100),
+        ("microsoft/Orca-2-13b", 200),
+        ("berkeley-nest/Starling-LM-7B-alpha", None),  # None to check default behavior
     ],
 )
 def test_llm_initialization_params(model_id, max_length):
@@ -99,12 +99,6 @@ def test_llm_set_invalid_device(llm_instance):
         llm_instance.set_device("quantum_processor")
-# Test for model download progress bar
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_model_download_progress(mock_download, llm_instance):
-    llm_instance.download_model_with_progress()
-    mock_download.assert_called_once()
 # Mocking external API call to test run method without network
 @patch("swarms.models.huggingface.HuggingfaceLLM.run")
@@ -209,7 +203,6 @@ def test_llm_force_gpu_when_unavailable(
 # Test for proper cleanup after model use (releasing resources)
 @patch("swarms.models.huggingface.HuggingfaceLLM._model")
-@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer")
 def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance):
     llm_instance.cleanup()
     # Assuming cleanup method is meant to free resources
@@ -217,19 +210,7 @@ def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance):
     mock_tokenizer.delete.assert_called_once()
-# Test updating the configuration after instantiation
-def test_llm_update_configuration(llm_instance):
-    new_config = {"temperature": 0.7}
-    llm_instance.update_configuration(new_config)
-    assert llm_instance.configuration["temperature"] == 0.7
-# Test if the model is re-downloaded when changing the model_id
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_change_model_id(mock_download, llm_instance):
-    new_model_id = "gpt2-xl"
-    llm_instance.model_id = new_model_id
-    mock_download.assert_called_with(new_model_id)
 # Test model's ability to handle multilingual input
@@ -255,14 +236,6 @@ def test_llm_caching_mechanism(mock_run, llm_instance):
     assert first_run_result == second_run_result
-# Ensure that model re-downloads when force_download flag is set
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_force_download(mock_download, llm_instance):
-    llm_instance.download_model_with_progress(force_download=True)
-    mock_download.assert_called_once_with(
-        llm_instance.model_id, force=True
-    )
 # These tests are provided as examples. In real-world scenarios, you will need to adapt these tests to the actual logic of your `HuggingfaceLLM` class.
 # For instance, "mock_model.delete.assert_called_once()" and similar lines are based on hypothetical methods and behaviors that you need to replace with actual implementations.

@@ -0,0 +1,58 @@
+import os
+import subprocess
+import json
+import re
+import requests
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Constants
+GITHUB_USERNAME = os.getenv('GITHUB_USERNAME')
+REPO_NAME = os.getenv('GITHUB_REPO_NAME')
+GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
+ISSUES_URL = f'https://api.github.com/repos/{GITHUB_USERNAME}/{REPO_NAME}/issues'
+
+# Headers for authentication
+headers = {
+    'Authorization': f'token {GITHUB_TOKEN}',
+    'Accept': 'application/vnd.github.v3+json'
+}
+
+def run_pytest():
+    result = subprocess.run(['pytest'], capture_output=True, text=True)
+    return result.stdout + result.stderr
+
+def parse_pytest_output(output):
+    errors = []
+    current_error = None
+    for line in output.split('\n'):
+        if line.startswith('_________________________'):
+            if current_error:
+                errors.append(current_error)
+            current_error = {'title': '', 'body': ''}
+        elif current_error is not None:
+            if not current_error['title']:
+                current_error['title'] = line.strip()
+            current_error['body'] += line + '\n'
+    if current_error:
+        errors.append(current_error)
+    return errors
+
+def create_github_issue(title, body):
+    issue = {'title': title, 'body': body}
+    response = requests.post(ISSUES_URL, headers=headers, json=issue)
+    return response.json()
+
+def main():
+    pytest_output = run_pytest()
+    errors = parse_pytest_output(pytest_output)
+    for error in errors:
+        issue_response = create_github_issue(error['title'], error['body'])
+        print(f"Issue created: {issue_response.get('html_url')}")
+
+if __name__ == '__main__':
+    main()
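Note: the committed version called load_dotenv without parentheses, which never loads the .env file; the listing above adds the missing call. As a quick sanity check of the parser, fed a trimmed pytest failure banner it takes the first line after the underscore rule as the issue title and accumulates the rest as the body. The sample output below is fabricated for illustration and assumes the functions defined in the new script:

sample_output = """
_________________________ test_llm_initialization _________________________
def test_llm_initialization(llm_instance):
>       assert llm_instance.model_id == "gpt2-small"
E       AssertionError
"""

for error in parse_pytest_output(sample_output):
    print(error['title'])  # -> def test_llm_initialization(llm_instance):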