diff --git a/.env.example b/.env.example
index bebc8fa3..23f4d87b 100644
--- a/.env.example
+++ b/.env.example
@@ -42,3 +42,6 @@
 PINECONE_API_KEY=""
 BING_COOKIE=""
 PSG_CONNECTION_STRING=""
+GITHUB_USERNAME=""
+GITHUB_REPO_NAME=""
+GITHUB_TOKEN=""
\ No newline at end of file
diff --git a/README.md b/README.md
index c9441685..3ce706e6 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,9 @@ api_key = os.environ.get("OPENAI_API_KEY")
 
 # Initialize the language model
 llm = OpenAIChat(
     temperature=0.5,
+    model_name="gpt-4",
     openai_api_key=api_key,
+    max_tokens=4000
 )
 
@@ -86,9 +88,10 @@ api_key = os.getenv("OPENAI_API_KEY")
 
 # Initialize the language agent
 llm = OpenAIChat(
-    openai_api_key=api_key,
     temperature=0.5,
-    max_tokens=3000,
+    model_name="gpt-4",
+    openai_api_key=api_key,
+    max_tokens=4000
 )
 
diff --git a/docs/swarms/models/huggingface.md b/docs/swarms/models/huggingface.md
index e429f080..8606d8f2 100644
--- a/docs/swarms/models/huggingface.md
+++ b/docs/swarms/models/huggingface.md
@@ -96,7 +96,7 @@ Here are three ways to use the `HuggingfaceLLM` class:
 from swarms.models import HuggingfaceLLM
 
 # Initialize the HuggingfaceLLM instance with a model ID
-model_id = "gpt2-small"
+model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
 inference = HuggingfaceLLM(model_id=model_id)
 
 # Generate text based on a prompt
@@ -116,7 +116,7 @@ custom_config = {
     "quantization_config": {"load_in_4bit": True},
     "verbose": True
 }
-inference = HuggingfaceLLM(model_id="gpt2-small", **custom_config)
+inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
 
 # Generate text based on a prompt
 prompt_text = "Tell me a joke"
diff --git a/example.py b/example.py
index 9fc89485..6ed2cbab 100644
--- a/example.py
+++ b/example.py
@@ -15,6 +15,7 @@ api_key = os.environ.get("OPENAI_API_KEY")
 
 # Initialize the language model
 llm = OpenAIChat(
     temperature=0.5,
+    model_name="gpt-4",
     openai_api_key=api_key,
 )
diff --git a/playground/demos/urban_planning/urban_planning.py b/playground/demos/urban_planning/urban_planning.py
index 3dd06114..e85b4d31 100644
--- a/playground/demos/urban_planning/urban_planning.py
+++ b/playground/demos/urban_planning/urban_planning.py
@@ -10,33 +10,67 @@ api_key = os.getenv("OPENAI_API_KEY")
 stability_api_key = os.getenv("STABILITY_API_KEY")
 
 # Initialize language model
-llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
+llm = OpenAIChat(
+    openai_api_key=api_key, temperature=0.5, max_tokens=3000
+)
 
 # Initialize Vision model
 vision_api = GPT4VisionAPI(api_key=api_key)
 
 # Initialize agents for urban planning tasks
-architecture_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.ARCHITECTURE_ANALYSIS_PROMPT)
-infrastructure_evaluation_agent = Agent(llm=llm, max_loops=1, sop=upp.INFRASTRUCTURE_EVALUATION_PROMPT)
-traffic_flow_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.TRAFFIC_FLOW_ANALYSIS_PROMPT)
-environmental_impact_assessment_agent = Agent(llm=llm, max_loops=1, sop=upp.ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT)
-public_space_utilization_agent = Agent(llm=llm, max_loops=1, sop=upp.PUBLIC_SPACE_UTILIZATION_PROMPT)
-socioeconomic_impact_analysis_agent = Agent(llm=llm, max_loops=1, sop=upp.SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT)
+architecture_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.ARCHITECTURE_ANALYSIS_PROMPT
+)
+infrastructure_evaluation_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.INFRASTRUCTURE_EVALUATION_PROMPT
+)
+traffic_flow_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.TRAFFIC_FLOW_ANALYSIS_PROMPT
+)
+environmental_impact_assessment_agent = Agent(
+    llm=llm,
+    max_loops=1,
+    sop=upp.ENVIRONMENTAL_IMPACT_ASSESSMENT_PROMPT,
+)
+public_space_utilization_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.PUBLIC_SPACE_UTILIZATION_PROMPT
+)
+socioeconomic_impact_analysis_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.SOCIOECONOMIC_IMPACT_ANALYSIS_PROMPT
+)
 
 # Initialize the final planning agent
-final_plan_agent = Agent(llm=llm, max_loops=1, sop=upp.FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT)
+final_plan_agent = Agent(
+    llm=llm, max_loops=1, sop=upp.FINAL_URBAN_IMPROVEMENT_PLAN_PROMPT
+)
 
 # Create Sequential Workflow
 workflow = SequentialWorkflow(max_loops=1)
 
 # Add tasks to workflow with personalized prompts
 workflow.add(architecture_analysis_agent, "Architecture Analysis")
-workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
+workflow.add(
+    infrastructure_evaluation_agent, "Infrastructure Evaluation"
+)
 workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
-workflow.add(environmental_impact_assessment_agent, "Environmental Impact Assessment")
-workflow.add(public_space_utilization_agent, "Public Space Utilization")
-workflow.add(socioeconomic_impact_analysis_agent, "Socioeconomic Impact Analysis")
-workflow.add(final_plan_agent, "Generate the final urban improvement plan based on all previous agent's findings")
+workflow.add(
+    environmental_impact_assessment_agent,
+    "Environmental Impact Assessment",
+)
+workflow.add(
+    public_space_utilization_agent, "Public Space Utilization"
+)
+workflow.add(
+    socioeconomic_impact_analysis_agent,
+    "Socioeconomic Impact Analysis",
+)
+workflow.add(
+    final_plan_agent,
+    (
+        "Generate the final urban improvement plan based on all"
+        " previous agents' findings"
+    ),
+)
 
 # Run the workflow for individual analysis tasks
 # Execute the workflow for the final planning
@@ -44,4 +78,7 @@ workflow.run()
 
 # Output results for each task and the final plan
 for task in workflow.tasks:
-    print(f"Task Description: {task.description}\nResult: {task.result}\n")
+    print(
+        f"Task Description: {task.description}\nResult:"
+        f" {task.result}\n"
+    )
diff --git a/playground/structs/agent_with_tools.py b/playground/structs/agent_with_tools.py
index e816fb99..99f21638 100644
--- a/playground/structs/agent_with_tools.py
+++ b/playground/structs/agent_with_tools.py
@@ -23,7 +23,7 @@ load_dotenv()
 
 # Define a tool
 @tool
-def search_api(query: str):
+def search_api(query: str, description: str):
     """Search the web for the query
 
     Args:
@@ -35,6 +35,28 @@ def search_api(query: str):
     return f"Search results for {query}"
 
 
+@tool
+def weather_api(
+    query: str,
+):
+    """Get the current weather for the given location.
+
+    Args:
+        query (str): The location to get the weather for.
+    """
+    print(f"Getting the weather for {query}")
+
+
+@tool
+def rapid_api(query: str):
+    """Run the given query against Rapid API.
+
+    Args:
+        query (str): The query to send to Rapid API.
+    """
+    print(f"Querying Rapid API for {query}")
+
+
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
diff --git a/pyproject.toml b/pyproject.toml
index 5db7d35d..c182abd9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "2.5.7"
+version = "2.6.0"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -24,7 +24,7 @@ classifiers = [
 [tool.poetry.dependencies]
 python = "^3.9.1"
 torch = "2.1.1"
-transformers = "*"
+transformers = ">2.10"
 openai = "0.28.0"
 langchain = "*"
 asyncio = "*"
@@ -34,6 +34,7 @@ google-generativeai = "*"
 langchain-experimental = "*"
playwright = "*" duckduckgo-search = "*" +opencv-python-headless = "*" faiss-cpu = "*" backoff = "*" marshmallow = "*" diff --git a/requirements.txt b/requirements.txt index 576650cd..0bc6a065 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ torch==2.1.1 -transformers +transformers>2.10 pandas langchain nest_asyncio diff --git a/sequential_workflow_example.py b/sequential_workflow_example.py index 1742f49c..38cf5559 100644 --- a/sequential_workflow_example.py +++ b/sequential_workflow_example.py @@ -11,10 +11,12 @@ api_key = os.getenv("OPENAI_API_KEY") # Initialize the language agent +# Initialize the language model llm = OpenAIChat( - openai_api_key=api_key, temperature=0.5, - max_tokens=2000, + model_name="gpt-4", + openai_api_key=api_key, + max_tokens=4000 ) diff --git a/swarms/memory/chroma.py b/swarms/memory/chroma.py index 4fc05081..e69de29b 100644 --- a/swarms/memory/chroma.py +++ b/swarms/memory/chroma.py @@ -1,112 +0,0 @@ -from typing import List -from chromadb.utils import embedding_functions -from httpx import RequestError -import chromadb - - -class ChromaClient: - def __init__( - self, - collection_name: str = "chromadb-collection", - model_name: str = "BAAI/bge-small-en-v1.5", - ): - try: - self.client = chromadb.Client() - self.collection_name = collection_name - self.model = None - self.collection = None - self._load_embedding_model(model_name) - self._setup_collection() - except RequestError as e: - print(f"Error setting up QdrantClient: {e}") - - def _load_embedding_model(self, model_name: str): - """ - Loads the sentence embedding model specified by the model name. - - Args: - model_name (str): The name of the model to load for generating embeddings. - """ - try: - self.model =embedding_functions.SentenceTransformerEmbeddingFunction(model_name=model_name) - except Exception as e: - print(f"Error loading embedding model: {e}") - - def _setup_collection(self): - try: - self.collection = self.client.get_collection(name=self.collection_name, embedding_function=self.model) - except Exception as e: - print(f"{e}. Creating new collection: {self.collection}") - - self.collection = self.client.create_collection(name=self.collection_name, embedding_function=self.model) - - - def add_vectors(self, docs: List[str]): - """ - Adds vector representations of documents to the Qdrant collection. - - Args: - docs (List[dict]): A list of documents where each document is a dictionary with at least a 'page_content' key. - - Returns: - OperationResponse or None: Returns the operation information if successful, otherwise None. - """ - points = [] - ids = [] - for i, doc in enumerate(docs): - try: - points.append(doc) - ids.append("id"+str(i)) - except Exception as e: - print(f"Error processing document at index {i}: {e}") - - try: - self.collection.add( - documents=points, - ids=ids - ) - except Exception as e: - print(f"Error adding vectors: {e}") - return None - - def search_vectors(self, query: str, limit: int = 2): - """ - Searches the collection for vectors similar to the query vector. - - Args: - query (str): The query string to be converted into a vector and used for searching. - limit (int): The number of search results to return. Defaults to 3. - - Returns: - SearchResult or None: Returns the search results if successful, otherwise None. 
- """ - try: - search_result = self.collection.query( - query_texts=query, - n_results=limit, - ) - return search_result - except Exception as e: - print(f"Error searching vectors: {e}") - return None - - def search_vectors_formatted(self, query: str, limit: int = 2): - """ - Searches the collection for vectors similar to the query vector. - - Args: - query (str): The query string to be converted into a vector and used for searching. - limit (int): The number of search results to return. Defaults to 3. - - Returns: - SearchResult or None: Returns the search results if successful, otherwise None. - """ - try: - search_result = self.collection.query( - query_texts=query, - n_results=limit, - ) - return search_result - except Exception as e: - print(f"Error searching vectors: {e}") - return None diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index d2256aa8..089585a8 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -8,7 +8,7 @@ from swarms.models.openai_models import ( AzureOpenAI, OpenAIChat, ) # noqa: E402 -from swarms.models.zephyr import Zephyr # noqa: E402 +# from swarms.models.zephyr import Zephyr # noqa: E402 from swarms.models.biogpt import BioGPT # noqa: E402 from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 from swarms.models.wizard_storytelling import ( @@ -42,7 +42,7 @@ __all__ = [ "OpenAI", "AzureOpenAI", "OpenAIChat", - "Zephyr", + # "Zephyr", "BaseMultiModalModel", "Idefics", # "Kosmos", diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py index 295949f5..88620654 100644 --- a/swarms/models/huggingface.py +++ b/swarms/models/huggingface.py @@ -272,7 +272,7 @@ class HuggingfaceLLM: try: inputs = self.tokenizer.encode( task, return_tensors="pt" - ).to(self.device) + ) # self.log.start() @@ -451,7 +451,8 @@ class HuggingfaceLLM: The new device to use for inference. """ self.device = device - self.model.to(self.device) + if self.model is not None: + self.model.to(self.device) def set_max_length(self, max_length): """Set max_length""" diff --git a/swarms/models/yarn_mistral.py b/swarms/models/yarn_mistral.py index 7b5a9c02..ff65b856 100644 --- a/swarms/models/yarn_mistral.py +++ b/swarms/models/yarn_mistral.py @@ -26,7 +26,7 @@ class YarnMistral128: ``` from finetuning_suite import Inference - model_id = "gpt2-small" + model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha" inference = Inference(model_id=model_id) prompt_text = "Once upon a time" diff --git a/swarms/prompts/agent_system_prompts.py b/swarms/prompts/agent_system_prompts.py index 3cf8447b..baff99f6 100644 --- a/swarms/prompts/agent_system_prompts.py +++ b/swarms/prompts/agent_system_prompts.py @@ -3,8 +3,67 @@ from swarms.prompts.tools import ( DYNAMICAL_TOOL_USAGE, ) - # PROMPTS +FLOW_SYSTEM_PROMPT_v2 = """ +You are an elite autonomous agent operating within an autonomous loop structure. +Your primary function is to reliably complete user's tasks step by step. +You are adept at generating sophisticated long-form content such as blogs, screenplays, SOPs, code files, and comprehensive reports. +Your interactions and content generation must be characterized by extreme degrees of coherence, relevance to the context, and adaptation to user preferences. +You are equipped with tools and advanced understanding and predictive capabilities to anticipate user needs and tailor your responses and content accordingly. +You are professional, highly creative, and extremely reliable. +You are programmed to follow these rules: + 1. 
Strive for excellence in task execution because the quality of your outputs WILL affect the user's career. + 2. Think step-by-step through every task before answering. + 3. Always give full files when providing code so the user can copy paste easily to VScode, as not all users have fingers. +Take a deep breath. +""" + + +def autonomous_agent_prompt_v2( + tools_prompt: str = DYNAMICAL_TOOL_USAGE, + dynamic_stop_prompt: str = DYNAMIC_STOP_PROMPT, + agent_name: str = None, +): + return f""" + You are {agent_name}, an elite autonomous agent operating within a sophisticated autonomous loop structure. + Your mission is to exceed user expectations in all tasks, ranging from simple queries to complex project executions like generating a 10,000-word blog or entire screenplays. + Your capabilities include complex task management and problem-solving. + Take a deep breath. + You are programmed to follow these rules: + 1. Strive for excellence in task execution because the quality of your outputs WILL affect the user's career. + 2. Think step-by-step through every task before answering. + 3. Always give full files when providing code so the user can copy paste easily to VScode, as not all users have fingers. + You are equipped with various tools (detailed below) to aid in task execution, ensuring a top-tier performance that consistently meets and surpasses user expectations. + {tools_prompt} + Upon 99% certainty of task completion, follow the below instructions to conclude the autonomous loop. + {dynamic_stop_prompt} + Remember your comprehensive training, your deployment objectives, and your mission. You are fully prepared to begin. + """ + + +def agent_system_prompt_2_v2(name: str): + AGENT_SYSTEM_PROMPT_2_v2 = f""" + You are {name}, an elite autonomous agent designed for unparalleled versatility and adaptability in an autonomous loop structure. + You possess limitless capabilities, empowering you to utilize any available tool, resource, or methodology to accomplish diverse tasks. + Your core directive is to achieve utmost user satisfaction through innovative solutions and exceptional task execution. + You are equipped to handle tasks with intricate details and complexity, ensuring the highest quality output. + + + + ###### Special Token for Task Completion ####### + + + + ########### Code ############ + + For code-related tasks, you are to return the response in markdown format enclosed within 6 backticks, adhering to the language specified by the user. + Take a deep breath. + """ + + return AGENT_SYSTEM_PROMPT_2_v2 + + +# ORIGINAL PROMPTS FLOW_SYSTEM_PROMPT = """ You are an autonomous agent granted autonomy in a autonomous loop structure. Your role is to engage in multi-step conversations with your self or the user, @@ -67,7 +126,7 @@ def agent_system_prompt_2(name: str): return AGENT_SYSTEM_PROMPT_2 -AGENT_SYSTEM_PROMPT_3 = f""" +AGENT_SYSTEM_PROMPT_3 = """ You are a fully autonomous agent serving the user in automating tasks, workflows, and activities. Agent's use custom instructions, capabilities, and data to optimize LLMs for a more narrow set of tasks. diff --git a/swarms/prompts/tools.py b/swarms/prompts/tools.py index a27706e3..fe82ba5d 100644 --- a/swarms/prompts/tools.py +++ b/swarms/prompts/tools.py @@ -12,36 +12,35 @@ This will enable you to leave the autonomous loop. 
 DYNAMICAL_TOOL_USAGE = """
 You have access to the following tools:
 Output a JSON object with the following structure to use the tools
+
 commands: {
     "tools": {
-        tool1: "tool_name",
+        "tool1": "search_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "What is the weather in New York?",
+            "description": "Get the weather in New York"
         }
-        "tool2: "tool_name",
+        "tool2": "weather_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "What is the weather in Silicon Valley"
         }
-        "tool3: "tool_name",
+        "tool3": "rapid_api",
         "params": {
-            "tool1": "inputs",
-            "tool1": "inputs"
+            "query": "Use the rapid api to get the weather in Silicon Valley"
         }
     }
 }
 
 --------------TOOLS---------------------------
-{tools}
 """
+
+
+########### FEW SHOT EXAMPLES ################
 SCENARIOS = """
 commands: {
     "tools": {
-        tool1: "tool_name",
+        "tool1": "function",
         "params": {
-            "tool1": "inputs",
+            "input": "inputs",
             "tool1": "inputs"
         }
         "tool2: "tool_name",
diff --git a/swarms/prompts/urban_planning.py b/swarms/prompts/urban_planning.py
index bc42f04a..958377fe 100644
--- a/swarms/prompts/urban_planning.py
+++ b/swarms/prompts/urban_planning.py
@@ -36,5 +36,4 @@ Based on the architecture analysis, infrastructure evaluation, traffic flow anal
 """
 
-
 # Additional or custom prompts can be added below as needed.
diff --git a/tests/models/test_gpt4_vision_api.py b/tests/models/test_gpt4_vision_api.py
index c716bb7c..14d53f74 100644
--- a/tests/models/test_gpt4_vision_api.py
+++ b/tests/models/test_gpt4_vision_api.py
@@ -36,7 +36,5 @@ def test_encode_image(vision_api):
 
 def test_run_success(vision_api):
-    expected_response = {
-        "choices": [{"text": "This is the model's response."}]
-    }
+    expected_response = "This is the model's response."
     with patch(
         "requests.post",
diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py
index 8d53b8e0..b313eaf4 100644
--- a/tests/models/test_huggingface.py
+++ b/tests/models/test_huggingface.py
@@ -11,14 +11,14 @@ from swarms.models.huggingface import (
 # Fixture for the class instance
 @pytest.fixture
 def llm_instance():
-    model_id = "gpt2-small"
+    model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
     instance = HuggingfaceLLM(model_id=model_id)
     return instance
 
 
 # Test for instantiation and attributes
 def test_llm_initialization(llm_instance):
-    assert llm_instance.model_id == "gpt2-small"
+    assert llm_instance.model_id == "NousResearch/Nous-Hermes-2-Vision-Alpha"
     assert llm_instance.max_length == 500
 
 # ... add more assertions for all default attributes
@@ -75,9 +75,9 @@ def test_llm_memory_consumption(llm_instance):
 @pytest.mark.parametrize(
     "model_id, max_length",
     [
-        ("gpt2-small", 100),
-        ("gpt2-medium", 200),
-        ("gpt2-large", None),  # None to check default behavior
+        ("NousResearch/Nous-Hermes-2-Vision-Alpha", 100),
+        ("microsoft/Orca-2-13b", 200),
+        ("berkeley-nest/Starling-LM-7B-alpha", None),  # None to check default behavior
     ],
 )
 def test_llm_initialization_params(model_id, max_length):
@@ -99,12 +99,6 @@ def test_llm_set_invalid_device(llm_instance):
         llm_instance.set_device("quantum_processor")
 
 
-# Test for model download progress bar
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_model_download_progress(mock_download, llm_instance):
-    llm_instance.download_model_with_progress()
-    mock_download.assert_called_once()
-
 
 # Mocking external API call to test run method without network
 @patch("swarms.models.huggingface.HuggingfaceLLM.run")
@@ -209,7 +203,6 @@ def test_llm_force_gpu_when_unavailable(
 # Test for proper cleanup after model use (releasing resources)
 @patch("swarms.models.huggingface.HuggingfaceLLM._model")
-@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer")
-def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance):
+def test_llm_cleanup(mock_model, llm_instance):
     llm_instance.cleanup()
     # Assuming cleanup method is meant to free resources
     mock_model.delete.assert_called_once()
@@ -217,19 +210,5 @@ def test_llm_cleanup(mock_model, llm_instance):
-    mock_tokenizer.delete.assert_called_once()
 
 
-# Test updating the configuration after instantiation
-def test_llm_update_configuration(llm_instance):
-    new_config = {"temperature": 0.7}
-    llm_instance.update_configuration(new_config)
-    assert llm_instance.configuration["temperature"] == 0.7
-
-
-# Test if the model is re-downloaded when changing the model_id
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_change_model_id(mock_download, llm_instance):
-    new_model_id = "gpt2-xl"
-    llm_instance.model_id = new_model_id
-    mock_download.assert_called_with(new_model_id)
 
 
 # Test model's ability to handle multilingual input
@@ -255,14 +234,6 @@ def test_llm_caching_mechanism(mock_run, llm_instance):
     assert first_run_result == second_run_result
 
 
-# Ensure that model re-downloads when force_download flag is set
-@patch("swarms.models.huggingface.HuggingfaceLLM._download_model")
-def test_llm_force_download(mock_download, llm_instance):
-    llm_instance.download_model_with_progress(force_download=True)
-    mock_download.assert_called_once_with(
-        llm_instance.model_id, force=True
-    )
-
 
 # These tests are provided as examples. In real-world scenarios, you will need to adapt these tests to the actual logic of your `HuggingfaceLLM` class.
 # For instance, "mock_model.delete.assert_called_once()" and similar lines are based on hypothetical methods and behaviors that you need to replace with actual implementations.
diff --git a/tests/upload_tests_to_issues b/tests/upload_tests_to_issues
new file mode 100644
index 00000000..cc2392e3
--- /dev/null
+++ b/tests/upload_tests_to_issues
@@ -0,0 +1,64 @@
+import os
+import subprocess
+import requests
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Constants
+GITHUB_USERNAME = os.getenv('GITHUB_USERNAME')
+REPO_NAME = os.getenv('GITHUB_REPO_NAME')
+GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
+ISSUES_URL = f'https://api.github.com/repos/{GITHUB_USERNAME}/{REPO_NAME}/issues'
+
+# Headers for authentication
+headers = {
+    'Authorization': f'token {GITHUB_TOKEN}',
+    'Accept': 'application/vnd.github.v3+json'
+}
+
+def run_pytest():
+    result = subprocess.run(['pytest'], capture_output=True, text=True)
+    return result.stdout + result.stderr
+
+def parse_pytest_output(output):
+    errors = []
+    current_error = None
+
+    for line in output.split('\n'):
+        if line.startswith('_________________________'):
+            if current_error:
+                errors.append(current_error)
+            current_error = {'title': '', 'body': ''}
+        elif current_error is not None:
+            if not current_error['title']:
+                current_error['title'] = line.strip()
+            current_error['body'] += line + '\n'
+
+    if current_error:
+        errors.append(current_error)
+    return errors
+
+def create_github_issue(title, body):
+    issue = {'title': title, 'body': body}
+    response = requests.post(ISSUES_URL, headers=headers, json=issue)
+    return response.json()
+
+def main():
+    pytest_output = run_pytest()
+    errors = parse_pytest_output(pytest_output)
+
+    for error in errors:
+        issue_response = create_github_issue(error['title'], error['body'])
+        print(f"Issue created: {issue_response.get('html_url')}")
+
+if __name__ == '__main__':
+    main()
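+
+# Usage sketch (assumptions: pytest is installed, the script runs from the
+# repository root, and a .env file supplies the GITHUB_USERNAME,
+# GITHUB_REPO_NAME, and GITHUB_TOKEN values added to .env.example above):
+#
+#   python tests/upload_tests_to_issues
+#
+# Each failed-test section in the pytest output becomes one GitHub issue.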