From 8809246c320e85070fe23aea421cf35f66676141 Mon Sep 17 00:00:00 2001
From: Kye
Date: Tue, 27 Jun 2023 15:32:17 -0400
Subject: [PATCH] meta prompting agent

---
 README.md              |  6 +++++-
 swarms/agents/utils.py | 14 +++++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 89bdfc27..27002fee 100644
--- a/README.md
+++ b/README.md
@@ -191,4 +191,8 @@ In the context of swarm LLMs, one could consider an **Omni-Vector Embedding Data
 
 * Integrate guidance and token healing
 
-* Add text to speech [whisper x, youtube script](https://github.com/kyegomez/youtubeURL-to-text) and text to speech code models as tools
\ No newline at end of file
+* Add text to speech [whisper x, youtube script](https://github.com/kyegomez/youtubeURL-to-text) and text to speech code models as tools
+
+* Add task completion logic with meta prompting, task evaluation as a state from 0.0 to 1.0, and critiquing for meta prompting.
+
+* Integrate meta prompting for every agent boss and worker
\ No newline at end of file
diff --git a/swarms/agents/utils.py b/swarms/agents/utils.py
index 4052cf03..ca090b06 100644
--- a/swarms/agents/utils.py
+++ b/swarms/agents/utils.py
@@ -744,4 +744,16 @@ def multi_agent_emotion_analysis_v2(text: str, agents_memory: List[Dict[str, Any
     for emotion in emotion_scores:
         emotion_scores[emotion] /= len(agents_memory)
 
-    return emotion_scores
\ No newline at end of file
+    return emotion_scores
+
+def swarm_intelligence(task_prompt, agents_memory):
+    subtasks = generate_tasks(task_prompt)
+    results = []
+    for subtask in subtasks:
+        agent_votes = []
+        for agent_memory in agents_memory:
+            agent_vote, _ = chat(agent_memory + [{"role": "user", "content": f"Propose a solution for: {subtask}"}])
+            agent_votes.append(agent_vote.strip())
+        most_common_solution = max(set(agent_votes), key=agent_votes.count)
+        results.append(most_common_solution)
+    return results
\ No newline at end of file
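For reference, below is a minimal, self-contained sketch of the majority-vote pattern that `swarm_intelligence` adds to `swarms/agents/utils.py`. The patch relies on `generate_tasks` and `chat` helpers defined elsewhere in that module; the stand-ins here (`fake_generate_tasks`, `fake_chat`) are hypothetical stubs added only so the sketch runs on its own, and are not part of the patch.

```python
from typing import Dict, List, Tuple

# Hypothetical stand-ins for the generate_tasks/chat helpers that
# swarms/agents/utils.py provides elsewhere; they are NOT part of the patch.
def fake_generate_tasks(task_prompt: str) -> List[str]:
    """Split a task prompt into subtasks (stub: one subtask per line)."""
    return [line.strip() for line in task_prompt.splitlines() if line.strip()]

def fake_chat(messages: List[Dict[str, str]]) -> Tuple[str, List[Dict[str, str]]]:
    """Pretend to call an LLM: echo the last user message as the 'answer'."""
    answer = f"Solution to: {messages[-1]['content']}"
    return answer, messages + [{"role": "assistant", "content": answer}]

def swarm_intelligence(task_prompt: str,
                       agents_memory: List[List[Dict[str, str]]]) -> List[str]:
    """Majority-vote over per-agent proposals, one result per subtask."""
    subtasks = fake_generate_tasks(task_prompt)
    results = []
    for subtask in subtasks:
        # Ask every agent (each with its own conversation history) to propose a solution.
        agent_votes = []
        for agent_memory in agents_memory:
            prompt = agent_memory + [{"role": "user", "content": f"Propose a solution for: {subtask}"}]
            agent_vote, _ = fake_chat(prompt)
            agent_votes.append(agent_vote.strip())
        # Keep the most frequent proposal; ties are broken arbitrarily by set
        # iteration order, matching the behaviour of the patched function.
        most_common_solution = max(set(agent_votes), key=agent_votes.count)
        results.append(most_common_solution)
    return results

if __name__ == "__main__":
    memories = [[{"role": "system", "content": f"You are worker agent {i}."}] for i in range(3)]
    print(swarm_intelligence("summarise the README\nlist open TODOs", memories))
```

Note that exact string matching over full LLM responses rarely produces repeated votes in practice, so a production version would likely normalise or score the proposals (for example with the 0.0 to 1.0 task-evaluation state mentioned in the README TODOs) before taking the majority.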