diff --git a/playground/agents/meta_prompter.py b/playground/agents/meta_prompter.py
index 3b5557e0..b6eec5fa 100644
--- a/playground/agents/meta_prompter.py
+++ b/playground/agents/meta_prompter.py
@@ -20,4 +20,5 @@ optimized_prompt = meta_optimizer.run(task)
 #run the optimized prompt with detailed instructions
 result = worker.run(optimized_prompt)
 
+#print the final result from the worker
 print(result)
\ No newline at end of file
diff --git a/swarms/agents/meta_prompter.py b/swarms/agents/meta_prompter.py
index 96352208..24c3775d 100644
--- a/swarms/agents/meta_prompter.py
+++ b/swarms/agents/meta_prompter.py
@@ -2,12 +2,13 @@ from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferWindowMemory
 
+
 class MetaPrompterAgent:
     """
     Meta Prompting Agent
-    The Meta Prompting Agent has 1 purpose: to create better prompts for an agent.
+    The Meta Prompting Agent has one purpose: to create better prompts for another agent.
 
-    The meta prompting agent would be used in this flow: 
+    The meta prompting agent is used in the following flow:
     user task -> MetaPrompterAgent -> Agent
 
     Args:
@@ -21,7 +22,7 @@ class MetaPrompterAgent:
         memory (ConversationBufferWindowMemory, optional): Memory to be used in the meta prompt. Defaults to None.
         meta_template (str, optional): Template to be used in the meta prompt. Defaults to None.
         human_input (bool, optional): Whether to use human input. Defaults to False.
-    
+
     Returns:
         str: Response from the agent
 
@@ -44,13 +45,14 @@ class MetaPrompterAgent:
     task = "Create a feedforward in pytorch"
 
     #optimize the prompt
-    optimized_prompt = meta_optimizer.run(task)    
+    optimized_prompt = meta_optimizer.run(task)
 
     #run the optimized prompt with detailed instructions
     result = worker.run(optimized_prompt)
 
     print(result)
     """
+
     def __init__(
         self,
         llm,
@@ -60,7 +62,7 @@ class MetaPrompterAgent:
         success_phrase: str = "task succeeded",
         instructions: str = "None",
         template: str = None,
-        memory = None,
+        memory=None,
         meta_template: str = None,
         human_input: bool = False,
     ):
@@ -71,7 +73,7 @@ class MetaPrompterAgent:
         self.success_phrase = success_phrase
         self.instructions = instructions
         self.template = template
-        self.memory = memory 
+        self.memory = memory
         self.meta_template = meta_template
         self.human_input = human_input
 
@@ -108,7 +110,7 @@ class MetaPrompterAgent:
         delimiter = "Instructions: "
         new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter):]
         return new_instructions
-    
+
     def run(self, task: str):
         """
         Run the MetaPrompterAgent
@@ -132,28 +134,28 @@
             )
 
             output = chain.predict(human_input=task)
-            
+
             for j in range(self.max_iters):
                 print(f"(Step {j+1}/{self.max_iters})")
                 print(f"Assistant: {output}")
                 print(f"Human: ")
 
-
-
                 if self.human_input:
                     human_input = input()
+                else:
+                    human_input = ""
-                
+
                 if any(
                     phrase in human_input.lower() for phrase in key_phrases
                 ):
                     break
 
-                output = chain.predict(human_input.lower)
+                output = chain.predict(human_input=human_input.lower())
-            
+
             if self.success_phrase in human_input.lower():
                 print(f"You succeed! Thanks for using!")
                 return
-            
+
             meta_chain = self.initialize_meta_chain()
             meta_output = meta_chain.predict(chat_history=self.get_chat_history(chain.memory))
             print(f"Feedback: {meta_output}")
@@ -161,5 +161,3 @@ class MetaPrompterAgent:
             self.instructions = self.get_new_instructions(meta_output)
             print(f"New Instruction: {self.instructions}")
             print("\n" + "#" * 80 + "\n")
-
-
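
For context, a minimal usage sketch of how MetaPrompterAgent is meant to be driven, mirroring its class docstring (user task -> MetaPrompterAgent -> worker agent). The OpenAI LLM backend, the temperature setting, and the EchoWorker stand-in for a downstream agent are illustrative assumptions, not part of this change; any LangChain-compatible LLM and any agent exposing run(prompt) could take their place.

# A minimal usage sketch, mirroring the MetaPrompterAgent class docstring.
# The OpenAI backend and the EchoWorker stand-in below are assumptions for
# illustration only.
from langchain.llms import OpenAI

from swarms.agents.meta_prompter import MetaPrompterAgent


class EchoWorker:
    """Hypothetical downstream agent; swap in a real worker agent."""

    def __init__(self, llm):
        self.llm = llm

    def run(self, prompt: str) -> str:
        # forward the optimized prompt straight to the LLM
        return self.llm(prompt)


llm = OpenAI(temperature=0)

# user task -> MetaPrompterAgent -> worker agent
meta_optimizer = MetaPrompterAgent(llm=llm)
worker = EchoWorker(llm)

task = "Create a feedforward in pytorch"

# optimize the raw task into a more detailed prompt, then hand it to the worker
optimized_prompt = meta_optimizer.run(task)
result = worker.run(optimized_prompt)
print(result)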