diff --git a/swarms/agents/agent.py b/swarms/agents/agent.py
new file mode 100644
index 00000000..59326190
--- /dev/null
+++ b/swarms/agents/agent.py
@@ -0,0 +1,150 @@
+# Base toolset
+from swarms.agents.tools.agent_tools import *
+from langchain.tools import BaseTool
+
+from langchain.callbacks.manager import (
+    AsyncCallbackManagerForToolRun,
+    CallbackManagerForToolRun,
+)
+from typing import List, Any, Dict, Optional, Type
+from langchain.memory.chat_message_histories import FileChatMessageHistory
+
+import logging
+from pydantic import BaseModel, Extra
+
+from swarms.agents.models.hf import HuggingFaceLLM
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+class AgentNodeInitializer:
+    """Useful for when you need to spawn an autonomous agent instance as a agent to accomplish complex tasks, it can search the internet or spawn child multi-modality models to process and generate images and text or audio and so on"""
+
+    def __init__(self, llm, tools, vectorstore):
+        if not llm or not tools or not vectorstore:
+            logging.error("llm, tools, and vectorstore cannot be None.")
+            raise ValueError("llm, tools, and vectorstore cannot be None.")
+        
+        self.llm = llm
+        self.tools = tools
+        self.vectorstore = vectorstore
+        self.agent = None
+
+    def create_agent(self, ai_name="Swarm Agent AI Assistant", ai_role="Assistant", human_in_the_loop=False, search_kwargs=None, verbose=False):
+        logging.info("Creating agent in AgentNode")
+        try:
+            self.agent = AutoGPT.from_llm_and_tools(
+                ai_name=ai_name,
+                ai_role=ai_role,
+                tools=self.tools,
+                llm=self.llm,
+                memory=self.vectorstore.as_retriever(search_kwargs=search_kwargs or {}),
+                human_in_the_loop=human_in_the_loop,
+                chat_history_memory=FileChatMessageHistory("chat_history.txt"),
+            )
+            # self.agent.chain.verbose = verbose
+        except Exception as e:
+            logging.error(f"Error while creating agent: {str(e)}")
+            raise e
+
+    def add_tool(self, tool: Tool):
+        if not isinstance(tool, Tool):
+            logging.error("Tool must be an instance of Tool.")
+            raise TypeError("Tool must be an instance of Tool.")
+        
+        self.tools.append(tool)
+
+    def run(self, prompt: str) -> str:
+        if not isinstance(prompt, str):
+            logging.error("Prompt must be a string.")
+            raise TypeError("Prompt must be a string.")
+        
+        if not prompt:
+            logging.error("Prompt is empty.")
+            raise ValueError("Prompt is empty.")
+        
+        try:
+            self.agent.run([f"{prompt}"])
+            return "Task completed by AgentNode"
+        except Exception as e:
+            logging.error(f"While running the agent: {str(e)}")
+            raise e
+
+
+class AgentNode:
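+    """Factory that wires up an LLM, tools, and a FAISS vector store, then builds the worker agent."""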
+    def __init__(self, openai_api_key):
+        if not openai_api_key:
+            logging.error("OpenAI API key is not provided")
+            raise ValueError("openai_api_key cannot be None")
+        
+        self.openai_api_key = openai_api_key
+
+    def initialize_llm(self, llm_class, temperature=0.5):
+        if not llm_class:
+            logging.error("llm_class cannot be none")
+            raise ValueError("llm_class cannot be None")
+        
+        try:
+            return llm_class(openai_api_key=self.openai_api_key, temperature=temperature)
+        except Exception as e:
+            logging.error(f"Failed to initialize language model: {e}")
+            raise
+
+    def initialize_tools(self, llm_class):
+        if not llm_class:
+            logging.error("llm_class not cannot be none")
+            raise ValueError("llm_class cannot be none")
+        try:
+            logging.info("Initializing tools for AgentNode")
+            llm = self.initialize_llm(llm_class)
+            web_search = DuckDuckGoSearchRun()
+
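+            # Default toolset: web search, file I/O scoped to ROOT_DIR, CSV processing, and webpage QA with sources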
+            tools = [
+                web_search,
+                WriteFileTool(root_dir=ROOT_DIR),
+                ReadFileTool(root_dir=ROOT_DIR),
+                process_csv,
+                WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)),
+            ]
+            if not tools:
+                logging.error("Tools are not initialized")
+                raise ValueError("Tools are not initialized")
+            return tools
+        except Exception as e:
+            logging.error(f"Failed to initialize tools: {e}")
+            raise
+
+    def initialize_vectorstore(self):
+        try:
+            embeddings_model = OpenAIEmbeddings(openai_api_key=self.openai_api_key)
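+            # 1536 is the output dimension of OpenAI's default text-embedding-ada-002 embeddings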
+            embedding_size = 1536
+            index = faiss.IndexFlatL2(embedding_size)
+            return FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
+        except Exception as e:
+            logging.error(f"Failed to initialize vector store: {e}")
+            raise
+
+    def create_agent(self, llm_class=ChatOpenAI, ai_name="Swarm Agent AI Assistant", ai_role="Assistant", human_in_the_loop=False, search_kwargs=None, verbose=False):
+        if not llm_class:
+            logging.error("llm_class cannot be None.")
+            raise ValueError("llm_class cannot be None.")
+        try:
+            agent_tools = self.initialize_tools(llm_class)
+            vectorstore = self.initialize_vectorstore()
+            agent = AgentNodeInitializer(llm=self.initialize_llm(llm_class), tools=agent_tools, vectorstore=vectorstore)
+            agent.create_agent(ai_name=ai_name, ai_role=ai_role, human_in_the_loop=human_in_the_loop, search_kwargs=search_kwargs, verbose=verbose)
+            return agent
+        except Exception as e:
+            logging.error(f"Failed to create agent node: {e}")
+            raise
+
+def agent(openai_api_key):
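+    """Convenience helper: build a ready-to-run agent from an OpenAI API key."""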
+    if not openai_api_key:
+        logging.error("OpenAI API key is not provided")
+        raise ValueError("OpenAI API key is required")
+    try:
+        initializer = AgentNode(openai_api_key)
+        agent = initializer.create_agent()
+        return agent
+    except Exception as e:
+        logging.error(f"An error occured in agent: {e}")
+        raise
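+
+# A minimal usage sketch (assumes a valid OpenAI API key in the OPENAI_API_KEY environment
+# variable; the task string below is purely illustrative):
+if __name__ == "__main__":
+    import os
+
+    worker = agent(os.environ["OPENAI_API_KEY"])
+    worker.run("Find three recent papers on swarm intelligence and summarize them")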
diff --git a/swarms/agents/models/hf.py b/swarms/agents/models/hf.py
index aca2d0c3..cd73d665 100644
--- a/swarms/agents/models/hf.py
+++ b/swarms/agents/models/hf.py
@@ -3,29 +3,7 @@ import logging
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 class HuggingFaceLLM:
-    """
-    A class that represents a Language Model (LLM) powered by HuggingFace Transformers.
-
-    Attributes:
-        model_id (str): ID of the pre-trained model in HuggingFace Model Hub.
-        device (str): Device to load the model onto.
-        max_length (int): Maximum length of the generated sequence.
-        tokenizer: Instance of the tokenizer corresponding to the model.
-        model: The loaded model instance.
-        logger: Logger instance for the class.
-    """
     def __init__(self, model_id: str, device: str = None, max_length: int = 20, quantize: bool = False, quantization_config: dict = None):
-        """
-        Constructs all the necessary attributes for the HuggingFaceLLM object.
-
-        Args:
-            model_id (str): ID of the pre-trained model in HuggingFace Model Hub.
-            device (str, optional): Device to load the model onto. Defaults to GPU if available, else CPU.
-            max_length (int, optional): Maximum length of the generated sequence. Defaults to 20.
-            quantize (bool, optional): Whether to apply quantization to the model. Defaults to False.
-            quantization_config (dict, optional): Configuration for model quantization. Defaults to None, 
-                and a standard configuration will be used if quantize is True.
-        """
         self.logger = logging.getLogger(__name__)
         self.device = device if device else ('cuda' if torch.cuda.is_available() else 'cpu')
         self.model_id = model_id
@@ -50,17 +28,6 @@ class HuggingFaceLLM:
             self.logger.error(f"Failed to load the model or the tokenizer: {e}")
             raise
     def generate_text(self, prompt_text: str, max_length: int = None):
-        """
-        Generates text based on the input prompt using the loaded model.
-
-        Args:
-            prompt_text (str): Input prompt to generate text from.
-            max_length (int, optional): Maximum length of the generated sequence. Defaults to None,
-                and the max_length set during initialization will be used.
-
-        Returns:
-            str: Generated text.
-        """
         max_length = max_length if max_length else self.max_length
         try:
             inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(self.device)