diff --git a/DOCS/BENEFITS.md b/DOCS/Corp/BENEFITS.md
similarity index 100%
rename from DOCS/BENEFITS.md
rename to DOCS/Corp/BENEFITS.md
diff --git a/DOCS/DEMO_IDEAS.md b/DOCS/Corp/DEMO_IDEAS.md
similarity index 100%
rename from DOCS/DEMO_IDEAS.md
rename to DOCS/Corp/DEMO_IDEAS.md
diff --git a/DOCS/DOCUMENTATION.md b/DOCS/DOCUMENTATION.md
index da0db9c3..e1169447 100644
--- a/DOCS/DOCUMENTATION.md
+++ b/DOCS/DOCUMENTATION.md
@@ -237,4 +237,104 @@ def __init__(self, openai_api_key: Optional[str] = None,
 - `result` (str): The generated response from the language model.
 
 ## Conclusion
-The `LLM` class provides a convenient way to initialize and run different language models using either OpenAI's API or Hugging Face models. By providing the necessary credentials and a prompt, you can obtain the generated response from the language model.
\ No newline at end of file
+The `LLM` class provides a convenient way to initialize and run different language models using either OpenAI's API or Hugging Face models. By providing the necessary credentials and a prompt, you can obtain the generated response from the language model.
+
+
+
+
+
+
+# `GooglePalm` class:
+
+### Example 1: Using Dictionaries as Messages
+
+```python
+from google_palm import GooglePalm
+
+# Initialize the GooglePalm instance
+gp = GooglePalm(
+    client=your_client,
+    model_name="models/chat-bison-001",
+    temperature=0.7,
+    top_p=0.9,
+    top_k=10,
+    n=5
+)
+
+# Create some messages
+messages = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "Who won the world series in 2020?"},
+]
+
+# Generate a response
+response = gp.generate(messages)
+
+# Print the generated response
+print(response)
+```
+
+### Example 2: Using BaseMessage and Its Subclasses as Messages
+
+```python
+from google_palm import GooglePalm
+from langchain.schema.messages import SystemMessage, HumanMessage
+
+# Initialize the GooglePalm instance
+gp = GooglePalm(
+    client=your_client,
+    model_name="models/chat-bison-001",
+    temperature=0.7,
+    top_p=0.9,
+    top_k=10,
+    n=5
+)
+
+# Create some messages
+messages = [
+    SystemMessage(content="You are a helpful assistant."),
+    HumanMessage(content="Who won the world series in 2020?"),
+]
+
+# Generate a response
+response = gp.generate(messages)
+
+# Print the generated response
+print(response)
+```
+
+### Example 3: Using GooglePalm with an Asynchronous Function
+
+```python
+import asyncio
+from google_palm import GooglePalm
+from langchain.schema.messages import SystemMessage, HumanMessage
+
+# Initialize the GooglePalm instance
+gp = GooglePalm(
+    client=your_client,
+    model_name="models/chat-bison-001",
+    temperature=0.7,
+    top_p=0.9,
+    top_k=10,
+    n=5
+)
+
+# Create some messages
+messages = [
+    SystemMessage(content="You are a helpful assistant."),
+    HumanMessage(content="Who won the world series in 2020?"),
+]
+
+# Define an asynchronous function
+async def generate_response():
+    response = await gp._agenerate(messages)
+    print(response)
+
+# Run the asynchronous function
+asyncio.run(generate_response())
+```
+
+Remember to replace `your_client` with an actual instance of your client. Also, ensure that `model_name` matches the name of the model you want to use.
+
+The `temperature`, `top_p`, `top_k`, and `n` parameters control the randomness and diversity of the generated responses. You can adjust these parameters based on your application's requirements.
\ No newline at end of file
diff --git a/swarms/agents/base.py b/swarms/agents/base.py
index de63f608..c10d9bdd 100644
--- a/swarms/agents/base.py
+++ b/swarms/agents/base.py
@@ -1,22 +1,22 @@
 from __future__ import annotations
 
 from typing import List, Optional
-from pydantic import ValidationError
 
+from langchain.chains.llm import LLMChain
+from langchain.memory import ChatMessageHistory
+from langchain.schema import BaseChatMessageHistory, Document
+from langchain.vectorstores.base import VectorStoreRetriever
+from pydantic import ValidationError
 
-from swarms.agents.utils.Agent import AgentOutputParser
-from swarms.agents.utils.human_input import HumanInputRun
-from swarms.agents.prompts.prompt_generator import FINISH_NAME
 from swarms.agents.models.base import AbstractModel
 from swarms.agents.prompts.agent_output_parser import AgentOutputParser
-from swarms.agents.prompts.agent_prompt_auto import PromptConstructor, MessageFormatter
 from swarms.agents.prompts.agent_prompt import AIMessage, HumanMessage, SystemMessage
+from swarms.agents.prompts.agent_prompt_auto import MessageFormatter, PromptConstructor
+from swarms.agents.prompts.prompt_generator import FINISH_NAME
 from swarms.agents.tools.base import BaseTool
+from swarms.agents.utils.Agent import AgentOutputParser
+from swarms.agents.utils.human_input import HumanInputRun
 
-from langchain.chains.llm import LLMChain
-from langchain.memory import ChatMessageHistory
-from langchain.schema import (BaseChatMessageHistory, Document)
-from langchain.vectorstores.base import VectorStoreRetriever
 
 class Agent:
     """Base Agent class"""
diff --git a/swarms/agents/memory.py b/swarms/agents/memory.py
index 26994da1..3a524fee 100644
--- a/swarms/agents/memory.py
+++ b/swarms/agents/memory.py
@@ -1,9 +1,9 @@
 from typing import Any, Dict, List
+from pydantic import Field
 
 from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
 from langchain.vectorstores.base import VectorStoreRetriever
 
-from pydantic import Field
 
 
 class AutoGPTMemory(BaseChatMemory):