google palm documentation

Former-commit-id: fc7d2def56
pull/47/head
Kye 2 years ago
parent 4fda8e20ce
commit e38a8d9da7

@ -237,4 +237,104 @@ def __init__(self, openai_api_key: Optional[str] = None,
- `result` (str): The generated response from the language model.
## Conclusion
The `LLM` class provides a convenient way to initialize and run different language models using either OpenAI's API or Hugging Face models. By providing the necessary credentials and a prompt, you can obtain the generated response from the language model.
# `GooglePalm` class:
### Example 1: Using Dictionaries as Messages
```python
from google_palm import GooglePalm

# Initialize the GooglePalm instance
gp = GooglePalm(
    client=your_client,
    model_name="models/chat-bison-001",
    temperature=0.7,
    top_p=0.9,
    top_k=10,
    n=5
)

# Create some messages
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
]

# Generate a response
response = gp.generate(messages)

# Print the generated response
print(response)
```
### Example 2: Using BaseMessage and Its Subclasses as Messages
```python
from google_palm import GooglePalm
from langchain.schema.messages import SystemMessage, HumanMessage

# Initialize the GooglePalm instance
gp = GooglePalm(
    client=your_client,
    model_name="models/chat-bison-001",
    temperature=0.7,
    top_p=0.9,
    top_k=10,
    n=5
)

# Create some messages
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Who won the world series in 2020?"),
]

# Generate a response
response = gp.generate(messages)

# Print the generated response
print(response)
```
### Example 3: Using GooglePalm with Asynchronous Function
```python
import asyncio

from google_palm import GooglePalm
from langchain.schema.messages import SystemMessage, HumanMessage

# Initialize the GooglePalm instance
gp = GooglePalm(
    client=your_client,
    model_name="models/chat-bison-001",
    temperature=0.7,
    top_p=0.9,
    top_k=10,
    n=5
)

# Create some messages
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Who won the world series in 2020?"),
]

# Define an asynchronous function
async def generate_response():
    response = await gp._agenerate(messages)
    print(response)

# Run the asynchronous function
asyncio.run(generate_response())
```
Remember to replace `your_client` with an actual instance of your client. Also, ensure the `model_name` is the correct name of the model that you want to use.
The `temperature`, `top_p`, `top_k`, and `n` parameters control the randomness and diversity of the generated responses. You can adjust these parameters based on your application's requirements.

@ -1,22 +1,22 @@
from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.memory import ChatMessageHistory
from langchain.schema import BaseChatMessageHistory, Document
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
from swarms.agents.utils.Agent import AgentOutputParser
from swarms.agents.utils.human_input import HumanInputRun
from swarms.agents.prompts.prompt_generator import FINISH_NAME
from swarms.agents.models.base import AbstractModel
from swarms.agents.prompts.agent_output_parser import AgentOutputParser
from swarms.agents.prompts.agent_prompt_auto import PromptConstructor, MessageFormatter
from swarms.agents.prompts.agent_prompt import AIMessage, HumanMessage, SystemMessage
from swarms.agents.prompts.agent_prompt_auto import MessageFormatter, PromptConstructor
from swarms.agents.prompts.prompt_generator import FINISH_NAME
from swarms.agents.tools.base import BaseTool
from swarms.agents.utils.Agent import AgentOutputParser
from swarms.agents.utils.human_input import HumanInputRun
from langchain.chains.llm import LLMChain
from langchain.memory import ChatMessageHistory
from langchain.schema import (BaseChatMessageHistory, Document)
from langchain.vectorstores.base import VectorStoreRetriever
class Agent:
"""Base Agent class"""

@ -1,9 +1,9 @@
from typing import Any, Dict, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):

Loading…
Cancel
Save