diff --git a/playground/demos/multi_modal_chain_of_thought/gemini_vcot.py b/playground/demos/multi_modal_chain_of_thought/gemini_vcot.py
index 1690d8fe..fd9ab247 100644
--- a/playground/demos/multi_modal_chain_of_thought/gemini_vcot.py
+++ b/playground/demos/multi_modal_chain_of_thought/gemini_vcot.py
@@ -17,7 +17,6 @@ llm = Gemini(
     temperature=0.5,
     max_tokens=1000,
     system_prompt=VISUAL_CHAIN_OF_THOUGHT
-
 )
 
 # Initialize the task
diff --git a/pyproject.toml b/pyproject.toml
index aa9962eb..530bbf17 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "2.9.3"
+version = "2.9.6"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez "]
diff --git a/swarms/models/gemini.py b/swarms/models/gemini.py
index 9ac38576..8a41a321 100644
--- a/swarms/models/gemini.py
+++ b/swarms/models/gemini.py
@@ -79,6 +79,7 @@ class Gemini(BaseMultiModalModel):
         candidates: bool = False,
         stream: bool = False,
         candidate_count: int = 1,
+        transport: str = "rest",
         stop_sequence=["x"],
         max_tokens: int = 100,
         temperature: float = 0.9,
@@ -97,12 +98,15 @@ class Gemini(BaseMultiModalModel):
         self.max_tokens = max_tokens
         self.temperature = temperature
         self.system_prompt = system_prompt
+
+        # Configure the API key
+        genai.configure(api_key=gemini_api_key, transport=transport)
 
         # Prepare the generation config
         self.generation_config = GenerationConfig(
             candidate_count=candidate_count,
             # stop_sequence=stop_sequence,
-            max_tokens=max_tokens,
+            max_output_tokens=max_tokens,
             temperature=temperature,
             *args,
             **kwargs,
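
For context on what the swarms/models/gemini.py change does at runtime, below is a minimal, self-contained sketch of the same configuration flow written against the google.generativeai SDK directly rather than the swarms Gemini wrapper. Only genai.configure(api_key=..., transport=...) and the max_output_tokens keyword come from the diff above; the API key string, model name, prompt, and parameter values are illustrative placeholders.

# Minimal sketch (not the swarms API) of the flow the patch sets up,
# using the google.generativeai SDK directly; values are placeholders.
import google.generativeai as genai
from google.generativeai.types import GenerationConfig

# The patched __init__ configures the client with a transport,
# defaulting to "rest" as in the diff.
genai.configure(api_key="YOUR_GEMINI_API_KEY", transport="rest")

# GenerationConfig takes `max_output_tokens`, which is why the diff
# renames the old `max_tokens=` keyword argument.
generation_config = GenerationConfig(
    candidate_count=1,
    max_output_tokens=1000,
    temperature=0.5,
)

model = genai.GenerativeModel("gemini-pro")
response = model.generate_content(
    "Summarize visual chain-of-thought prompting in one sentence.",
    generation_config=generation_config,
)
print(response.text)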