diff --git a/playground/models/openai_model.py b/playground/models/openai_model.py
index e3b01715..3b9cb967 100644
--- a/playground/models/openai_model.py
+++ b/playground/models/openai_model.py
@@ -1,6 +1,6 @@
 from swarms.models.openai_models import OpenAIChat
 
-openai = OpenAIChat(openai_api_key="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", verbose=False)
+openai = OpenAIChat(openai_api_key="", verbose=False)
 
 chat = openai("What are quantum fields?")
 print(chat)
diff --git a/pyproject.toml b/pyproject.toml
index d8a561bd..6aa8585d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "1.9.6"
+version = "1.9.9"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index b2a2b433..dd21ba80 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -16,8 +16,8 @@ from swarms.models.kosmos_two import Kosmos
 from swarms.models.vilt import Vilt
 from swarms.models.nougat import Nougat
 from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
-from swarms.models.gpt4v import GPT4Vision
-from swarms.models.dalle3 import Dalle3
+# from swarms.models.gpt4v import GPT4Vision
+# from swarms.models.dalle3 import Dalle3
 
 # from swarms.models.distilled_whisperx import DistilWhisperModel
 
@@ -45,6 +45,6 @@ __all__ = [
     "HuggingfaceLLM",
     "MPT7B",
     "WizardLLMStoryTeller",
-    "GPT4Vision",
-    "Dalle3",
+    # "GPT4Vision",
+    # "Dalle3",
 ]
diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py
index 73edf502..2ac5d403 100644
--- a/swarms/models/dalle3.py
+++ b/swarms/models/dalle3.py
@@ -12,7 +12,7 @@ from termcolor import colored
 
 load_dotenv()
 
-api_key = os.getenv("OPENAI_API_KEY")
+# api_key = os.getenv("OPENAI_API_KEY")
 
 # Configure Logging
 logging.basicConfig(level=logging.INFO)
@@ -49,6 +49,7 @@ class Dalle3:
     size: str = "1024x1024"
     max_retries: int = 3
     quality: str = "standard"
+    api_key: str = None
     n: int = 4
     client = OpenAI(
         api_key=api_key,
diff --git a/swarms/models/gpt4v.py b/swarms/models/gpt4v.py
index a7f8f1c1..99580d82 100644
--- a/swarms/models/gpt4v.py
+++ b/swarms/models/gpt4v.py
@@ -73,7 +73,7 @@ class GPT4Vision:
     model: str = "gpt-4-vision-preview"
     backoff_factor: float = 2.0
     timeout_seconds: int = 10
-    api_key: Optional[str] = None or os.getenv("OPENAI_API_KEY")
+    api_key: Optional[str] = None
     # 'Low' or 'High' for fast or high quality respectively; 'High' uses more tokens
     quality: str = "low"
     # Max tokens for the API request; the true maximum may be around 3,000 but is not documented
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 117172ea..4e21c3df 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -217,7 +217,7 @@ class Flow:
                     Dashboard: {self.dashboard}
                     Dynamic Temperature: {self.dynamic_temperature}
                     Autosave: {self.autosave}
-                    Saved State: {self.saved_state}
+                    Saved State: {self.saved_state_path}
                     
                 ----------------------------------------
                 """,