parent
97520835eb
commit
89657be1f5
@ -1 +1,3 @@
|
|||||||
venv/
|
venv/
|
||||||
|
*.log
|
||||||
|
*.pyc
|
||||||
|
@ -1,15 +1,19 @@
"""Entry point for the FastAPI-based mock server.

Mounts the OpenAI mock router under /openai and a catch-all generic
router that logs any request for an endpoint that is not mocked.
"""
from typing import Union

from fastapi import FastAPI

from routers import openai, generic

app = FastAPI()

# This mocks the openai router
app.include_router(openai.router, prefix="/openai")

# This is for showing what you need to mock.
# You can check the log in console or in app.log.
app.include_router(generic.router)


@app.get("/")
def read_root():
    """Health/hello endpoint returning a fixed greeting."""
    return {"Hello": "World"}


@app.get("/items/{item_id}")
def read_item(item_id: int, q: Union[str, None] = None):
    """Echo back the path item id and optional query string `q`."""
    return {"item_id": item_id, "q": q}


# This is here for debugging only: allows `python main.py` to serve
# directly without invoking uvicorn from the command line.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
@ -0,0 +1,35 @@
"""Generic catch-all router.

Any request that reaches this router (i.e. any path that is not
explicitly mocked elsewhere) is logged to the console and to app.log,
and answered with HTTP 500 plus a JSON body describing the request.
"""
from fastapi import APIRouter, Request
import logging

# Log everything (DEBUG and up) both to the console root handler and to
# app.log, appending across restarts.
logging.basicConfig(
    level=logging.DEBUG,
    filename="app.log",
    filemode="a",
    format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

router = APIRouter()


# One handler registered for every common HTTP verb; status_code=500
# deliberately signals "this endpoint is not mocked" to the caller.
@router.get("/{rest_of_path:path}", status_code=500)
@router.post("/{rest_of_path:path}", status_code=500)
@router.put("/{rest_of_path:path}", status_code=500)
@router.patch("/{rest_of_path:path}", status_code=500)
@router.delete("/{rest_of_path:path}", status_code=500)
def catch_all(request: Request):
    """Log an unmocked request and describe it back to the client.

    Returns a dict with the unmatched path and the HTTP method used,
    so callers can see exactly what they still need to mock.
    """
    path = request.path_params["rest_of_path"]

    # Lazy %-style args so formatting only happens if the level is enabled.
    logger.info("Method not mocked")
    logger.info("Path: %s", path)
    logger.info("Method: %s", request.method)

    return {
        "message": "This path is not mocked",
        "path": path,
        "method": request.method,
    }
@ -0,0 +1,24 @@
# Canned successful response mirroring the OpenAI /chat/completions
# payload shape, returned verbatim by the mock endpoint.
success_chat_completion = {
    "id": "chatcmpl-abcd1234",
    "object": "chat.completion",
    "created": 1723329271,  # fixed Unix timestamp so responses are deterministic
    "model": "gpt-4o-mini-2024-07-18",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": "Hello! How can I assist you today?",
                "refusal": None,
            },
            "logprobs": None,
            "finish_reason": "stop",
        }
    ],
    "usage": {
        "prompt_tokens": 19,
        "completion_tokens": 9,
        "total_tokens": 28,
    },
    "system_fingerprint": "fp_abcd1234",
}
@ -0,0 +1,9 @@
"""Mock of the OpenAI chat-completions endpoint (success path)."""
from fastapi import APIRouter

from .mock.openai_mock import success_chat_completion

router = APIRouter()


@router.post("/success/chat/completions")
def success_chat_completions():
    """Return the canned successful chat-completion payload.

    Mounted under the /openai prefix by main.py, so the full path is
    /openai/success/chat/completions.
    """
    return success_chat_completion
Loading…
Reference in new issue