Split routes, add debugging instructions

pull/560/head
Alexandre Henrique Afonso Campos 12 months ago
parent 97520835eb
commit 89657be1f5

@ -1 +1,3 @@
venv/
*.log
*.pyc

@ -40,3 +40,13 @@ $ fastapi dev main.py
```
You can then access the documentation by going to [http://localhost:8000/docs](http://localhost:8000/docs).
## Debugging
### VSCode
Make sure that the option `Python: Select interpreter` (from the `F1` shortcut) points to the virtual environment you created.
Open the file `main.py` and press `F5`, then select "Python File" as the run option.
Alternatively, you can add a configuration. Go to `Execute/Add configuration/Python debugger/Python file`. This will create a `.vscode/launch.json`. Just modify `"program": "${file}",` to `"program": "main.py",`. After this, you can press `F5` without needing to have `main.py` open.

@ -1,15 +1,19 @@
"""FastAPI entry point: wires the mocked OpenAI router and a generic catch-all router."""
from typing import Union
from fastapi import FastAPI
from routers import openai, generic
app = FastAPI()
# Mocked OpenAI endpoints (routers/openai.py), served under the /openai prefix.
app.include_router(openai.router, prefix="/openai")
# Generic catch-all router: shows which paths still need to be mocked.
# You can check the log in console or in app.log
app.include_router(generic.router)
@app.get("/")
def read_root():
    """Root endpoint; returns a static greeting payload."""
    greeting = {"Hello": "World"}
    return greeting
# Fix: the /items route was interleaved inside the __main__ guard in the
# source, which would leave it unregistered when the module is imported by a
# server (e.g. `fastapi dev main.py`). Define it at module level and keep only
# the uvicorn startup inside the guard.
@app.get("/items/{item_id}")
def read_item(item_id: int, q: Union[str, None] = None):
    """Return the item id together with the optional query string ``q``."""
    return {"item_id": item_id, "q": q}


# This is here for debugging only: lets you run `python main.py` directly.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)

@ -0,0 +1,35 @@
"""Generic catch-all router module: logs and reports any un-mocked request."""
import logging

# Fix: the original imported from fastapi three times (Request twice); a single
# consolidated import keeps every previously available name in scope.
from fastapi import APIRouter, FastAPI, Request
from routers import openai

# All records at DEBUG level and above are appended to app.log.
logging.basicConfig(
    level=logging.DEBUG,
    filename="app.log",
    filemode="a",
    format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

router = APIRouter()
@router.get("/{rest_of_path:path}", status_code=500)
@router.post("/{rest_of_path:path}", status_code=500)
@router.put("/{rest_of_path:path}", status_code=500)
@router.patch("/{rest_of_path:path}", status_code=500)
@router.delete("/{rest_of_path:path}", status_code=500)
def catch_all(request: Request):
    """Handle any path/method combination that has no explicit mock.

    Responds with HTTP 500 and a JSON body naming the missed path and
    method, and logs the same details so the caller can see what still
    needs to be mocked.
    """
    path = request.path_params["rest_of_path"]
    # Lazy %-style arguments: the message is only formatted if the record
    # passes the logger's level filter (idiomatic logging usage).
    logger.info("Method not mocked")
    logger.info("Path: %s", path)
    logger.info("Method: %s", request.method)
    return {
        "message": "This path is not mocked",
        "path": path,
        "method": request.method,
    }

@ -0,0 +1,24 @@
# Canned successful response for OpenAI's chat-completions endpoint: a
# `chat.completion` object with one assistant choice and usage token counts.
# Values (ids, timestamps, fingerprint) are static placeholders.
success_chat_completion = {
"id": "chatcmpl-abcd1234",
"object": "chat.completion",
"created": 1723329271,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! How can I assist you today?",
"refusal": None,
},
"logprobs": None,
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": 19,
"completion_tokens": 9,
"total_tokens": 28,
},
"system_fingerprint": "fp_abcd1234",
}

@ -0,0 +1,9 @@
"""Mocked OpenAI router: serves canned responses for OpenAI-shaped endpoints."""
from fastapi import APIRouter

from .mock.openai_mock import success_chat_completion

router = APIRouter()


@router.post("/success/chat/completions")
def success_chat_completions():
    """Always reply with the canned successful chat-completion payload."""
    return success_chat_completion
Loading…
Cancel
Save