import os import sys import logging import time from dotenv import load_dotenv from fastapi import FastAPI, HTTPException, Depends, Request from starlette.responses import StreamingResponse from .models import IncomingRequest, ProxyResponse from .services import process_chat_request, stream_llm_api, inject_tools_into_prompt, parse_llm_response_from_content from .core.config import get_settings, Settings from .database import init_db, log_request, update_request_log # --- Environment & Debug Loading --- # load_dotenv() # Uncomment if you run uvicorn directly and need to load .env # --- # --- Logging Configuration --- logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler("llm_proxy.log"), logging.StreamHandler() ] ) logger = logging.getLogger(__name__) # --- End of Logging Configuration --- app = FastAPI( title="LLM Tool Proxy", description="A proxy that intercepts LLM requests to inject and handle tool calls.", version="1.0.0", ) # --- Middleware for logging basic request/response info --- @app.middleware("http") async def logging_middleware(request: Request, call_next): start_time = time.time() logger.info(f"Request received: {request.method} {request.url.path} from {request.client.host}") logger.info(f"Request Headers: {dict(request.headers)}") response = await call_next(request) process_time = (time.time() - start_time) * 1000 logger.info(f"Response sent: status_code={response.status_code} ({process_time:.2f}ms)") return response # --- End of Middleware --- @app.on_event("startup") async def startup_event(): logger.info("Application startup complete.") init_db() logger.info("Database initialized.") current_settings = get_settings() logger.info(f"Loaded LLM API URL: {current_settings.REAL_LLM_API_URL}") @app.post("/v1/chat/completions") async def chat_completions( request: IncomingRequest, settings: Settings = Depends(get_settings) ): """ This endpoint mimics the OpenAI Chat Completions API and supports both streaming and non-streaming responses, with detailed logging. """ log_id = log_request(client_request=request.model_dump()) logger.info(f"Request body logged with ID: {log_id}") if not settings.REAL_LLM_API_KEY or not settings.REAL_LLM_API_URL: logger.error("REAL_LLM_API_KEY or REAL_LLM_API_URL is not configured.") raise HTTPException(status_code=500, detail="LLM API Key or URL is not configured.") messages_to_llm = request.messages if request.tools: messages_to_llm = inject_tools_into_prompt(request.messages, request.tools) # Handle streaming request if request.stream: logger.info(f"Initiating streaming request for log ID: {log_id}") async def stream_and_log(): stream_content_buffer = [] async for chunk in stream_llm_api(messages_to_llm, settings, log_id): stream_content_buffer.append(chunk.decode('utf-8')) yield chunk # After the stream is complete, parse the full content and log it full_content = "".join(stream_content_buffer) response_message = parse_llm_response_from_content(full_content) proxy_response = ProxyResponse(message=response_message) logger.info(f"Streaming client response for log ID {log_id}:\n{proxy_response.model_dump_json(indent=2)}") update_request_log(log_id, client_response=proxy_response.model_dump()) return StreamingResponse(stream_and_log(), media_type="text/event-stream") # Handle non-streaming request try: logger.info(f"Initiating non-streaming request for log ID: {log_id}") response_message = await process_chat_request(messages_to_llm, settings, log_id) proxy_response = ProxyResponse(message=response_message) logger.info(f"Response body for log ID {log_id}:\n{proxy_response.model_dump_json(indent=2)}") # Log client response to DB update_request_log(log_id, client_response=proxy_response.model_dump()) return proxy_response except Exception as e: logger.exception(f"An unexpected error occurred during non-streaming request for log ID: {log_id}") # Log the error to the database update_request_log(log_id, client_response={"error": str(e)}) raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}") @app.get("/") def read_root(): return {"message": "LLM Tool Proxy is running."}