feat: Initial commit of LLM Tool Proxy
tests/test_main.py  |  85  (Normal file)
@@ -0,0 +1,85 @@
from fastapi.testclient import TestClient
from app.main import app
import json

# The TestClient allows us to make requests to our FastAPI app without a running server.
client = TestClient(app)


def test_root_endpoint():
    """Tests the health check endpoint."""
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"message": "LLM Tool Proxy is running."}
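
# These tests can be run directly with pytest, e.g. `pytest tests/test_main.py -v`
# (assuming the `app` package is importable, e.g. the project root is on PYTHONPATH).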


def test_chat_completions_no_tools(monkeypatch):
    """
    Tests the main endpoint with a simple request that does not include tools.
    This is now an INTEGRATION TEST against the live backend.
    """
    monkeypatch.setenv("REAL_LLM_API_URL", "https://qwapi.oopsapi.com/v1/chat/completions")
    monkeypatch.setenv("REAL_LLM_API_KEY", "dummy-key")

    request_data = {
        "messages": [
            {"role": "user", "content": "Hello there!"}
        ]
    }
    response = client.post("/v1/chat/completions", json=request_data)

    assert response.status_code == 200
    response_json = response.json()

    # Assertions for a real response: check structure and types, not specific content.
    assert "message" in response_json
    assert response_json["message"]["role"] == "assistant"
    # The real LLM should return some content
    assert isinstance(response_json["message"]["content"], str)
    assert len(response_json["message"]["content"]) > 0
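
# For reference, the assertions above imply a response body shaped roughly like the
# following (illustrative only; the actual assistant text varies per run, and any
# extra fields the proxy returns are not checked here):
#
#     {
#         "message": {
#             "role": "assistant",
#             "content": "Hello! How can I help you today?"
#         }
#     }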


def test_chat_completions_with_tools_integration(monkeypatch):
    """
    Tests the main endpoint with a request that includes tools against the live backend.
    We check for a valid response, but cannot guarantee a tool will be called.
    """
    monkeypatch.setenv("REAL_LLM_API_URL", "https://qwapi.oopsapi.com/v1/chat/completions")
    monkeypatch.setenv("REAL_LLM_API_KEY", "dummy-key")

    request_data = {
        "messages": [
            {"role": "user", "content": "What's the weather in San Francisco?"}
        ],
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the current weather for a specified city",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "city": {"type": "string", "description": "The city name"}
                        },
                        "required": ["city"]
                    }
                }
            }
        ]
    }

    response = client.post("/v1/chat/completions", json=request_data)

    # For an integration test, the main goal is to ensure our proxy
    # communicates successfully and can parse the response without errors.
    assert response.status_code == 200
    response_json = response.json()

    # We assert that the basic structure is correct.
    assert "message" in response_json
    assert response_json["message"]["role"] == "assistant"

    # The response might contain content, a tool_call, or both. We just
    # ensure the response fits our Pydantic model, which the TestClient handles.
    # A successful 200 response is our primary success metric here.
    assert response_json is not None
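
# If the model does decide to call the declared function, a proxy that mirrors the
# OpenAI-style chat-completions schema would typically return a "tool_calls" list
# roughly like the sketch below. This shape is an assumption based on the request
# format used above; the test deliberately does not assert it.
#
#     {
#         "message": {
#             "role": "assistant",
#             "content": None,
#             "tool_calls": [
#                 {
#                     "id": "call_abc123",
#                     "type": "function",
#                     "function": {
#                         "name": "get_weather",
#                         "arguments": "{\"city\": \"San Francisco\"}"
#                     }
#                 }
#             ]
#         }
#     }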
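
# Since both chat-completions tests exercise the live backend, a minimal way to keep
# them out of an offline run is an opt-in skip marker, sketched here. The
# RUN_INTEGRATION_TESTS variable name is hypothetical, chosen only for illustration;
# the project does not define it.

import os
import pytest

# Opt-in guard for tests that hit the live backend (hypothetical env var name).
requires_live_backend = pytest.mark.skipif(
    os.getenv("RUN_INTEGRATION_TESTS") != "1",
    reason="Set RUN_INTEGRATION_TESTS=1 to run tests against the live LLM backend.",
)

# Applied as a decorator, e.g.:
#
#     @requires_live_backend
#     def test_chat_completions_no_tools(monkeypatch):
#         ...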