import pytest
import json
import httpx
from typing import List, AsyncGenerator

from app.services import inject_tools_into_prompt, parse_llm_response_from_content, process_chat_request
from app.models import ChatMessage, Tool, ResponseMessage, ToolCall, ToolCallFunction, IncomingRequest
from app.core.config import Settings
from app.database import get_latest_log_entry

# --- Mocks for simulating httpx responses ---


@pytest.fixture
def mock_settings() -> Settings:
    """Provides mock settings for tests."""
    return Settings(
        REAL_LLM_API_URL="http://fake-llm-api.com/chat",
        REAL_LLM_API_KEY="fake-key",
    )


class MockAsyncClient:
    """Mocks the httpx.AsyncClient to simulate LLM responses.

    Behaves as an async context manager and hands back a
    ``MockStreamResponse`` that replays the canned chunks.
    """

    def __init__(self, response_chunks: List[str]):
        self._response_chunks = response_chunks

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass

    def stream(self, method, url, *args, **kwargs):
        # Accept any extra positional/keyword arguments (headers, json,
        # timeout, ...) so the mock does not break when the caller adds or
        # reorders arguments.  The previous rigid signature also shadowed
        # the imported `json` module with its `json` parameter.
        return MockStreamResponse(self._response_chunks)


class MockStreamResponse:
    """Mocks the httpx.Response object for streaming."""

    def __init__(self, chunks: List[str], status_code: int = 200):
        self._chunks = chunks
        self._status_code = status_code

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        pass

    def raise_for_status(self):
        # Mirror httpx behaviour: only a non-200 status raises.  Attach a
        # dummy Request so code inspecting `exc.request` gets a real object
        # instead of None.
        if self._status_code != 200:
            raise httpx.HTTPStatusError(
                "Error",
                request=httpx.Request("POST", "http://fake-llm-api.com/chat"),
                response=httpx.Response(self._status_code),
            )

    async def aiter_bytes(self) -> AsyncGenerator[bytes, None]:
        # Replay each canned chunk as UTF-8 bytes, like a real SSE stream.
        for chunk in self._chunks:
            yield chunk.encode('utf-8')


# --- End Mocks ---


def test_inject_tools_into_prompt():
    """
    Tests that `inject_tools_into_prompt` correctly adds a system message
    with tool definitions to the message list.

    NOTE(review): this test reads live state via `get_latest_log_entry()`,
    so it depends on the database containing at least one logged request —
    consider seeding a fixture instead.
    """
    # 1. Fetch the latest request from the database
    latest_entry = get_latest_log_entry()
    assert latest_entry is not None
    client_request_data = json.loads(latest_entry["client_request"])

    # 2. Parse the data into Pydantic models
    incoming_request = IncomingRequest.model_validate(client_request_data)

    # 3. Call the function to be tested
    modified_messages = inject_tools_into_prompt(incoming_request.messages, incoming_request.tools)

    # 4. Assert the results
    assert len(modified_messages) == len(incoming_request.messages) + 1

    # Check that the first message is the new system prompt
    system_prompt = modified_messages[0]
    assert system_prompt.role == "system"
    assert "You are a helpful assistant with access to a set of tools." in system_prompt.content

    # Check that the tool definitions are in the system prompt
    for tool in incoming_request.tools:
        assert tool.function.name in system_prompt.content


def test_parse_llm_response_from_content():
    """
    Tests that `parse_llm_response_from_content` correctly parses a raw
    LLM text response containing a { and extracts the `ResponseMessage`.
    """
    # Sample raw text from an LLM.
    # Note: Since tags are { and }, we use double braces {{...}} where
    # the outer { and } are tags, and the inner { and } are JSON.
    llm_text = """
Some text from the model.
{{
  "name": "shell",
  "arguments": {
    "command": ["echo", "Hello from the tool!"]
  }
}}
"""

    # Call the function
    response_message = parse_llm_response_from_content(llm_text)

    # Assertions
    assert response_message.content == "Some text from the model."
    assert response_message.tool_calls is not None
    assert len(response_message.tool_calls) == 1

    tool_call = response_message.tool_calls[0]
    assert isinstance(tool_call, ToolCall)
    assert tool_call.function.name == "shell"

    # The arguments are a JSON string, so we parse it for detailed checking
    arguments = json.loads(tool_call.function.arguments)
    assert arguments["command"] == ["echo", "Hello from the tool!"]


@pytest.mark.anyio
async def test_process_chat_request_with_tool_call(monkeypatch, mock_settings):
    """
    Tests that `process_chat_request` can correctly parse a tool call
    from a simulated real LLM streaming response.
    """
    # 1. Define the simulated SSE stream from the LLM
    # Using double braces for tool call tags
    sse_chunks = [
        'data: {"choices": [{"delta": {"content": "Okay, I will run that shell command."}}], "object": "chat.completion.chunk"}\n\n',
        'data: {"choices": [{"delta": {"content": "{{\\n \\"name\\": \\"shell\\",\\n \\"arguments\\": {\\n \\"command\\": [\\"ls\\", \\"-l\\"]\\n }\\n}}\\n"}}], "object": "chat.completion.chunk"}\n\n',
        'data: [DONE]\n\n'
    ]

    # 2. Mock the httpx.AsyncClient so no real network call is made
    def mock_async_client(*args, **kwargs):
        return MockAsyncClient(response_chunks=sse_chunks)

    monkeypatch.setattr(httpx, "AsyncClient", mock_async_client)

    # 3. Prepare the input for process_chat_request
    messages = [ChatMessage(role="user", content="List the files.")]
    tools = [Tool(type="function", function={"name": "shell", "description": "Run a shell command.", "parameters": {}})]
    log_id = 1  # Dummy log ID for the test

    # 4. Call the function
    request_messages = inject_tools_into_prompt(messages, tools)
    response_message = await process_chat_request(request_messages, mock_settings, log_id)

    # 5. Assert the response is parsed correctly
    assert response_message.content is not None
    assert response_message.content.strip() == "Okay, I will run that shell command."
    assert response_message.tool_calls is not None
    assert len(response_message.tool_calls) == 1

    tool_call = response_message.tool_calls[0]
    assert tool_call.function.name == "shell"

    arguments = json.loads(tool_call.function.arguments)
    assert arguments["command"] == ["ls", "-l"]