feat: LLM module — Ollama client with transcript refinement
@@ -0,0 +1,37 @@
# Tests for the OllamaClient in llm.py; @pytest.mark.asyncio requires the pytest-asyncio plugin.
import pytest
from unittest.mock import AsyncMock, patch, MagicMock


@pytest.mark.asyncio
async def test_refine_calls_ollama():
    from llm import OllamaClient

    # Canned response object; .json() returns the payload the client is expected to parse.
    mock_response = MagicMock()
    mock_response.json.return_value = {"response": "# Titel\n\nInhalt."}
    mock_response.raise_for_status = MagicMock()

    # httpx.AsyncClient is patched so no request reaches a real Ollama server.
    with patch("httpx.AsyncClient") as MockClient:
        instance = MockClient.return_value.__aenter__.return_value
        instance.post = AsyncMock(return_value=mock_response)

        client = OllamaClient(base_url="http://localhost:11434")
        result = await client.refine(
            raw_text="Das ist ein test.",
            instructions="Mach eine Zusammenfassung.",
            model="gemma3:12b",
        )

        assert "Inhalt" in result
        instance.post.assert_called_once()


@pytest.mark.asyncio
async def test_list_models_returns_list():
    from llm import OllamaClient

    # Canned response listing two locally available models.
    mock_response = MagicMock()
    mock_response.json.return_value = {"models": [{"name": "gemma3:12b"}, {"name": "mistral:7b"}]}
    mock_response.raise_for_status = MagicMock()

    with patch("httpx.AsyncClient") as MockClient:
        instance = MockClient.return_value.__aenter__.return_value
        instance.get = AsyncMock(return_value=mock_response)

        client = OllamaClient(base_url="http://localhost:11434")
        models = await client.list_models()

        assert "gemma3:12b" in models
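
The tests pin down the public surface of the new module: OllamaClient(base_url=...), an async refine(raw_text, instructions, model) returning the model's text, and an async list_models() returning model names. A minimal sketch of an llm.py that would satisfy them, assuming the standard Ollama HTTP endpoints /api/generate (non-streaming) and /api/tags; the module actually added in this commit may differ in prompt construction and error handling:

# llm.py — sketch only: endpoint paths and payload shapes follow the public
# Ollama HTTP API, but prompt layout and error handling here are assumptions.
import httpx


class OllamaClient:
    def __init__(self, base_url: str):
        self.base_url = base_url.rstrip("/")

    async def refine(self, raw_text: str, instructions: str, model: str) -> str:
        # One non-streaming completion: instructions first, then the raw transcript.
        prompt = f"{instructions}\n\n{raw_text}"
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.base_url}/api/generate",
                json={"model": model, "prompt": prompt, "stream": False},
            )
            response.raise_for_status()
            return response.json()["response"]

    async def list_models(self) -> list[str]:
        # /api/tags lists locally installed models; return just their names.
        async with httpx.AsyncClient() as client:
            response = await client.get(f"{self.base_url}/api/tags")
            response.raise_for_status()
            return [m["name"] for m in response.json().get("models", [])]

With stream=False a single JSON body comes back, which matches the mock_response.json() shape the tests patch in; both methods open a short-lived httpx.AsyncClient, which is why the tests patch that class and stub its __aenter__ result.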