# Tests for OllamaClient (llm module): refine, list_models, identify_speakers, summarize.
import pytest
from unittest.mock import AsyncMock, patch, MagicMock
@pytest.mark.asyncio
async def test_refine_calls_ollama():
    """refine() should POST to the mocked Ollama endpoint and return the reply text."""
    from llm import OllamaClient

    fake_response = MagicMock()
    fake_response.json.return_value = {"response": "# Titel\n\nInhalt."}
    fake_response.raise_for_status = MagicMock()

    with patch("httpx.AsyncClient") as client_cls:
        # The async context manager yields the session whose .post we stub out.
        session = client_cls.return_value.__aenter__.return_value
        session.post = AsyncMock(return_value=fake_response)

        ollama = OllamaClient(base_url="http://localhost:11434")
        refined = await ollama.refine(
            raw_text="Das ist ein test.",
            instructions="Mach eine Zusammenfassung.",
            model="gemma3:12b",
        )

    assert "Inhalt" in refined
    session.post.assert_called_once()
@pytest.mark.asyncio
async def test_list_models_returns_list():
    """list_models() should surface the model names reported by the Ollama API."""
    from llm import OllamaClient

    fake_response = MagicMock()
    fake_response.json.return_value = {"models": [{"name": "gemma3:12b"}, {"name": "mistral:7b"}]}
    fake_response.raise_for_status = MagicMock()

    with patch("httpx.AsyncClient") as client_cls:
        # Stub the GET issued inside the async context manager.
        session = client_cls.return_value.__aenter__.return_value
        session.get = AsyncMock(return_value=fake_response)

        ollama = OllamaClient(base_url="http://localhost:11434")
        available = await ollama.list_models()

    assert "gemma3:12b" in available
@pytest.mark.asyncio
async def test_identify_speakers_returns_dict():
    """identify_speakers() should parse the JSON speaker mapping from the LLM reply."""
    import respx, httpx, json
    from llm import OllamaClient

    client = OllamaClient()
    expected = {"SPEAKER_00": "Thomas", "SPEAKER_01": "Möller"}
    excerpt = "SPEAKER_00: Gut, Herr Möller.\nSPEAKER_01: Danke, Thomas."

    with respx.mock:
        route = respx.post("http://localhost:11434/api/generate")
        route.mock(
            return_value=httpx.Response(200, json={"response": json.dumps(expected)})
        )
        mapping = await client.identify_speakers(excerpt)

    assert mapping == {"SPEAKER_00": "Thomas", "SPEAKER_01": "Möller"}
@pytest.mark.asyncio
async def test_identify_speakers_returns_empty_on_parse_failure():
    """A reply that is not valid JSON should yield an empty mapping, not an error."""
    import respx, httpx
    from llm import OllamaClient

    client = OllamaClient()

    with respx.mock:
        route = respx.post("http://localhost:11434/api/generate")
        route.mock(
            return_value=httpx.Response(200, json={"response": "kein json hier"})
        )
        mapping = await client.identify_speakers("irgendwas")

    assert mapping == {}
@pytest.mark.asyncio
async def test_summarize_returns_string():
    """summarize() should return the markdown text produced by the model."""
    import respx, httpx
    from llm import OllamaClient

    client = OllamaClient()

    with respx.mock:
        route = respx.post("http://localhost:11434/api/generate")
        route.mock(
            return_value=httpx.Response(200, json={"response": "# Zusammenfassung\n\nKurzer Text."})
        )
        summary = await client.summarize("Thomas: Hallo.\nMöller: Hi.", model="gemma3:12b")

    assert "Zusammenfassung" in summary