feat: add whisper.cpp ROCm backend support for AMD GPU acceleration
- transcription.py: new `_transcribe_remote_whispercpp()` using the `/inference` endpoint
- transcription.py: `backend` param routes to the OpenAI-compatible or whisper.cpp remote path
- config.py: `whisper.backend` default `'openai'`, alternative `'whispercpp'`
- pipeline.py: passes `backend` from config to `transcribe_file`
- settings: backend dropdown (OpenAI-compat / whisper.cpp)
- SETUP.md: whisper.cpp ROCm build and systemd setup instructions

whisper-cpp-server running on beastix :8080 (ROCm0, gfx1030, RX 6800 XT)
This commit is contained in:
@@ -53,6 +53,7 @@ async function loadConfig() {
if (!r.ok) return;
const cfg = await r.json();
document.getElementById('audio-device').value = (cfg.audio && cfg.audio.device) || '';
document.getElementById('whisper-backend').value = (cfg.whisper && cfg.whisper.backend) || 'openai';
document.getElementById('whisper-url').value = (cfg.whisper && cfg.whisper.base_url) || '';
document.getElementById('whisper-model').value = (cfg.whisper && cfg.whisper.model) || 'large-v3';
const ollamaUrl = (cfg.ollama && cfg.ollama.base_url) || 'http://localhost:11434';
@@ -96,6 +97,7 @@ document.getElementById('save-btn').addEventListener('click', async function() {
whisper: {
base_url: document.getElementById('whisper-url').value,
model: document.getElementById('whisper-model').value,
backend: document.getElementById('whisper-backend').value,
},
ollama: {
base_url: document.getElementById('ollama-url').value,
Reference in New Issue
Block a user