Mirror of https://github.com/mudler/LocalAI.git
feat(qwen-asr): add support to qwen-asr (#8281)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

Committed by GitHub
parent dd8e74a486
commit 1e08e02598
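
For reference, a minimal sketch of how this backend is driven over gRPC. It assumes the generated backend_pb2 / backend_pb2_grpc stubs are importable and that backend.py is already listening on localhost:50051; the message fields mirror those used in test.py further down.

import backend_pb2
import backend_pb2_grpc
import grpc

# Connect to a running backend.py instance (see run.sh).
with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    # Load the model once; Options accepts "key:value" pairs such as
    # "torch_dtype:bf16" or "attn_implementation:flash_attention_2".
    load = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-1.7B"))
    assert load.success, load.message
    # Transcribe a local WAV file; "dst" carries the audio path
    # ("/tmp/audio.wav" here is just an illustrative placeholder).
    result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="/tmp/audio.wav"))
    print(result.text)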
backend/python/qwen-asr/Makefile (new file, 25 lines)
@@ -0,0 +1,25 @@
.DEFAULT_GOAL := install

.PHONY: qwen-asr
qwen-asr:
	bash install.sh

.PHONY: run
run: qwen-asr
	@echo "Running qwen-asr..."
	bash run.sh
	@echo "qwen-asr run."

.PHONY: test
test: qwen-asr
	@echo "Testing qwen-asr..."
	bash test.sh
	@echo "qwen-asr tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
backend/python/qwen-asr/backend.py (new file, 212 lines)
@@ -0,0 +1,212 @@
#!/usr/bin/env python3
"""
gRPC server of LocalAI for Qwen3-ASR (transformers backend, non-vLLM).
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import backend_pb2
import backend_pb2_grpc
import torch
from qwen_asr import Qwen3ASRModel

import grpc

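
# Helpers to coerce "key:value" option strings into typed Python values.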
def is_float(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


_ONE_DAY_IN_SECONDS = 60 * 60 * 24
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))

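
# BackendServicer implements LocalAI's backend gRPC interface:
# Health, LoadModel and AudioTranscription.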
class BackendServicer(backend_pb2_grpc.BackendServicer):
    def Health(self, request, context):
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        if torch.cuda.is_available():
            device = "cuda"
        else:
            device = "cpu"
        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
        if mps_available:
            device = "mps"
        if not torch.cuda.is_available() and request.CUDA:
            return backend_pb2.Result(success=False, message="CUDA is not available")

        self.device = device
        self.options = {}

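        # Options arrive as "key:value" strings; coerce values to
        # float, int or bool where possible before storing them.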
        for opt in request.Options:
            if ":" not in opt:
                continue
            key, value = opt.split(":", 1)
            if is_float(value):
                value = float(value)
            elif is_int(value):
                value = int(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            self.options[key] = value

        model_path = request.Model or "Qwen/Qwen3-ASR-1.7B"
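        # Default to bf16 on CUDA and fp32 elsewhere; a "torch_dtype"
        # option (fp16/bf16/fp32) overrides the default.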
        default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32
        load_dtype = default_dtype
        if "torch_dtype" in self.options:
            d = str(self.options["torch_dtype"]).lower()
            if d == "fp16":
                load_dtype = torch.float16
            elif d == "bf16":
                load_dtype = torch.bfloat16
            elif d == "fp32":
                load_dtype = torch.float32
            del self.options["torch_dtype"]

        self.max_inference_batch_size = self.options.get("max_inference_batch_size", 32)
        self.max_new_tokens = self.options.get("max_new_tokens", 256)

        forced_aligner = self.options.get("forced_aligner")
        if forced_aligner is not None and isinstance(forced_aligner, str):
            forced_aligner = forced_aligner.strip() or None
        attn_implementation = self.options.get("attn_implementation")
        if attn_implementation is not None and isinstance(attn_implementation, str):
            attn_implementation = attn_implementation.strip() or None

        if self.device == "mps":
            device_map = None
        elif self.device == "cuda":
            device_map = "cuda:0"
        else:
            device_map = "cpu"

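        # Assemble keyword arguments for Qwen3ASRModel.from_pretrained;
        # optional settings are only passed when explicitly configured.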
        load_kwargs = dict(
            dtype=load_dtype,
            device_map=device_map,
            max_inference_batch_size=self.max_inference_batch_size,
            max_new_tokens=self.max_new_tokens,
        )
        if attn_implementation:
            load_kwargs["attn_implementation"] = attn_implementation
        if forced_aligner:
            load_kwargs["forced_aligner"] = forced_aligner
            forced_aligner_kwargs = dict(
                dtype=load_dtype,
                device_map=device_map,
            )
            if attn_implementation:
                forced_aligner_kwargs["attn_implementation"] = attn_implementation
            load_kwargs["forced_aligner_kwargs"] = forced_aligner_kwargs

        try:
            print(f"Loading Qwen3-ASR from {model_path}", file=sys.stderr)
            if attn_implementation:
                print(f"Using attn_implementation: {attn_implementation}", file=sys.stderr)
            if forced_aligner:
                print(f"Loading with forced_aligner: {forced_aligner}", file=sys.stderr)
            self.model = Qwen3ASRModel.from_pretrained(model_path, **load_kwargs)
            print("Qwen3-ASR model loaded successfully", file=sys.stderr)
        except Exception as err:
            print(f"[ERROR] LoadModel failed: {err}", file=sys.stderr)
            import traceback
            traceback.print_exc(file=sys.stderr)
            return backend_pb2.Result(success=False, message=str(err))

        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def AudioTranscription(self, request, context):
        result_segments = []
        text = ""
        try:
            audio_path = request.dst
            if not audio_path or not os.path.exists(audio_path):
                print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
                return backend_pb2.TranscriptResult(segments=[], text="")

            language = None
            if request.language and request.language.strip():
                language = request.language.strip()

            results = self.model.transcribe(audio=audio_path, language=language)

            if not results:
                return backend_pb2.TranscriptResult(segments=[], text="")

            r = results[0]
            text = r.text or ""

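            # When timestamps are available, each entry is treated as a
            # (start_seconds, end_seconds, text) tuple and converted to
            # millisecond-based segments; otherwise the full text becomes
            # a single segment.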
            if getattr(r, 'time_stamps', None) and len(r.time_stamps) > 0:
                for idx, ts in enumerate(r.time_stamps):
                    start_ms = 0
                    end_ms = 0
                    seg_text = text
                    if isinstance(ts, (list, tuple)) and len(ts) >= 3:
                        start_ms = int(float(ts[0]) * 1000) if ts[0] is not None else 0
                        end_ms = int(float(ts[1]) * 1000) if ts[1] is not None else 0
                        seg_text = ts[2] if len(ts) > 2 and ts[2] is not None else ""
                    result_segments.append(backend_pb2.TranscriptSegment(
                        id=idx, start=start_ms, end=end_ms, text=seg_text
                    ))
            else:
                if text:
                    result_segments.append(backend_pb2.TranscriptSegment(
                        id=0, start=0, end=0, text=text
                    ))
        except Exception as err:
            print(f"Error in AudioTranscription: {err}", file=sys.stderr)
            import traceback
            traceback.print_exc(file=sys.stderr)
            return backend_pb2.TranscriptResult(segments=[], text="")

        return backend_pb2.TranscriptResult(segments=result_segments, text=text)

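
# Start the gRPC server on the given address and block until a
# termination signal arrives.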
def serve(address):
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            ('grpc.max_message_length', 50 * 1024 * 1024),
            ('grpc.max_send_message_length', 50 * 1024 * 1024),
            ('grpc.max_receive_message_length', 50 * 1024 * 1024),
        ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    def signal_handler(sig, frame):
        print("Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
    args = parser.parse_args()
    serve(args.addr)
backend/python/qwen-asr/install.sh (new file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash
set -e

EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

PYTHON_VERSION="3.12"
PYTHON_PATCH="12"
PY_STANDALONE_TAG="20251120"

installRequirements
backend/python/qwen-asr/requirements-cpu.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cpu
torch
qwen-asr
backend/python/qwen-asr/requirements-cublas12-after.txt (new file, 1 line)
@@ -0,0 +1 @@
flash-attn
backend/python/qwen-asr/requirements-cublas12.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cu121
torch
qwen-asr
backend/python/qwen-asr/requirements-cublas13.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
qwen-asr
backend/python/qwen-asr/requirements-hipblas.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.3
torch==2.7.1+rocm6.3
qwen-asr
backend/python/qwen-asr/requirements-intel-after.txt (new file, 1 line)
@@ -0,0 +1 @@
flash-attn
backend/python/qwen-asr/requirements-intel.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
qwen-asr
backend/python/qwen-asr/requirements-l4t12.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
torch
qwen-asr
backend/python/qwen-asr/requirements-l4t13.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
qwen-asr
backend/python/qwen-asr/requirements-mps.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
torch==2.7.1
qwen-asr
backend/python/qwen-asr/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1
setuptools
backend/python/qwen-asr/run.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

startBackend $@
backend/python/qwen-asr/test.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""
Tests for the Qwen3-ASR gRPC backend.
"""
import unittest
import subprocess
import time
import os
import tempfile
import shutil
import backend_pb2
import backend_pb2_grpc

import grpc

# Skip heavy transcription test in CI (model download + inference)
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"

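
# Each test boots backend.py as a subprocess and exercises it over a
# localhost gRPC channel.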
class TestBackendServicer(unittest.TestCase):
    def setUp(self):
        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
        time.sleep(15)

    def tearDown(self):
        self.service.terminate()
        self.service.wait()

    def test_server_startup(self):
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.Health(backend_pb2.HealthMessage())
                self.assertEqual(response.message, b'OK')
        except Exception as err:
            print(err)
            self.fail("Server failed to start")
        finally:
            self.tearDown()

    def test_load_model(self):
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-1.7B"))
                self.assertTrue(response.success, response.message)
                self.assertEqual(response.message, "Model loaded successfully")
        except Exception as err:
            print(err)
            self.fail("LoadModel service failed")
        finally:
            self.tearDown()

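    # End-to-end check: download a sample WAV, load the smaller 0.6B
    # model and verify the transcribed text and segments.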
    @unittest.skipIf(SKIP_ASR_TESTS, "ASR transcription test skipped (SKIP_ASR_TESTS=true)")
    def test_audio_transcription(self):
        temp_dir = tempfile.mkdtemp()
        audio_file = os.path.join(temp_dir, 'audio.wav')
        try:
            url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
            result = subprocess.run(
                ["wget", "-q", url, "-O", audio_file],
                capture_output=True,
                text=True,
                timeout=30,
            )
            if result.returncode != 0:
                self.skipTest(f"Could not download sample audio: {result.stderr}")
            if not os.path.exists(audio_file):
                self.skipTest("Sample audio file not found after download")

            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-0.6B"))
                self.assertTrue(load_response.success, load_response.message)

                transcript_response = stub.AudioTranscription(
                    backend_pb2.TranscriptRequest(dst=audio_file)
                )
                self.assertIsNotNone(transcript_response)
                self.assertIsNotNone(transcript_response.text)
                self.assertGreaterEqual(len(transcript_response.segments), 0)
                all_text = ""
                for segment in transcript_response.segments:
                    all_text += segment.text
                print(f"All text: {all_text}")
                self.assertIn("big", all_text)
                if transcript_response.segments:
                    self.assertIsNotNone(transcript_response.segments[0].text)
        finally:
            self.tearDown()
            if os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
backend/python/qwen-asr/test.sh (new file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

runUnittests