LocalAI/backend/python/voxcpm/test.py
feat: add VoxCPM tts backend (#8109)

"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
import backend_pb2
import backend_pb2_grpc
import grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service
"""
def setUp(self):
"""
This method sets up the gRPC service by starting the server
"""
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
time.sleep(30)
def tearDown(self) -> None:
"""
This method tears down the gRPC service by terminating the server
"""
self.service.terminate()
self.service.wait()
def test_load_model(self):
"""
This method tests if the model is loaded successfully
"""
try:
self.setUp()
print("Starting test_load_model")
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="openbmb/VoxCPM1.5"))
print(response)
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
tts_request = backend_pb2.TTSRequest(text="VoxCPM is an innovative end-to-end TTS model from ModelBest.", dst="test.wav")
tts_response = stub.TTS(tts_request)
self.assertIsNotNone(tts_response)
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
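

# Optional entry point (not part of the original file): a minimal sketch that
# lets the suite be run directly with `python3 test.py` in addition to
# `python3 -m unittest`.
if __name__ == "__main__":
    unittest.main()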