mirror of
https://github.com/mudler/LocalAI.git
synced 2026-02-02 18:53:32 -05:00
Compare commits
43 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
923ebbb344 | ||
|
|
ea51567b89 | ||
|
|
552c62a19c | ||
|
|
c0b21a921b | ||
|
|
b10045adc2 | ||
|
|
61b5e3b629 | ||
|
|
e35d7cb3b3 | ||
|
|
0fa0ac4797 | ||
|
|
be7ed85838 | ||
|
|
c12b310028 | ||
|
|
0447d5564d | ||
|
|
22c0eb5421 | ||
|
|
a0a00fb937 | ||
|
|
6dd44742ea | ||
|
|
00c72e7d3e | ||
|
|
d01c335cf6 | ||
|
|
5687df4535 | ||
|
|
f5fade97e6 | ||
|
|
b88ae31e4e | ||
|
|
f6daaa7c35 | ||
|
|
c491c6ca90 | ||
|
|
34e054f607 | ||
|
|
e886bb291a | ||
|
|
4bf2f8bbd8 | ||
|
|
d3525b7509 | ||
|
|
c8aa821e0e | ||
|
|
b3191927ae | ||
|
|
54c5a2d9ea | ||
|
|
0279591fec | ||
|
|
8845186955 | ||
|
|
ab8ed24358 | ||
|
|
a021df5a88 | ||
|
|
5f403b1631 | ||
|
|
897ad1729e | ||
|
|
16a18a2e55 | ||
|
|
3387bfaee0 | ||
|
|
1cd33047b4 | ||
|
|
1de045311a | ||
|
|
5fe9bf9f84 | ||
|
|
d4fd0c0609 | ||
|
|
d16722ee13 | ||
|
|
1f10ab39a9 | ||
|
|
4d36e393d1 |
114
.github/workflows/backend.yml
vendored
114
.github/workflows/backend.yml
vendored
@@ -105,6 +105,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "9"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-qwen-tts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "9"
|
||||
@@ -137,7 +150,7 @@ jobs:
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
|
||||
runs-on: 'ubuntu-latest'
|
||||
runs-on: 'bigger-runner'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "llama-cpp"
|
||||
@@ -353,6 +366,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-qwen-tts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -431,6 +457,19 @@ jobs:
|
||||
backend: "vibevoice"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-tts'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -680,6 +719,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-qwen-tts'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
skip-drivers: 'false'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -699,7 +751,7 @@ jobs:
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
|
||||
runs-on: 'ubuntu-latest'
|
||||
runs-on: 'bigger-runner'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
skip-drivers: 'false'
|
||||
backend: "faster-whisper"
|
||||
@@ -712,7 +764,7 @@ jobs:
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-coqui'
|
||||
runs-on: 'ubuntu-latest'
|
||||
runs-on: 'bigger-runner'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
skip-drivers: 'false'
|
||||
backend: "coqui"
|
||||
@@ -824,6 +876,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-qwen-tts'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
skip-drivers: 'true'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
@@ -890,6 +955,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-intel-qwen-tts'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -963,7 +1041,7 @@ jobs:
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-llama-cpp'
|
||||
runs-on: 'ubuntu-latest'
|
||||
runs-on: 'bigger-runner'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "llama-cpp"
|
||||
@@ -989,7 +1067,7 @@ jobs:
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-vulkan-llama-cpp'
|
||||
runs-on: 'ubuntu-latest'
|
||||
runs-on: 'bigger-runner'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "llama-cpp"
|
||||
@@ -1330,19 +1408,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
skip-drivers: 'true'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64-neutts'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
backend: "neutts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -1356,6 +1421,19 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-qwen-tts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
|
||||
21
.github/workflows/test-extra.yml
vendored
21
.github/workflows/test-extra.yml
vendored
@@ -284,4 +284,23 @@ jobs:
|
||||
- name: Test pocket-tts
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/pocket-tts
|
||||
make --jobs=5 --output-sync=target -C backend/python/pocket-tts test
|
||||
make --jobs=5 --output-sync=target -C backend/python/pocket-tts test
|
||||
tests-qwen-tts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: true
|
||||
- name: Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install build-essential ffmpeg
|
||||
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
|
||||
# Install UV
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
pip install --user --no-cache-dir grpcio-tools==1.64.1
|
||||
- name: Test qwen-tts
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/qwen-tts
|
||||
make --jobs=5 --output-sync=target -C backend/python/qwen-tts test
|
||||
@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates curl wget espeak-ng libgomp1 \
|
||||
ffmpeg libopenblas0 libopenblas-dev && \
|
||||
ffmpeg libopenblas0 libopenblas-dev sox && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
8
Makefile
8
Makefile
@@ -1,5 +1,5 @@
|
||||
# Disable parallel execution for backend builds
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine backends/pocket-tts
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine backends/pocket-tts backends/qwen-tts
|
||||
|
||||
GOCMD=go
|
||||
GOTEST=$(GOCMD) test
|
||||
@@ -317,6 +317,7 @@ prepare-test-extra: protogen-python
|
||||
$(MAKE) -C backend/python/vibevoice
|
||||
$(MAKE) -C backend/python/moonshine
|
||||
$(MAKE) -C backend/python/pocket-tts
|
||||
$(MAKE) -C backend/python/qwen-tts
|
||||
|
||||
test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/transformers test
|
||||
@@ -326,6 +327,7 @@ test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/vibevoice test
|
||||
$(MAKE) -C backend/python/moonshine test
|
||||
$(MAKE) -C backend/python/pocket-tts test
|
||||
$(MAKE) -C backend/python/qwen-tts test
|
||||
|
||||
DOCKER_IMAGE?=local-ai
|
||||
DOCKER_AIO_IMAGE?=local-ai-aio
|
||||
@@ -459,6 +461,7 @@ BACKEND_CHATTERBOX = chatterbox|python|.|false|true
|
||||
BACKEND_VIBEVOICE = vibevoice|python|.|--progress=plain|true
|
||||
BACKEND_MOONSHINE = moonshine|python|.|false|true
|
||||
BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
|
||||
BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
|
||||
|
||||
# Helper function to build docker image for a backend
|
||||
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
|
||||
@@ -505,12 +508,13 @@ $(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
|
||||
|
||||
# Pattern rule for docker-save targets
|
||||
docker-save-%: backend-images
|
||||
docker save local-ai-backend:$* -o backend-images/$*.tar
|
||||
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine docker-build-pocket-tts
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts
|
||||
|
||||
########################################################
|
||||
### END Backends
|
||||
|
||||
@@ -298,6 +298,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
|
||||
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **pocket-tts** | Lightweight CPU-based TTS | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **qwen-tts** | High-quality TTS with custom voice, voice design, and voice cloning | CUDA 12/13, ROCm, Intel, CPU |
|
||||
|
||||
### Image & Video Generation
|
||||
| Backend | Description | Acceleration Support |
|
||||
@@ -319,8 +320,8 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|-------------------|-------------------|------------------|
|
||||
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice, pocket-tts | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice, pocket-tts | Intel Arc, Intel iGPUs |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
|
||||
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
|
||||
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
|
||||
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
|
||||
|
||||
@@ -32,6 +32,8 @@ service Backend {
|
||||
rpc GetMetrics(MetricsRequest) returns (MetricsResponse);
|
||||
|
||||
rpc VAD(VADRequest) returns (VADResponse) {}
|
||||
|
||||
rpc ModelMetadata(ModelOptions) returns (ModelMetadataResponse) {}
|
||||
}
|
||||
|
||||
// Define the empty request
|
||||
@@ -410,3 +412,8 @@ message Detection {
|
||||
message DetectResponse {
|
||||
repeated Detection Detections = 1;
|
||||
}
|
||||
|
||||
message ModelMetadataResponse {
|
||||
bool supports_thinking = 1;
|
||||
string rendered_template = 2; // The rendered chat template with enable_thinking=true (empty if not applicable)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=785a71008573e2d84728fb0ba9e851d72d3f8fab
|
||||
LLAMA_VERSION?=a5eaa1d6a3732bc0f460b02b61c95680bba5a012
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
|
||||
@@ -83,8 +83,8 @@ static void start_llama_server(server_context& ctx_server) {
|
||||
|
||||
// print sample chat example to make it clear which template is used
|
||||
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
|
||||
// common_chat_templates_source(ctx_server.impl->chat_templates.get()),
|
||||
// common_chat_format_example(ctx_server.impl->chat_templates.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);
|
||||
// common_chat_templates_source(ctx_server.impl->chat_params.tmpls.get()),
|
||||
// common_chat_format_example(ctx_server.impl->chat_params.tmpls.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);
|
||||
|
||||
// Keep the chat templates initialized in load_model() so they can be used when UseTokenizerTemplate is enabled
|
||||
// Templates will only be used conditionally in Predict/PredictStream when UseTokenizerTemplate is true and Messages are provided
|
||||
@@ -882,7 +882,7 @@ public:
|
||||
std::string prompt_str;
|
||||
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
|
||||
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
|
||||
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
|
||||
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
|
||||
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
|
||||
json body_json;
|
||||
json messages_json = json::array();
|
||||
@@ -1261,12 +1261,7 @@ public:
|
||||
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
|
||||
// This handles all template application, grammar merging, etc. automatically
|
||||
// Files extracted from multimodal content in messages will be added to the files vector
|
||||
// Create parser options with current chat_templates to ensure tmpls is not null
|
||||
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
|
||||
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
|
||||
// Update allow_image and allow_audio based on current mctx state
|
||||
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
|
||||
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
|
||||
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading
|
||||
|
||||
// Debug: Log tools before template processing
|
||||
if (body_json.contains("tools")) {
|
||||
@@ -1312,7 +1307,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
|
||||
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);
|
||||
|
||||
// Debug: Log tools after template processing
|
||||
if (parsed_data.contains("tools")) {
|
||||
@@ -1365,7 +1360,7 @@ public:
|
||||
|
||||
// If not using chat templates, extract files from image_data/audio_data fields
|
||||
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
|
||||
const auto &images_data = data.find("image_data");
|
||||
if (images_data != data.end() && images_data->is_array())
|
||||
{
|
||||
@@ -1593,7 +1588,7 @@ public:
|
||||
std::string prompt_str;
|
||||
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
|
||||
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
|
||||
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
|
||||
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
|
||||
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
|
||||
json body_json;
|
||||
json messages_json = json::array();
|
||||
@@ -1997,12 +1992,7 @@ public:
|
||||
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
|
||||
// This handles all template application, grammar merging, etc. automatically
|
||||
// Files extracted from multimodal content in messages will be added to the files vector
|
||||
// Create parser options with current chat_templates to ensure tmpls is not null
|
||||
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
|
||||
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
|
||||
// Update allow_image and allow_audio based on current mctx state
|
||||
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
|
||||
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
|
||||
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading
|
||||
|
||||
// Debug: Log tools before template processing
|
||||
if (body_json.contains("tools")) {
|
||||
@@ -2048,7 +2038,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
|
||||
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);
|
||||
|
||||
// Debug: Log tools after template processing
|
||||
if (parsed_data.contains("tools")) {
|
||||
@@ -2101,7 +2091,7 @@ public:
|
||||
|
||||
// If not using chat templates, extract files from image_data/audio_data fields
|
||||
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
|
||||
const auto &images_data = data.find("image_data");
|
||||
if (images_data != data.end() && images_data->is_array())
|
||||
{
|
||||
@@ -2486,6 +2476,47 @@ public:
|
||||
response->set_prompt_tokens_processed(res_metrics->n_prompt_tokens_processed_total);
|
||||
|
||||
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status ModelMetadata(ServerContext* /*context*/, const backend::ModelOptions* /*request*/, backend::ModelMetadataResponse* response) override {
|
||||
// Check if model is loaded
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
|
||||
// Check if chat templates are initialized
|
||||
if (ctx_server.impl->chat_params.tmpls == nullptr) {
|
||||
// If templates are not initialized, we can't detect thinking support
|
||||
// Return false as default
|
||||
response->set_supports_thinking(false);
|
||||
response->set_rendered_template("");
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
// Detect thinking support using llama.cpp's function
|
||||
bool supports_thinking = common_chat_templates_support_enable_thinking(ctx_server.impl->chat_params.tmpls.get());
|
||||
response->set_supports_thinking(supports_thinking);
|
||||
|
||||
// Render the template with enable_thinking=true so Go code can detect thinking tokens
|
||||
// This allows reusing existing detection functions in Go
|
||||
std::string rendered_template = "";
|
||||
if (params_base.use_jinja) {
|
||||
// Render the template with enable_thinking=true to see what the actual prompt looks like
|
||||
common_chat_templates_inputs dummy_inputs;
|
||||
common_chat_msg msg;
|
||||
msg.role = "user";
|
||||
msg.content = "test";
|
||||
dummy_inputs.messages = {msg};
|
||||
dummy_inputs.enable_thinking = true;
|
||||
dummy_inputs.use_jinja = params_base.use_jinja;
|
||||
|
||||
const auto rendered = common_chat_templates_apply(ctx_server.impl->chat_params.tmpls.get(), dummy_inputs);
|
||||
rendered_template = rendered.prompt;
|
||||
}
|
||||
|
||||
response->set_rendered_template(rendered_template);
|
||||
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# stablediffusion.cpp (ggml)
|
||||
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
|
||||
STABLEDIFFUSION_GGML_VERSION?=7010bb4dff7bd55b03d35ef9772142c21699eba9
|
||||
STABLEDIFFUSION_GGML_VERSION?=5e4579c11d0678f9765463582d024e58270faa9c
|
||||
|
||||
CMAKE_ARGS+=-DGGML_MAX_NAME=128
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=2eeeba56e9edd762b4b38467bab96c2517163158
|
||||
WHISPER_CPP_VERSION?=7aa8818647303b567c3a21fe4220b2681988e220
|
||||
SO_TARGET?=libgowhisper.so
|
||||
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
|
||||
@@ -428,6 +428,28 @@
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
- &qwen-tts
|
||||
urls:
|
||||
- https://github.com/QwenLM/Qwen3-TTS
|
||||
description: |
|
||||
Qwen3-TTS is a high-quality text-to-speech model supporting custom voice, voice design, and voice cloning.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: apache-2.0
|
||||
name: "qwen-tts"
|
||||
alias: "qwen-tts"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-tts"
|
||||
intel: "intel-qwen-tts"
|
||||
amd: "rocm-qwen-tts"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-tts"
|
||||
default: "cpu-qwen-tts"
|
||||
nvidia-cuda-13: "cuda13-qwen-tts"
|
||||
nvidia-cuda-12: "cuda12-qwen-tts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
- &pocket-tts
|
||||
urls:
|
||||
- https://github.com/kyutai-labs/pocket-tts
|
||||
@@ -537,18 +559,14 @@
|
||||
default: "cpu-neutts"
|
||||
nvidia: "cuda12-neutts"
|
||||
amd: "rocm-neutts"
|
||||
nvidia-l4t: "nvidia-l4t-neutts"
|
||||
nvidia-cuda-12: "cuda12-neutts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts"
|
||||
- !!merge <<: *neutts
|
||||
name: "neutts-development"
|
||||
capabilities:
|
||||
default: "cpu-neutts-development"
|
||||
nvidia: "cuda12-neutts-development"
|
||||
amd: "rocm-neutts-development"
|
||||
nvidia-l4t: "nvidia-l4t-neutts-development"
|
||||
nvidia-cuda-12: "cuda12-neutts-development"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts-development"
|
||||
- !!merge <<: *llamacpp
|
||||
name: "llama-cpp-development"
|
||||
capabilities:
|
||||
@@ -578,11 +596,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "nvidia-l4t-arm64-neutts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-arm64-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "cpu-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-neutts"
|
||||
@@ -598,11 +611,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "nvidia-l4t-arm64-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-arm64-neutts
|
||||
- !!merge <<: *mlx
|
||||
name: "mlx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx"
|
||||
@@ -1627,6 +1635,89 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
|
||||
## qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "qwen-tts-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-tts-development"
|
||||
intel: "intel-qwen-tts-development"
|
||||
amd: "rocm-qwen-tts-development"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-tts-development"
|
||||
default: "cpu-qwen-tts-development"
|
||||
nvidia-cuda-13: "cuda13-qwen-tts-development"
|
||||
nvidia-cuda-12: "cuda12-qwen-tts-development"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts-development"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cpu-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cpu-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda12-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda12-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "intel-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "intel-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "rocm-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "rocm-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "nvidia-l4t-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "nvidia-l4t-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||
## pocket-tts
|
||||
- !!merge <<: *pocket-tts
|
||||
name: "pocket-tts-development"
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchaudio==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchaudio==2.3.1+cxx11.abi
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
transformers
|
||||
numpy>=1.24.0,<1.26.0
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
|
||||
@@ -398,7 +398,7 @@ function runProtogen() {
|
||||
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
|
||||
# you may want to add the following line to a requirements-intel.txt if you use one:
|
||||
#
|
||||
# --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
# --index-url https://download.pytorch.org/whl/xpu
|
||||
#
|
||||
# If you need to add extra flags into the pip install command you can do so by setting the variable EXTRA_PIP_INSTALL_FLAGS
|
||||
# before calling installRequirements. For example:
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch==2.8.0
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
optimum[openvino]
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchaudio==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch==2.8.0+xpu
|
||||
torchaudio==2.8.0+xpu
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers==4.48.3
|
||||
|
||||
@@ -42,12 +42,8 @@ from transformers import T5EncoderModel
|
||||
from safetensors.torch import load_file
|
||||
|
||||
# Import LTX-2 specific utilities
|
||||
try:
|
||||
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
|
||||
LTX2_AVAILABLE = True
|
||||
except ImportError:
|
||||
LTX2_AVAILABLE = False
|
||||
ltx2_encode_video = None
|
||||
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
|
||||
from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
COMPEL = os.environ.get("COMPEL", "0") == "1"
|
||||
@@ -302,12 +298,96 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if pipeline_type == "LTX2ImageToVideoPipeline":
|
||||
self.img2vid = True
|
||||
self.ltx2_pipeline = True
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
# Check if loading from single file (GGUF)
|
||||
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
|
||||
_, single_file_ext = os.path.splitext(modelFile)
|
||||
if single_file_ext == ".gguf":
|
||||
# Load transformer from single GGUF file with quantization
|
||||
transformer_kwargs = {}
|
||||
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
|
||||
transformer_kwargs["quantization_config"] = quantization_config
|
||||
|
||||
transformer = LTX2VideoTransformer3DModel.from_single_file(
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
# Load pipeline with custom transformer
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
pipe.enable_model_cpu_offload()
|
||||
return pipe
|
||||
|
||||
# LTX2Pipeline - text-to-video pipeline, needs txt2vid flag, CPU offload, and special handling
|
||||
if pipeline_type == "LTX2Pipeline":
|
||||
self.txt2vid = True
|
||||
self.ltx2_pipeline = True
|
||||
|
||||
# Check if loading from single file (GGUF)
|
||||
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
|
||||
_, single_file_ext = os.path.splitext(modelFile)
|
||||
if single_file_ext == ".gguf":
|
||||
# Load transformer from single GGUF file with quantization
|
||||
transformer_kwargs = {}
|
||||
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
|
||||
transformer_kwargs["quantization_config"] = quantization_config
|
||||
|
||||
transformer = LTX2VideoTransformer3DModel.from_single_file(
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
# Load pipeline with custom transformer
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
pipe.enable_model_cpu_offload()
|
||||
return pipe
|
||||
@@ -428,6 +508,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
self.txt2vid = False
|
||||
self.ltx2_pipeline = False
|
||||
|
||||
print(f"LoadModel: PipelineType from request: {request.PipelineType}", file=sys.stderr)
|
||||
|
||||
# Load pipeline using dynamic loader
|
||||
# Special cases that require custom initialization are handled first
|
||||
self.pipe = self._load_pipeline(
|
||||
@@ -437,6 +519,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
torchType=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
print(f"LoadModel: After loading - ltx2_pipeline: {self.ltx2_pipeline}, img2vid: {self.img2vid}, txt2vid: {self.txt2vid}, PipelineType: {self.PipelineType}", file=sys.stderr)
|
||||
|
||||
if CLIPSKIP and request.CLIPSkip != 0:
|
||||
self.clip_skip = request.CLIPSkip
|
||||
@@ -674,14 +758,20 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
try:
|
||||
prompt = request.prompt
|
||||
if not prompt:
|
||||
print(f"GenerateVideo: No prompt provided for video generation.", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message="No prompt provided for video generation")
|
||||
|
||||
# Debug: Print raw request values
|
||||
print(f"GenerateVideo: Raw request values - num_frames: {request.num_frames}, fps: {request.fps}, cfg_scale: {request.cfg_scale}, step: {request.step}", file=sys.stderr)
|
||||
|
||||
# Set default values from request or use defaults
|
||||
num_frames = request.num_frames if request.num_frames > 0 else 81
|
||||
fps = request.fps if request.fps > 0 else 16
|
||||
cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
|
||||
num_inference_steps = request.step if request.step > 0 else 40
|
||||
|
||||
print(f"GenerateVideo: Using values - num_frames: {num_frames}, fps: {fps}, cfg_scale: {cfg_scale}, num_inference_steps: {num_inference_steps}", file=sys.stderr)
|
||||
|
||||
# Prepare generation parameters
|
||||
kwargs = {
|
||||
"prompt": prompt,
|
||||
@@ -707,19 +797,34 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
kwargs["end_image"] = load_image(request.end_image)
|
||||
|
||||
print(f"Generating video with {kwargs=}", file=sys.stderr)
|
||||
print(f"GenerateVideo: Pipeline type: {self.PipelineType}, ltx2_pipeline flag: {self.ltx2_pipeline}", file=sys.stderr)
|
||||
|
||||
# Generate video frames based on pipeline type
|
||||
if self.ltx2_pipeline or self.PipelineType == "LTX2ImageToVideoPipeline":
|
||||
# LTX-2 image-to-video generation with audio
|
||||
if not LTX2_AVAILABLE:
|
||||
return backend_pb2.Result(success=False, message="LTX-2 pipeline requires diffusers.pipelines.ltx2.export_utils")
|
||||
if self.ltx2_pipeline or self.PipelineType in ["LTX2Pipeline", "LTX2ImageToVideoPipeline"]:
|
||||
# LTX-2 generation with audio (supports both text-to-video and image-to-video)
|
||||
# Determine if this is text-to-video (no image) or image-to-video (has image)
|
||||
has_image = bool(request.start_image)
|
||||
|
||||
# LTX-2 uses 'image' parameter instead of 'start_image'
|
||||
if request.start_image:
|
||||
image = load_image(request.start_image)
|
||||
kwargs["image"] = image
|
||||
# Remove start_image if it was added
|
||||
kwargs.pop("start_image", None)
|
||||
# Remove image-related parameters that might have been added earlier
|
||||
kwargs.pop("start_image", None)
|
||||
kwargs.pop("end_image", None)
|
||||
|
||||
# LTX2ImageToVideoPipeline uses 'image' parameter for image-to-video
|
||||
# LTX2Pipeline (text-to-video) doesn't need an image parameter
|
||||
if has_image:
|
||||
# Image-to-video: use 'image' parameter
|
||||
if self.PipelineType == "LTX2ImageToVideoPipeline":
|
||||
image = load_image(request.start_image)
|
||||
kwargs["image"] = image
|
||||
print(f"LTX-2: Using image-to-video mode with image", file=sys.stderr)
|
||||
else:
|
||||
# If pipeline type is LTX2Pipeline but we have an image, we can't do image-to-video
|
||||
return backend_pb2.Result(success=False, message="LTX2Pipeline does not support image-to-video. Use LTX2ImageToVideoPipeline for image-to-video generation.")
|
||||
else:
|
||||
# Text-to-video: no image parameter needed
|
||||
# Ensure no image-related kwargs are present
|
||||
kwargs.pop("image", None)
|
||||
print(f"LTX-2: Using text-to-video mode (no image)", file=sys.stderr)
|
||||
|
||||
# LTX-2 uses 'frame_rate' instead of 'fps'
|
||||
frame_rate = float(fps)
|
||||
@@ -730,20 +835,45 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
kwargs["return_dict"] = False
|
||||
|
||||
# Generate video and audio
|
||||
video, audio = self.pipe(**kwargs)
|
||||
print(f"LTX-2: Generating with kwargs: {kwargs}", file=sys.stderr)
|
||||
try:
|
||||
video, audio = self.pipe(**kwargs)
|
||||
print(f"LTX-2: Generated video shape: {video.shape}, audio shape: {audio.shape}", file=sys.stderr)
|
||||
except Exception as e:
|
||||
print(f"LTX-2: Error during pipe() call: {e}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error generating video with LTX-2 pipeline: {e}")
|
||||
|
||||
# Convert video to uint8 format
|
||||
video = (video * 255).round().astype("uint8")
|
||||
video = torch.from_numpy(video)
|
||||
|
||||
print(f"LTX-2: Converting video, shape after conversion: {video.shape}", file=sys.stderr)
|
||||
print(f"LTX-2: Audio sample rate: {self.pipe.vocoder.config.output_sampling_rate}", file=sys.stderr)
|
||||
print(f"LTX-2: Output path: {request.dst}", file=sys.stderr)
|
||||
|
||||
# Use LTX-2's encode_video function which handles audio
|
||||
ltx2_encode_video(
|
||||
video[0],
|
||||
fps=frame_rate,
|
||||
audio=audio[0].float().cpu(),
|
||||
audio_sample_rate=self.pipe.vocoder.config.output_sampling_rate,
|
||||
output_path=request.dst,
|
||||
)
|
||||
try:
|
||||
ltx2_encode_video(
|
||||
video[0],
|
||||
fps=frame_rate,
|
||||
audio=audio[0].float().cpu(),
|
||||
audio_sample_rate=self.pipe.vocoder.config.output_sampling_rate,
|
||||
output_path=request.dst,
|
||||
)
|
||||
# Verify file was created and has content
|
||||
import os
|
||||
if os.path.exists(request.dst):
|
||||
file_size = os.path.getsize(request.dst)
|
||||
print(f"LTX-2: Video file created successfully, size: {file_size} bytes", file=sys.stderr)
|
||||
if file_size == 0:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes). Check LTX-2 encode_video function.")
|
||||
else:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
|
||||
except Exception as e:
|
||||
print(f"LTX-2: Error encoding video: {e}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error encoding video: {e}")
|
||||
|
||||
return backend_pb2.Result(message="Video generated successfully", success=True)
|
||||
elif self.PipelineType == "WanPipeline":
|
||||
@@ -785,11 +915,23 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
output = self.pipe(**kwargs)
|
||||
frames = output.frames[0]
|
||||
else:
|
||||
print(f"GenerateVideo: Pipeline {self.PipelineType} does not match any known video pipeline handler", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")
|
||||
|
||||
# Export video (for non-LTX-2 pipelines)
|
||||
print(f"GenerateVideo: Exporting video to {request.dst} with fps={fps}", file=sys.stderr)
|
||||
export_to_video(frames, request.dst, fps=fps)
|
||||
|
||||
# Verify file was created
|
||||
import os
|
||||
if os.path.exists(request.dst):
|
||||
file_size = os.path.getsize(request.dst)
|
||||
print(f"GenerateVideo: Video file created, size: {file_size} bytes", file=sys.stderr)
|
||||
if file_size == 0:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes)")
|
||||
else:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
|
||||
|
||||
return backend_pb2.Result(message="Video generated successfully", success=True)
|
||||
|
||||
except Exception as err:
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
torchvision==0.20.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchvision
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
git+https://github.com/huggingface/diffusers
|
||||
|
||||
@@ -3,3 +3,4 @@ grpcio==1.76.0
|
||||
pillow
|
||||
protobuf
|
||||
certifi
|
||||
av
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
optimum[openvino]
|
||||
faster-whisper
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
torchaudio==2.5.1+cxx11.abi
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers==4.48.3
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
pocket-tts
|
||||
scipy
|
||||
torch==2.5.1+cxx11.abi
|
||||
torch
|
||||
23
backend/python/qwen-tts/Makefile
Normal file
23
backend/python/qwen-tts/Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
.PHONY: qwen-tts
|
||||
qwen-tts:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: qwen-tts
|
||||
@echo "Running qwen-tts..."
|
||||
bash run.sh
|
||||
@echo "qwen-tts run."
|
||||
|
||||
.PHONY: test
|
||||
test: qwen-tts
|
||||
@echo "Testing qwen-tts..."
|
||||
bash test.sh
|
||||
@echo "qwen-tts tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
475
backend/python/qwen-tts/backend.py
Normal file
475
backend/python/qwen-tts/backend.py
Normal file
@@ -0,0 +1,475 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Qwen3-TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import copy
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
import soundfile as sf
|
||||
from qwen_tts import Qwen3TTSModel
|
||||
|
||||
import grpc
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
# Get device
|
||||
if torch.cuda.is_available():
|
||||
print("CUDA is available", file=sys.stderr)
|
||||
device = "cuda"
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
# Normalize potential 'mpx' typo to 'mps'
|
||||
if device == "mpx":
|
||||
print("Note: device 'mpx' detected, treating it as 'mps'.", file=sys.stderr)
|
||||
device = "mps"
|
||||
|
||||
# Validate mps availability if requested
|
||||
if device == "mps" and not torch.backends.mps.is_available():
|
||||
print("Warning: MPS not available. Falling back to CPU.", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
self.device = device
|
||||
self._torch_device = torch.device(device)
|
||||
|
||||
options = request.Options
|
||||
|
||||
# empty dict
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the audio
|
||||
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
# Get model path from request
|
||||
model_path = request.Model
|
||||
if not model_path:
|
||||
model_path = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"
|
||||
|
||||
# Determine model type from model path or options
|
||||
self.model_type = self.options.get("model_type", None)
|
||||
if not self.model_type:
|
||||
if "CustomVoice" in model_path:
|
||||
self.model_type = "CustomVoice"
|
||||
elif "VoiceDesign" in model_path:
|
||||
self.model_type = "VoiceDesign"
|
||||
elif "Base" in model_path or "0.6B" in model_path or "1.7B" in model_path:
|
||||
self.model_type = "Base" # VoiceClone model
|
||||
else:
|
||||
# Default to CustomVoice
|
||||
self.model_type = "CustomVoice"
|
||||
|
||||
# Cache for voice clone prompts
|
||||
self._voice_clone_cache = {}
|
||||
|
||||
# Store AudioPath, ModelFile, and ModelPath from LoadModel request
|
||||
# These are used later in TTS for VoiceClone mode
|
||||
self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
|
||||
self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
|
||||
self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None
|
||||
|
||||
# Decide dtype & attention implementation
|
||||
if self.device == "mps":
|
||||
load_dtype = torch.float32 # MPS requires float32
|
||||
device_map = None
|
||||
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
|
||||
elif self.device == "cuda":
|
||||
load_dtype = torch.bfloat16
|
||||
device_map = "cuda"
|
||||
attn_impl_primary = "flash_attention_2"
|
||||
else: # cpu
|
||||
load_dtype = torch.float32
|
||||
device_map = "cpu"
|
||||
attn_impl_primary = "sdpa"
|
||||
|
||||
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}, model_type: {self.model_type}", file=sys.stderr)
|
||||
print(f"Loading model from: {model_path}", file=sys.stderr)
|
||||
|
||||
# Load model with device-specific logic
|
||||
# Common parameters for all devices
|
||||
load_kwargs = {
|
||||
"dtype": load_dtype,
|
||||
"attn_implementation": attn_impl_primary,
|
||||
"trust_remote_code": True, # Required for qwen-tts models
|
||||
}
|
||||
|
||||
try:
|
||||
if self.device == "mps":
|
||||
load_kwargs["device_map"] = None # load then move
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
self.model.to("mps")
|
||||
elif self.device == "cuda":
|
||||
load_kwargs["device_map"] = device_map
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
else: # cpu
|
||||
load_kwargs["device_map"] = device_map
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
print(f"[ERROR] Loading model: {type(e).__name__}: {error_msg}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
|
||||
# Check if it's a missing feature extractor/tokenizer error
|
||||
if "speech_tokenizer" in error_msg or "preprocessor_config.json" in error_msg or "feature extractor" in error_msg.lower():
|
||||
print("\n[ERROR] Model files appear to be incomplete. This usually means:", file=sys.stderr)
|
||||
print(" 1. The model download was interrupted or incomplete", file=sys.stderr)
|
||||
print(" 2. The model cache is corrupted", file=sys.stderr)
|
||||
print("\nTo fix this, try:", file=sys.stderr)
|
||||
print(f" rm -rf ~/.cache/huggingface/hub/models--Qwen--Qwen3-TTS-*", file=sys.stderr)
|
||||
print(" Then re-run to trigger a fresh download.", file=sys.stderr)
|
||||
print("\nAlternatively, try using a different model variant:", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-Base", file=sys.stderr)
|
||||
|
||||
if attn_impl_primary == 'flash_attention_2':
|
||||
print("\nTrying to use SDPA instead of flash_attention_2...", file=sys.stderr)
|
||||
load_kwargs["attn_implementation"] = 'sdpa'
|
||||
try:
|
||||
if self.device == "mps":
|
||||
load_kwargs["device_map"] = None
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
self.model.to("mps")
|
||||
else:
|
||||
load_kwargs["device_map"] = (self.device if self.device in ("cuda", "cpu") else None)
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
except Exception as e2:
|
||||
print(f"[ERROR] Failed to load with SDPA: {type(e2).__name__}: {e2}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
raise e2
|
||||
else:
|
||||
raise e
|
||||
|
||||
print(f"Model loaded successfully: {model_path}", file=sys.stderr)
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def _detect_mode(self, request):
|
||||
"""Detect which mode to use based on request parameters."""
|
||||
# Priority: VoiceClone > VoiceDesign > CustomVoice
|
||||
|
||||
# model_type explicitly set
|
||||
if self.model_type == "CustomVoice":
|
||||
return "CustomVoice"
|
||||
if self.model_type == "VoiceClone":
|
||||
return "VoiceClone"
|
||||
if self.model_type == "VoiceDesign":
|
||||
return "VoiceDesign"
|
||||
|
||||
# VoiceClone: AudioPath is provided (from LoadModel, stored in self.audio_path)
|
||||
if self.audio_path:
|
||||
return "VoiceClone"
|
||||
|
||||
# VoiceDesign: instruct option is provided
|
||||
if "instruct" in self.options and self.options["instruct"]:
|
||||
return "VoiceDesign"
|
||||
|
||||
# Default to CustomVoice
|
||||
return "CustomVoice"
|
||||
|
||||
def _get_ref_audio_path(self, request):
|
||||
"""Get reference audio path from stored AudioPath (from LoadModel)."""
|
||||
if not self.audio_path:
|
||||
return None
|
||||
|
||||
# If absolute path, use as-is
|
||||
if os.path.isabs(self.audio_path):
|
||||
return self.audio_path
|
||||
|
||||
# Try relative to ModelFile
|
||||
if self.model_file:
|
||||
model_file_base = os.path.dirname(self.model_file)
|
||||
ref_path = os.path.join(model_file_base, self.audio_path)
|
||||
if os.path.exists(ref_path):
|
||||
return ref_path
|
||||
|
||||
# Try relative to ModelPath
|
||||
if self.model_path:
|
||||
ref_path = os.path.join(self.model_path, self.audio_path)
|
||||
if os.path.exists(ref_path):
|
||||
return ref_path
|
||||
|
||||
# Return as-is (might be URL or base64)
|
||||
return self.audio_path
|
||||
|
||||
def _get_voice_clone_prompt(self, request, ref_audio, ref_text):
|
||||
"""Get or create voice clone prompt, with caching."""
|
||||
cache_key = f"{ref_audio}:{ref_text}"
|
||||
|
||||
if cache_key not in self._voice_clone_cache:
|
||||
print(f"Creating voice clone prompt from {ref_audio}", file=sys.stderr)
|
||||
try:
|
||||
prompt_items = self.model.create_voice_clone_prompt(
|
||||
ref_audio=ref_audio,
|
||||
ref_text=ref_text,
|
||||
x_vector_only_mode=self.options.get("x_vector_only_mode", False),
|
||||
)
|
||||
self._voice_clone_cache[cache_key] = prompt_items
|
||||
except Exception as e:
|
||||
print(f"Error creating voice clone prompt: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return None
|
||||
|
||||
return self._voice_clone_cache[cache_key]
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# Check if dst is provided
|
||||
if not request.dst:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="dst (output path) is required"
|
||||
)
|
||||
|
||||
# Prepare text
|
||||
text = request.text.strip()
|
||||
if not text:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="Text is empty"
|
||||
)
|
||||
|
||||
# Get language (auto-detect if not provided)
|
||||
language = request.language if hasattr(request, 'language') and request.language else None
|
||||
if not language or language == "":
|
||||
language = "Auto" # Auto-detect language
|
||||
|
||||
# Detect mode
|
||||
mode = self._detect_mode(request)
|
||||
print(f"Detected mode: {mode}", file=sys.stderr)
|
||||
|
||||
# Get generation parameters from options
|
||||
max_new_tokens = self.options.get("max_new_tokens", None)
|
||||
top_p = self.options.get("top_p", None)
|
||||
temperature = self.options.get("temperature", None)
|
||||
do_sample = self.options.get("do_sample", None)
|
||||
|
||||
# Prepare generation kwargs
|
||||
generation_kwargs = {}
|
||||
if max_new_tokens is not None:
|
||||
generation_kwargs["max_new_tokens"] = max_new_tokens
|
||||
if top_p is not None:
|
||||
generation_kwargs["top_p"] = top_p
|
||||
if temperature is not None:
|
||||
generation_kwargs["temperature"] = temperature
|
||||
if do_sample is not None:
|
||||
generation_kwargs["do_sample"] = do_sample
|
||||
|
||||
instruct = self.options.get("instruct", "")
|
||||
if instruct is not None and instruct != "":
|
||||
generation_kwargs["instruct"] = instruct
|
||||
|
||||
# Generate audio based on mode
|
||||
if mode == "VoiceClone":
|
||||
# VoiceClone mode
|
||||
ref_audio = self._get_ref_audio_path(request)
|
||||
if not ref_audio:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="AudioPath is required for VoiceClone mode"
|
||||
)
|
||||
|
||||
ref_text = self.options.get("ref_text", None)
|
||||
if not ref_text:
|
||||
# Try to get from request if available
|
||||
if hasattr(request, 'ref_text') and request.ref_text:
|
||||
ref_text = request.ref_text
|
||||
else:
|
||||
# x_vector_only_mode doesn't require ref_text
|
||||
if not self.options.get("x_vector_only_mode", False):
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="ref_text is required for VoiceClone mode (or set x_vector_only_mode=true)"
|
||||
)
|
||||
|
||||
# Check if we should use cached prompt
|
||||
use_cached_prompt = self.options.get("use_cached_prompt", True)
|
||||
voice_clone_prompt = None
|
||||
|
||||
if use_cached_prompt:
|
||||
voice_clone_prompt = self._get_voice_clone_prompt(request, ref_audio, ref_text)
|
||||
if voice_clone_prompt is None:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="Failed to create voice clone prompt"
|
||||
)
|
||||
|
||||
if voice_clone_prompt:
|
||||
# Use cached prompt
|
||||
wavs, sr = self.model.generate_voice_clone(
|
||||
text=text,
|
||||
language=language,
|
||||
voice_clone_prompt=voice_clone_prompt,
|
||||
**generation_kwargs
|
||||
)
|
||||
else:
|
||||
# Create prompt on-the-fly
|
||||
wavs, sr = self.model.generate_voice_clone(
|
||||
text=text,
|
||||
language=language,
|
||||
ref_audio=ref_audio,
|
||||
ref_text=ref_text,
|
||||
x_vector_only_mode=self.options.get("x_vector_only_mode", False),
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
elif mode == "VoiceDesign":
|
||||
# VoiceDesign mode
|
||||
if not instruct:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="instruct option is required for VoiceDesign mode"
|
||||
)
|
||||
|
||||
wavs, sr = self.model.generate_voice_design(
|
||||
text=text,
|
||||
language=language,
|
||||
instruct=instruct,
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
else:
|
||||
# CustomVoice mode (default)
|
||||
speaker = request.voice if request.voice else None
|
||||
if not speaker:
|
||||
# Try to get from options
|
||||
speaker = self.options.get("speaker", None)
|
||||
if not speaker:
|
||||
# Use default speaker
|
||||
speaker = "Vivian"
|
||||
print(f"No speaker specified, using default: {speaker}", file=sys.stderr)
|
||||
|
||||
# Validate speaker if model supports it
|
||||
if hasattr(self.model, 'get_supported_speakers'):
|
||||
try:
|
||||
supported_speakers = self.model.get_supported_speakers()
|
||||
if speaker not in supported_speakers:
|
||||
print(f"Warning: Speaker '{speaker}' not in supported list. Available: {supported_speakers}", file=sys.stderr)
|
||||
# Try to find a close match (case-insensitive)
|
||||
speaker_lower = speaker.lower()
|
||||
for sup_speaker in supported_speakers:
|
||||
if sup_speaker.lower() == speaker_lower:
|
||||
speaker = sup_speaker
|
||||
print(f"Using matched speaker: {speaker}", file=sys.stderr)
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not get supported speakers: {e}", file=sys.stderr)
|
||||
|
||||
wavs, sr = self.model.generate_custom_voice(
|
||||
text=text,
|
||||
language=language,
|
||||
speaker=speaker,
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
# Save output
|
||||
if wavs is not None and len(wavs) > 0:
|
||||
# wavs is a list, take first element
|
||||
audio_data = wavs[0] if isinstance(wavs, list) else wavs
|
||||
sf.write(request.dst, audio_data, sr)
|
||||
print(f"Saved output to {request.dst}", file=sys.stderr)
|
||||
else:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="No audio output generated"
|
||||
)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in TTS: {err}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
13
backend/python/qwen-tts/install.sh
Executable file
13
backend/python/qwen-tts/install.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
5
backend/python/qwen-tts/requirements-cpu.txt
Normal file
5
backend/python/qwen-tts/requirements-cpu.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
1
backend/python/qwen-tts/requirements-cublas12-after.txt
Normal file
1
backend/python/qwen-tts/requirements-cublas12-after.txt
Normal file
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
5
backend/python/qwen-tts/requirements-cublas12.txt
Normal file
5
backend/python/qwen-tts/requirements-cublas12.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
5
backend/python/qwen-tts/requirements-cublas13.txt
Normal file
5
backend/python/qwen-tts/requirements-cublas13.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
5
backend/python/qwen-tts/requirements-hipblas.txt
Normal file
5
backend/python/qwen-tts/requirements-hipblas.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchaudio==2.7.1+rocm6.3
|
||||
qwen-tts
|
||||
sox
|
||||
1
backend/python/qwen-tts/requirements-intel-after.txt
Normal file
1
backend/python/qwen-tts/requirements-intel-after.txt
Normal file
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
5
backend/python/qwen-tts/requirements-intel.txt
Normal file
5
backend/python/qwen-tts/requirements-intel.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
5
backend/python/qwen-tts/requirements-l4t12.txt
Normal file
5
backend/python/qwen-tts/requirements-l4t12.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
5
backend/python/qwen-tts/requirements-l4t13.txt
Normal file
5
backend/python/qwen-tts/requirements-l4t13.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
4
backend/python/qwen-tts/requirements-mps.txt
Normal file
4
backend/python/qwen-tts/requirements-mps.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
torch==2.7.1
|
||||
torchaudio==0.22.1
|
||||
qwen-tts
|
||||
sox
|
||||
6
backend/python/qwen-tts/requirements.txt
Normal file
6
backend/python/qwen-tts/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
soundfile
|
||||
setuptools
|
||||
9
backend/python/qwen-tts/run.sh
Executable file
9
backend/python/qwen-tts/run.sh
Executable file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
98
backend/python/qwen-tts/test.py
Normal file
98
backend/python/qwen-tts/test.py
Normal file
@@ -0,0 +1,98 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(
|
||||
["python3", "backend.py", "--addr", "localhost:50051"],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True
|
||||
)
|
||||
time.sleep(5)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
try:
|
||||
stdout, stderr = self.service.communicate(timeout=5)
|
||||
# Output should already be printed by threads, but print any remaining
|
||||
if stdout:
|
||||
print("=== REMAINING STDOUT ===")
|
||||
print(stdout)
|
||||
if stderr:
|
||||
print("=== REMAINING STDERR ===")
|
||||
print(stderr)
|
||||
except subprocess.TimeoutExpired:
|
||||
self.service.kill()
|
||||
stdout, stderr = self.service.communicate()
|
||||
if stdout:
|
||||
print("=== REMAINING STDOUT ===")
|
||||
print(stdout)
|
||||
if stderr:
|
||||
print("=== REMAINING STDERR ===")
|
||||
print(stderr)
|
||||
|
||||
def test_tts(self):
|
||||
"""
|
||||
This method tests if the TTS generation works successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
# Allow up to 10 minutes for model download on first run
|
||||
response = stub.LoadModel(
|
||||
backend_pb2.ModelOptions(Model="Qwen/Qwen3-TTS-12Hz-0.6B-CustomVoice"),
|
||||
timeout=600.0
|
||||
)
|
||||
self.assertTrue(response.success)
|
||||
|
||||
# Create temporary output file
|
||||
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
|
||||
output_path = tmp_file.name
|
||||
|
||||
tts_request = backend_pb2.TTSRequest(
|
||||
text="Hello, this is a test of the qwen-tts backend.",
|
||||
voice="Vivian",
|
||||
dst=output_path
|
||||
)
|
||||
# Allow up to 2 minutes for TTS generation
|
||||
tts_response = stub.TTS(tts_request, timeout=120.0)
|
||||
self.assertIsNotNone(tts_response)
|
||||
self.assertTrue(tts_response.success)
|
||||
|
||||
# Verify output file exists and is not empty
|
||||
self.assertTrue(os.path.exists(output_path))
|
||||
self.assertGreater(os.path.getsize(output_path), 0)
|
||||
|
||||
# Cleanup
|
||||
os.unlink(output_path)
|
||||
except Exception as err:
|
||||
print(f"Exception: {err}", file=sys.stderr)
|
||||
# Give threads a moment to flush any remaining output
|
||||
time.sleep(1)
|
||||
self.fail("TTS service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
11
backend/python/qwen-tts/test.sh
Executable file
11
backend/python/qwen-tts/test.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
@@ -1,9 +1,7 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
torch
|
||||
rerankers[transformers]
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchvision==0.18.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchvision
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
rfdetr
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
optimum[openvino]
|
||||
llvmlite==0.43.0
|
||||
numba==0.60.0
|
||||
transformers
|
||||
intel-extension-for-transformers
|
||||
bitsandbytes
|
||||
outetts
|
||||
sentence-transformers==5.2.0
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
torchvision==0.20.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchvision
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
git+https://github.com/huggingface/diffusers
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.7.10+xpu
|
||||
accelerate
|
||||
torch==2.7.0+xpu
|
||||
torch
|
||||
transformers
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
bitsandbytes
|
||||
oneccl_bind_pt==2.7.0+xpu
|
||||
bitsandbytes
|
||||
@@ -61,6 +61,18 @@ func ModelInference(ctx context.Context, s string, messages schema.Messages, ima
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Detect thinking support after model load (only if not already detected)
|
||||
// This needs to happen after LoadModel succeeds so the backend can render templates
|
||||
if (c.ReasoningConfig.DisableReasoning == nil && c.ReasoningConfig.DisableReasoningTagPrefill == nil) && c.TemplateConfig.UseTokenizerTemplate {
|
||||
modelOpts := grpcModelOpts(*c, o.SystemState.Model.ModelsPath)
|
||||
config.DetectThinkingSupportFromBackend(ctx, c, inferenceModel, modelOpts)
|
||||
// Update the config in the loader so it persists for future requests
|
||||
cl.UpdateModelConfig(c.Name, func(cfg *config.ModelConfig) {
|
||||
cfg.ReasoningConfig.DisableReasoning = c.ReasoningConfig.DisableReasoning
|
||||
cfg.ReasoningConfig.DisableReasoningTagPrefill = c.ReasoningConfig.DisableReasoningTagPrefill
|
||||
})
|
||||
}
|
||||
|
||||
var protoMessages []*proto.Message
|
||||
// if we are using the tokenizer template, we need to convert the messages to proto messages
|
||||
// unless the prompt has already been tokenized (non-chat endpoints + functions)
|
||||
|
||||
@@ -83,6 +83,7 @@ type RunCMD struct {
|
||||
EnableTracing bool `env:"LOCALAI_ENABLE_TRACING,ENABLE_TRACING" help:"Enable API tracing" group:"api"`
|
||||
TracingMaxItems int `env:"LOCALAI_TRACING_MAX_ITEMS" default:"1024" help:"Maximum number of traces to keep" group:"api"`
|
||||
AgentJobRetentionDays int `env:"LOCALAI_AGENT_JOB_RETENTION_DAYS,AGENT_JOB_RETENTION_DAYS" default:"30" help:"Number of days to keep agent job history (default: 30)" group:"api"`
|
||||
OpenResponsesStoreTTL string `env:"LOCALAI_OPEN_RESPONSES_STORE_TTL,OPEN_RESPONSES_STORE_TTL" default:"0" help:"TTL for Open Responses store (e.g., 1h, 30m, 0 = no expiration)" group:"api"`
|
||||
|
||||
Version bool
|
||||
}
|
||||
@@ -249,6 +250,15 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
opts = append(opts, config.WithLRUEvictionRetryInterval(dur))
|
||||
}
|
||||
|
||||
// Handle Open Responses store TTL
|
||||
if r.OpenResponsesStoreTTL != "" && r.OpenResponsesStoreTTL != "0" {
|
||||
dur, err := time.ParseDuration(r.OpenResponsesStoreTTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid Open Responses store TTL: %w", err)
|
||||
}
|
||||
opts = append(opts, config.WithOpenResponsesStoreTTL(dur))
|
||||
}
|
||||
|
||||
// split ":" to get backend name and the uri
|
||||
for _, v := range r.ExternalGRPCBackends {
|
||||
backend := v[:strings.IndexByte(v, ':')]
|
||||
|
||||
@@ -86,6 +86,8 @@ type ApplicationConfig struct {
|
||||
|
||||
AgentJobRetentionDays int // Default: 30 days
|
||||
|
||||
OpenResponsesStoreTTL time.Duration // TTL for Open Responses store (0 = no expiration)
|
||||
|
||||
PathWithoutAuth []string
|
||||
}
|
||||
|
||||
@@ -467,6 +469,12 @@ func WithAgentJobRetentionDays(days int) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithOpenResponsesStoreTTL(ttl time.Duration) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.OpenResponsesStoreTTL = ttl
|
||||
}
|
||||
}
|
||||
|
||||
func WithEnforcedPredownloadScans(enforced bool) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.EnforcePredownloadScans = enforced
|
||||
@@ -594,6 +602,12 @@ func (o *ApplicationConfig) ToRuntimeSettings() RuntimeSettings {
|
||||
} else {
|
||||
lruEvictionRetryInterval = "1s" // default
|
||||
}
|
||||
var openResponsesStoreTTL string
|
||||
if o.OpenResponsesStoreTTL > 0 {
|
||||
openResponsesStoreTTL = o.OpenResponsesStoreTTL.String()
|
||||
} else {
|
||||
openResponsesStoreTTL = "0" // default: no expiration
|
||||
}
|
||||
|
||||
return RuntimeSettings{
|
||||
WatchdogEnabled: &watchdogEnabled,
|
||||
@@ -628,6 +642,7 @@ func (o *ApplicationConfig) ToRuntimeSettings() RuntimeSettings {
|
||||
AutoloadBackendGalleries: &autoloadBackendGalleries,
|
||||
ApiKeys: &apiKeys,
|
||||
AgentJobRetentionDays: &agentJobRetentionDays,
|
||||
OpenResponsesStoreTTL: &openResponsesStoreTTL,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,6 +784,14 @@ func (o *ApplicationConfig) ApplyRuntimeSettings(settings *RuntimeSettings) (req
|
||||
if settings.AgentJobRetentionDays != nil {
|
||||
o.AgentJobRetentionDays = *settings.AgentJobRetentionDays
|
||||
}
|
||||
if settings.OpenResponsesStoreTTL != nil {
|
||||
if *settings.OpenResponsesStoreTTL == "0" || *settings.OpenResponsesStoreTTL == "" {
|
||||
o.OpenResponsesStoreTTL = 0 // No expiration
|
||||
} else if dur, err := time.ParseDuration(*settings.OpenResponsesStoreTTL); err == nil {
|
||||
o.OpenResponsesStoreTTL = dur
|
||||
}
|
||||
// This setting doesn't require restart, can be updated dynamically
|
||||
}
|
||||
// Note: ApiKeys requires special handling (merging with startup keys) - handled in caller
|
||||
|
||||
return requireRestart
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/mudler/LocalAI/pkg/grpc"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/reasoning"
|
||||
"github.com/mudler/LocalAI/pkg/xsysinfo"
|
||||
"github.com/mudler/xlog"
|
||||
|
||||
gguf "github.com/gpustack/gguf-parser-go"
|
||||
"github.com/gpustack/gguf-parser-go/util/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -62,16 +68,25 @@ func guessGGUFFromFile(cfg *ModelConfig, f *gguf.GGUFFile, defaultCtx int) {
|
||||
cfg.NGPULayers = &defaultHigh
|
||||
}
|
||||
|
||||
xlog.Debug("guessDefaultsFromFile: NGPULayers set", "NGPULayers", cfg.NGPULayers)
|
||||
xlog.Debug("[gguf] guessDefaultsFromFile: NGPULayers set", "NGPULayers", cfg.NGPULayers, "modelName", f.Metadata().Name)
|
||||
|
||||
// identify from well known templates first, otherwise use the raw jinja template
|
||||
chatTemplate, found := f.Header.MetadataKV.Get("tokenizer.chat_template")
|
||||
if found {
|
||||
// fill jinja template
|
||||
cfg.modelTemplate = chatTemplate.ValueString()
|
||||
}
|
||||
|
||||
// Thinking support detection is done after model load via DetectThinkingSupportFromBackend
|
||||
|
||||
// template estimations
|
||||
if cfg.HasTemplate() {
|
||||
// nothing to guess here
|
||||
xlog.Debug("guessDefaultsFromFile: template already set", "name", cfg.Name)
|
||||
xlog.Debug("[gguf] guessDefaultsFromFile: template already set", "name", cfg.Name, "modelName", f.Metadata().Name)
|
||||
return
|
||||
}
|
||||
|
||||
xlog.Debug("Model file loaded", "file", cfg.ModelFileName(), "eosTokenID", f.Tokenizer().EOSTokenID, "bosTokenID", f.Tokenizer().BOSTokenID, "modelName", f.Metadata().Name, "architecture", f.Architecture().Architecture)
|
||||
xlog.Debug("[gguf] Model file loaded", "file", cfg.ModelFileName(), "eosTokenID", f.Tokenizer().EOSTokenID, "bosTokenID", f.Tokenizer().BOSTokenID, "modelName", f.Metadata().Name, "architecture", f.Architecture().Architecture)
|
||||
|
||||
// guess the name
|
||||
if cfg.Name == "" {
|
||||
@@ -83,4 +98,49 @@ func guessGGUFFromFile(cfg *ModelConfig, f *gguf.GGUFFile, defaultCtx int) {
|
||||
cfg.FunctionsConfig.GrammarConfig.NoGrammar = true
|
||||
cfg.Options = append(cfg.Options, "use_jinja:true")
|
||||
cfg.KnownUsecaseStrings = append(cfg.KnownUsecaseStrings, "FLAG_CHAT")
|
||||
|
||||
}
|
||||
|
||||
// DetectThinkingSupportFromBackend calls the ModelMetadata gRPC method to detect
|
||||
// if the model supports thinking mode and if the template ends with a thinking start token.
|
||||
// This should be called after the model is loaded.
|
||||
// The results are stored in cfg.SupportsThinking and cfg.ThinkingForcedOpen.
|
||||
func DetectThinkingSupportFromBackend(ctx context.Context, cfg *ModelConfig, backendClient grpc.Backend, modelOptions *pb.ModelOptions) {
|
||||
if backendClient == nil {
|
||||
xlog.Debug("[gguf] DetectThinkingSupportFromBackend: backend client is nil, skipping detection")
|
||||
return
|
||||
}
|
||||
|
||||
if modelOptions == nil {
|
||||
xlog.Debug("[gguf] DetectThinkingSupportFromBackend: model options is nil, skipping detection")
|
||||
return
|
||||
}
|
||||
|
||||
// Only detect for llama-cpp backend when using tokenizer templates
|
||||
if cfg.Backend != "llama-cpp" || !cfg.TemplateConfig.UseTokenizerTemplate {
|
||||
xlog.Debug("[gguf] DetectThinkingSupportFromBackend: skipping detection", "backend", cfg.Backend, "useTokenizerTemplate", cfg.TemplateConfig.UseTokenizerTemplate)
|
||||
return
|
||||
}
|
||||
|
||||
metadata, err := backendClient.ModelMetadata(ctx, modelOptions)
|
||||
if err != nil {
|
||||
xlog.Warn("[gguf] DetectThinkingSupportFromBackend: failed to get model metadata", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if metadata != nil {
|
||||
cfg.ReasoningConfig.DisableReasoning = ptr.To(!metadata.SupportsThinking)
|
||||
|
||||
// Use the rendered template to detect if thinking token is at the end
|
||||
// This reuses the existing DetectThinkingStartToken function
|
||||
if metadata.RenderedTemplate != "" {
|
||||
thinkingStartToken := reasoning.DetectThinkingStartToken(metadata.RenderedTemplate, &cfg.ReasoningConfig)
|
||||
thinkingForcedOpen := thinkingStartToken != ""
|
||||
cfg.ReasoningConfig.DisableReasoningTagPrefill = ptr.To(!thinkingForcedOpen)
|
||||
xlog.Debug("[gguf] DetectThinkingSupportFromBackend: thinking support detected", "supports_thinking", metadata.SupportsThinking, "thinking_forced_open", thinkingForcedOpen, "thinking_start_token", thinkingStartToken)
|
||||
} else {
|
||||
cfg.ReasoningConfig.DisableReasoningTagPrefill = ptr.To(true)
|
||||
xlog.Debug("[gguf] DetectThinkingSupportFromBackend: thinking support detected", "supports_thinking", metadata.SupportsThinking, "thinking_forced_open", false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/reasoning"
|
||||
"github.com/mudler/cogito"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
@@ -30,6 +31,7 @@ type TTSConfig struct {
|
||||
// @Description ModelConfig represents a model configuration
|
||||
type ModelConfig struct {
|
||||
modelConfigFile string `yaml:"-" json:"-"`
|
||||
modelTemplate string `yaml:"-" json:"-"`
|
||||
schema.PredictionOptions `yaml:"parameters,omitempty" json:"parameters,omitempty"`
|
||||
Name string `yaml:"name,omitempty" json:"name,omitempty"`
|
||||
|
||||
@@ -51,6 +53,7 @@ type ModelConfig struct {
|
||||
ResponseFormatMap map[string]interface{} `yaml:"-" json:"-"`
|
||||
|
||||
FunctionsConfig functions.FunctionsConfig `yaml:"function,omitempty" json:"function,omitempty"`
|
||||
ReasoningConfig reasoning.Config `yaml:"reasoning,omitempty" json:"reasoning,omitempty"`
|
||||
|
||||
FeatureFlag FeatureFlag `yaml:"feature_flags,omitempty" json:"feature_flags,omitempty"` // Feature Flag registry. We move fast, and features may break on a per model/backend basis. Registry for (usually temporary) flags that indicate aborting something early.
|
||||
// LLM configs (GPT4ALL, Llama.cpp, ...)
|
||||
@@ -521,6 +524,11 @@ func (c *ModelConfig) GetModelConfigFile() string {
|
||||
return c.modelConfigFile
|
||||
}
|
||||
|
||||
// GetModelTemplate returns the model's chat template if available
|
||||
func (c *ModelConfig) GetModelTemplate() string {
|
||||
return c.modelTemplate
|
||||
}
|
||||
|
||||
type ModelConfigUsecase int
|
||||
|
||||
const (
|
||||
|
||||
@@ -246,6 +246,17 @@ func (bcl *ModelConfigLoader) RemoveModelConfig(m string) {
|
||||
delete(bcl.configs, m)
|
||||
}
|
||||
|
||||
// UpdateModelConfig updates an existing model config in the loader.
|
||||
// This is useful for updating runtime-detected properties like thinking support.
|
||||
func (bcl *ModelConfigLoader) UpdateModelConfig(m string, updater func(*ModelConfig)) {
|
||||
bcl.Lock()
|
||||
defer bcl.Unlock()
|
||||
if cfg, exists := bcl.configs[m]; exists {
|
||||
updater(&cfg)
|
||||
bcl.configs[m] = cfg
|
||||
}
|
||||
}
|
||||
|
||||
// Preload prepare models if they are not local but url or huggingface repositories
|
||||
func (bcl *ModelConfigLoader) Preload(modelPath string) error {
|
||||
bcl.Lock()
|
||||
|
||||
@@ -60,4 +60,7 @@ type RuntimeSettings struct {
|
||||
|
||||
// Agent settings
|
||||
AgentJobRetentionDays *int `json:"agent_job_retention_days,omitempty"`
|
||||
|
||||
// Open Responses settings
|
||||
OpenResponsesStoreTTL *string `json:"open_responses_store_ttl,omitempty"` // TTL for stored responses (e.g., "1h", "30m", "0" = no expiration)
|
||||
}
|
||||
|
||||
@@ -193,6 +193,8 @@ func API(application *application.Application) (*echo.Echo, error) {
|
||||
corsConfig.AllowOrigins = strings.Split(application.ApplicationConfig().CORSAllowOrigins, ",")
|
||||
}
|
||||
e.Use(middleware.CORSWithConfig(corsConfig))
|
||||
} else {
|
||||
e.Use(middleware.CORS())
|
||||
}
|
||||
|
||||
// CSRF middleware
|
||||
@@ -214,6 +216,7 @@ func API(application *application.Application) (*echo.Echo, error) {
|
||||
routes.RegisterLocalAIRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application.TemplatesEvaluator(), application)
|
||||
routes.RegisterOpenAIRoutes(e, requestExtractor, application)
|
||||
routes.RegisterAnthropicRoutes(e, requestExtractor, application)
|
||||
routes.RegisterOpenResponsesRoutes(e, requestExtractor, application)
|
||||
if !application.ApplicationConfig().DisableWebUI {
|
||||
routes.RegisterUIAPIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application)
|
||||
routes.RegisterUIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService())
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/openresponses"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/xlog"
|
||||
@@ -84,6 +85,16 @@ func UpdateSettingsEndpoint(app *application.Application) echo.HandlerFunc {
|
||||
})
|
||||
}
|
||||
}
|
||||
if settings.OpenResponsesStoreTTL != nil {
|
||||
if *settings.OpenResponsesStoreTTL != "0" && *settings.OpenResponsesStoreTTL != "" {
|
||||
if _, err := time.ParseDuration(*settings.OpenResponsesStoreTTL); err != nil {
|
||||
return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
|
||||
Success: false,
|
||||
Error: "Invalid open_responses_store_ttl format: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save to file
|
||||
if appConfig.DynamicConfigsDir == "" {
|
||||
@@ -144,6 +155,22 @@ func UpdateSettingsEndpoint(app *application.Application) echo.HandlerFunc {
|
||||
xlog.Info("Updated LRU eviction retry settings", "maxRetries", maxRetries, "retryInterval", retryInterval)
|
||||
}
|
||||
|
||||
// Update Open Responses store TTL dynamically
|
||||
if settings.OpenResponsesStoreTTL != nil {
|
||||
ttl := time.Duration(0)
|
||||
if *settings.OpenResponsesStoreTTL != "0" && *settings.OpenResponsesStoreTTL != "" {
|
||||
if dur, err := time.ParseDuration(*settings.OpenResponsesStoreTTL); err == nil {
|
||||
ttl = dur
|
||||
} else {
|
||||
xlog.Warn("Invalid Open Responses store TTL format", "ttl", *settings.OpenResponsesStoreTTL, "error", err)
|
||||
}
|
||||
}
|
||||
// Import the store package
|
||||
store := openresponses.GetGlobalStore()
|
||||
store.SetTTL(ttl)
|
||||
xlog.Info("Updated Open Responses store TTL", "ttl", ttl)
|
||||
}
|
||||
|
||||
// Check if agent job retention changed
|
||||
agentJobChanged := settings.AgentJobRetentionDays != nil
|
||||
|
||||
|
||||
@@ -167,6 +167,16 @@ func VideoEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfi
|
||||
|
||||
baseURL := middleware.BaseURL(c)
|
||||
|
||||
xlog.Debug("VideoEndpoint: Calling VideoGeneration",
|
||||
"num_frames", input.NumFrames,
|
||||
"fps", input.FPS,
|
||||
"cfg_scale", input.CFGScale,
|
||||
"step", input.Step,
|
||||
"seed", input.Seed,
|
||||
"width", width,
|
||||
"height", height,
|
||||
"negative_prompt", input.NegativePrompt)
|
||||
|
||||
fn, err := backend.VideoGeneration(
|
||||
height,
|
||||
width,
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
reason "github.com/mudler/LocalAI/pkg/reasoning"
|
||||
|
||||
"github.com/mudler/LocalAI/core/templates"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
@@ -38,6 +39,16 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
}
|
||||
responses <- initialMessage
|
||||
|
||||
// Detect if thinking token is already in prompt or template
|
||||
// When UseTokenizerTemplate is enabled, predInput is empty, so we check the template
|
||||
var template string
|
||||
if config.TemplateConfig.UseTokenizerTemplate {
|
||||
template = config.GetModelTemplate()
|
||||
} else {
|
||||
template = s
|
||||
}
|
||||
thinkingStartToken := reason.DetectThinkingStartToken(template, &config.ReasoningConfig)
|
||||
|
||||
// Track accumulated content for reasoning extraction
|
||||
accumulatedContent := ""
|
||||
lastEmittedReasoning := ""
|
||||
@@ -45,8 +56,8 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
|
||||
_, _, err := ComputeChoices(req, s, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, tokenUsage backend.TokenUsage) bool {
|
||||
accumulatedContent += s
|
||||
// Extract reasoning from accumulated content
|
||||
currentReasoning, cleanedContent := functions.ExtractReasoning(accumulatedContent)
|
||||
|
||||
currentReasoning, cleanedContent := reason.ExtractReasoningWithConfig(accumulatedContent, thinkingStartToken, config.ReasoningConfig)
|
||||
|
||||
// Calculate new reasoning delta (what we haven't emitted yet)
|
||||
var reasoningDelta *string
|
||||
@@ -118,6 +129,15 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
return err
|
||||
}
|
||||
processTools := func(noAction string, prompt string, req *schema.OpenAIRequest, config *config.ModelConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse, extraUsage bool) error {
|
||||
// Detect if thinking token is already in prompt or template
|
||||
var template string
|
||||
if config.TemplateConfig.UseTokenizerTemplate {
|
||||
template = config.GetModelTemplate()
|
||||
} else {
|
||||
template = prompt
|
||||
}
|
||||
thinkingStartToken := reason.DetectThinkingStartToken(template, &config.ReasoningConfig)
|
||||
|
||||
result := ""
|
||||
lastEmittedCount := 0
|
||||
_, tokenUsage, err := ComputeChoices(req, prompt, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
|
||||
@@ -229,9 +249,8 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Extract reasoning before processing tool calls
|
||||
reasoning, cleanedResult := functions.ExtractReasoning(result)
|
||||
result = cleanedResult
|
||||
// Prepend thinking token if needed, then extract reasoning before processing tool calls
|
||||
reasoning, result := reason.ExtractReasoningWithConfig(result, thinkingStartToken, config.ReasoningConfig)
|
||||
|
||||
textContentToReturn = functions.ParseTextContent(result, config.FunctionsConfig)
|
||||
result = functions.CleanupLLMResult(result, config.FunctionsConfig)
|
||||
@@ -617,11 +636,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
|
||||
// no streaming mode
|
||||
default:
|
||||
// Detect if thinking token is already in prompt or template
|
||||
var template string
|
||||
if config.TemplateConfig.UseTokenizerTemplate {
|
||||
template = config.GetModelTemplate() // TODO: this should be the parsed jinja template. But for now this is the best we can do.
|
||||
} else {
|
||||
template = predInput
|
||||
}
|
||||
thinkingStartToken := reason.DetectThinkingStartToken(template, &config.ReasoningConfig)
|
||||
|
||||
xlog.Debug("Thinking start token", "thinkingStartToken", thinkingStartToken, "template", template)
|
||||
|
||||
tokenCallback := func(s string, c *[]schema.Choice) {
|
||||
// Extract reasoning from the response
|
||||
reasoning, cleanedS := functions.ExtractReasoning(s)
|
||||
s = cleanedS
|
||||
// Prepend thinking token if needed, then extract reasoning from the response
|
||||
reasoning, s := reason.ExtractReasoningWithConfig(s, thinkingStartToken, config.ReasoningConfig)
|
||||
|
||||
if !shouldUseFn {
|
||||
// no function is called, just reply and use stop as finish reason
|
||||
|
||||
@@ -1,140 +0,0 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func VideoEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenAIRequest)
|
||||
if !ok || input == nil {
|
||||
return echo.ErrBadRequest
|
||||
}
|
||||
var raw map[string]interface{}
|
||||
body := make([]byte, 0)
|
||||
if c.Request().Body != nil {
|
||||
c.Request().Body.Read(body)
|
||||
}
|
||||
if len(body) > 0 {
|
||||
_ = json.Unmarshal(body, &raw)
|
||||
}
|
||||
// Build VideoRequest using shared mapper
|
||||
vr := MapOpenAIToVideo(input, raw)
|
||||
// Place VideoRequest into context so localai.VideoEndpoint can consume it
|
||||
c.Set(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, vr)
|
||||
// Delegate to existing localai handler
|
||||
return localai.VideoEndpoint(cl, ml, appConfig)(c)
|
||||
}
|
||||
}
|
||||
|
||||
// VideoEndpoint godoc
|
||||
// @Summary Generate a video from an OpenAI-compatible request
|
||||
// @Description Accepts an OpenAI-style request and delegates to the LocalAI video generator
|
||||
// @Tags openai
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param request body schema.OpenAIRequest true "OpenAI-style request"
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 400 {object} map[string]interface{}
|
||||
// @Router /v1/videos [post]
|
||||
|
||||
func MapOpenAIToVideo(input *schema.OpenAIRequest, raw map[string]interface{}) *schema.VideoRequest {
|
||||
vr := &schema.VideoRequest{}
|
||||
if input == nil {
|
||||
return vr
|
||||
}
|
||||
|
||||
if input.Model != "" {
|
||||
vr.Model = input.Model
|
||||
}
|
||||
|
||||
// Prompt mapping
|
||||
switch p := input.Prompt.(type) {
|
||||
case string:
|
||||
vr.Prompt = p
|
||||
case []interface{}:
|
||||
if len(p) > 0 {
|
||||
if s, ok := p[0].(string); ok {
|
||||
vr.Prompt = s
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Size
|
||||
size := input.Size
|
||||
if size == "" && raw != nil {
|
||||
if v, ok := raw["size"].(string); ok {
|
||||
size = v
|
||||
}
|
||||
}
|
||||
if size != "" {
|
||||
parts := strings.SplitN(size, "x", 2)
|
||||
if len(parts) == 2 {
|
||||
if wi, err := strconv.Atoi(parts[0]); err == nil {
|
||||
vr.Width = int32(wi)
|
||||
}
|
||||
if hi, err := strconv.Atoi(parts[1]); err == nil {
|
||||
vr.Height = int32(hi)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// seconds -> num frames
|
||||
secondsStr := ""
|
||||
if raw != nil {
|
||||
if v, ok := raw["seconds"].(string); ok {
|
||||
secondsStr = v
|
||||
} else if v, ok := raw["seconds"].(float64); ok {
|
||||
secondsStr = fmt.Sprintf("%v", int(v))
|
||||
}
|
||||
}
|
||||
fps := int32(30)
|
||||
if raw != nil {
|
||||
if rawFPS, ok := raw["fps"]; ok {
|
||||
switch rf := rawFPS.(type) {
|
||||
case float64:
|
||||
fps = int32(rf)
|
||||
case string:
|
||||
if fi, err := strconv.Atoi(rf); err == nil {
|
||||
fps = int32(fi)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if secondsStr != "" {
|
||||
if secF, err := strconv.Atoi(secondsStr); err == nil {
|
||||
vr.FPS = fps
|
||||
vr.NumFrames = int32(secF) * fps
|
||||
}
|
||||
}
|
||||
|
||||
// input_reference
|
||||
if raw != nil {
|
||||
if v, ok := raw["input_reference"].(string); ok {
|
||||
vr.StartImage = v
|
||||
}
|
||||
}
|
||||
|
||||
// response format
|
||||
if input.ResponseFormat != nil {
|
||||
if rf, ok := input.ResponseFormat.(string); ok {
|
||||
vr.ResponseFormat = rf
|
||||
}
|
||||
}
|
||||
|
||||
if input.Step != 0 {
|
||||
vr.Step = int32(input.Step)
|
||||
}
|
||||
|
||||
return vr
|
||||
}
|
||||
3669
core/http/endpoints/openresponses/responses.go
Normal file
3669
core/http/endpoints/openresponses/responses.go
Normal file
File diff suppressed because it is too large
Load Diff
453
core/http/endpoints/openresponses/store.go
Normal file
453
core/http/endpoints/openresponses/store.go
Normal file
@@ -0,0 +1,453 @@
|
||||
package openresponses
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
// ResponseStore provides thread-safe storage for Open Responses API responses
|
||||
type ResponseStore struct {
|
||||
mu sync.RWMutex
|
||||
responses map[string]*StoredResponse
|
||||
ttl time.Duration // Time-to-live for stored responses (0 = no expiration)
|
||||
cleanupCtx context.Context
|
||||
cleanupCancel context.CancelFunc
|
||||
}
|
||||
|
||||
// StreamedEvent represents a buffered SSE event for streaming resume
|
||||
type StreamedEvent struct {
|
||||
SequenceNumber int `json:"sequence_number"`
|
||||
EventType string `json:"event_type"`
|
||||
Data []byte `json:"data"` // JSON-serialized event
|
||||
}
|
||||
|
||||
// StoredResponse contains a complete response with its input request and output items
|
||||
type StoredResponse struct {
|
||||
Request *schema.OpenResponsesRequest
|
||||
Response *schema.ORResponseResource
|
||||
Items map[string]*schema.ORItemField // item_id -> item mapping for quick lookup
|
||||
StoredAt time.Time
|
||||
ExpiresAt *time.Time // nil if no expiration
|
||||
|
||||
// Background execution support
|
||||
CancelFunc context.CancelFunc // For cancellation of background tasks
|
||||
StreamEvents []StreamedEvent // Buffered events for streaming resume
|
||||
StreamEnabled bool // Was created with stream=true
|
||||
IsBackground bool // Was created with background=true
|
||||
EventsChan chan struct{} // Signals new events for live subscribers
|
||||
mu sync.RWMutex // Protect concurrent access to this response
|
||||
}
|
||||
|
||||
var (
|
||||
globalStore *ResponseStore
|
||||
storeOnce sync.Once
|
||||
)
|
||||
|
||||
// GetGlobalStore returns the singleton response store instance
|
||||
func GetGlobalStore() *ResponseStore {
|
||||
storeOnce.Do(func() {
|
||||
globalStore = NewResponseStore(0) // Default: no TTL, will be updated from appConfig
|
||||
})
|
||||
return globalStore
|
||||
}
|
||||
|
||||
// SetTTL updates the TTL for the store
|
||||
// This will affect all new responses stored after this call
|
||||
func (s *ResponseStore) SetTTL(ttl time.Duration) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Stop existing cleanup loop if running
|
||||
if s.cleanupCancel != nil {
|
||||
s.cleanupCancel()
|
||||
s.cleanupCancel = nil
|
||||
s.cleanupCtx = nil
|
||||
}
|
||||
|
||||
s.ttl = ttl
|
||||
|
||||
// If TTL > 0, start cleanup loop
|
||||
if ttl > 0 {
|
||||
s.cleanupCtx, s.cleanupCancel = context.WithCancel(context.Background())
|
||||
go s.cleanupLoop(s.cleanupCtx)
|
||||
}
|
||||
|
||||
xlog.Debug("Updated Open Responses store TTL", "ttl", ttl, "cleanup_running", ttl > 0)
|
||||
}
|
||||
|
||||
// NewResponseStore creates a new response store with optional TTL
|
||||
// If ttl is 0, responses are stored indefinitely
|
||||
func NewResponseStore(ttl time.Duration) *ResponseStore {
|
||||
store := &ResponseStore{
|
||||
responses: make(map[string]*StoredResponse),
|
||||
ttl: ttl,
|
||||
}
|
||||
|
||||
// Start cleanup goroutine if TTL is set
|
||||
if ttl > 0 {
|
||||
store.cleanupCtx, store.cleanupCancel = context.WithCancel(context.Background())
|
||||
go store.cleanupLoop(store.cleanupCtx)
|
||||
}
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
// Store stores a response with its request and items
|
||||
func (s *ResponseStore) Store(responseID string, request *schema.OpenResponsesRequest, response *schema.ORResponseResource) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Build item index for quick lookup
|
||||
items := make(map[string]*schema.ORItemField)
|
||||
for i := range response.Output {
|
||||
item := &response.Output[i]
|
||||
if item.ID != "" {
|
||||
items[item.ID] = item
|
||||
}
|
||||
}
|
||||
|
||||
stored := &StoredResponse{
|
||||
Request: request,
|
||||
Response: response,
|
||||
Items: items,
|
||||
StoredAt: time.Now(),
|
||||
ExpiresAt: nil,
|
||||
}
|
||||
|
||||
// Set expiration if TTL is configured
|
||||
if s.ttl > 0 {
|
||||
expiresAt := time.Now().Add(s.ttl)
|
||||
stored.ExpiresAt = &expiresAt
|
||||
}
|
||||
|
||||
s.responses[responseID] = stored
|
||||
xlog.Debug("Stored Open Responses response", "response_id", responseID, "items_count", len(items))
|
||||
}
|
||||
|
||||
// Get retrieves a stored response by ID
|
||||
func (s *ResponseStore) Get(responseID string) (*StoredResponse, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
stored, exists := s.responses[responseID]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
// Check expiration
|
||||
if stored.ExpiresAt != nil && time.Now().After(*stored.ExpiresAt) {
|
||||
// Expired, but we'll return it anyway and let caller handle cleanup
|
||||
return nil, fmt.Errorf("response expired: %s", responseID)
|
||||
}
|
||||
|
||||
return stored, nil
|
||||
}
|
||||
|
||||
// GetItem retrieves a specific item from a stored response
|
||||
func (s *ResponseStore) GetItem(responseID, itemID string) (*schema.ORItemField, error) {
|
||||
stored, err := s.Get(responseID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, exists := stored.Items[itemID]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("item not found: %s in response %s", itemID, responseID)
|
||||
}
|
||||
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// FindItem searches for an item across all stored responses
|
||||
// Returns the item and the response ID it was found in
|
||||
func (s *ResponseStore) FindItem(itemID string) (*schema.ORItemField, string, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
now := time.Now()
|
||||
for responseID, stored := range s.responses {
|
||||
// Skip expired responses
|
||||
if stored.ExpiresAt != nil && now.After(*stored.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
|
||||
if item, exists := stored.Items[itemID]; exists {
|
||||
return item, responseID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, "", fmt.Errorf("item not found in any stored response: %s", itemID)
|
||||
}
|
||||
|
||||
// Delete removes a response from storage
|
||||
func (s *ResponseStore) Delete(responseID string) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
delete(s.responses, responseID)
|
||||
xlog.Debug("Deleted Open Responses response", "response_id", responseID)
|
||||
}
|
||||
|
||||
// Cleanup removes expired responses
|
||||
func (s *ResponseStore) Cleanup() int {
|
||||
if s.ttl == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
count := 0
|
||||
for id, stored := range s.responses {
|
||||
if stored.ExpiresAt != nil && now.After(*stored.ExpiresAt) {
|
||||
delete(s.responses, id)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
if count > 0 {
|
||||
xlog.Debug("Cleaned up expired Open Responses", "count", count)
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// cleanupLoop runs periodic cleanup of expired responses
|
||||
func (s *ResponseStore) cleanupLoop(ctx context.Context) {
|
||||
if s.ttl == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(s.ttl / 2) // Cleanup at half TTL interval
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
xlog.Debug("Stopped Open Responses store cleanup loop")
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.Cleanup()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count returns the number of stored responses
|
||||
func (s *ResponseStore) Count() int {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return len(s.responses)
|
||||
}
|
||||
|
||||
// StoreBackground stores a background response with cancel function and optional streaming support
|
||||
func (s *ResponseStore) StoreBackground(responseID string, request *schema.OpenResponsesRequest, response *schema.ORResponseResource, cancelFunc context.CancelFunc, streamEnabled bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Build item index for quick lookup
|
||||
items := make(map[string]*schema.ORItemField)
|
||||
for i := range response.Output {
|
||||
item := &response.Output[i]
|
||||
if item.ID != "" {
|
||||
items[item.ID] = item
|
||||
}
|
||||
}
|
||||
|
||||
stored := &StoredResponse{
|
||||
Request: request,
|
||||
Response: response,
|
||||
Items: items,
|
||||
StoredAt: time.Now(),
|
||||
ExpiresAt: nil,
|
||||
CancelFunc: cancelFunc,
|
||||
StreamEvents: []StreamedEvent{},
|
||||
StreamEnabled: streamEnabled,
|
||||
IsBackground: true,
|
||||
EventsChan: make(chan struct{}, 100), // Buffered channel for event notifications
|
||||
}
|
||||
|
||||
// Set expiration if TTL is configured
|
||||
if s.ttl > 0 {
|
||||
expiresAt := time.Now().Add(s.ttl)
|
||||
stored.ExpiresAt = &expiresAt
|
||||
}
|
||||
|
||||
s.responses[responseID] = stored
|
||||
xlog.Debug("Stored background Open Responses response", "response_id", responseID, "stream_enabled", streamEnabled)
|
||||
}
|
||||
|
||||
// UpdateStatus updates the status of a stored response
|
||||
func (s *ResponseStore) UpdateStatus(responseID string, status string, completedAt *int64) error {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
stored.mu.Lock()
|
||||
defer stored.mu.Unlock()
|
||||
|
||||
stored.Response.Status = status
|
||||
stored.Response.CompletedAt = completedAt
|
||||
|
||||
xlog.Debug("Updated response status", "response_id", responseID, "status", status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateResponse updates the entire response object for a stored response
|
||||
func (s *ResponseStore) UpdateResponse(responseID string, response *schema.ORResponseResource) error {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
stored.mu.Lock()
|
||||
defer stored.mu.Unlock()
|
||||
|
||||
// Rebuild item index
|
||||
items := make(map[string]*schema.ORItemField)
|
||||
for i := range response.Output {
|
||||
item := &response.Output[i]
|
||||
if item.ID != "" {
|
||||
items[item.ID] = item
|
||||
}
|
||||
}
|
||||
|
||||
stored.Response = response
|
||||
stored.Items = items
|
||||
|
||||
xlog.Debug("Updated response", "response_id", responseID, "status", response.Status, "items_count", len(items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendEvent appends a streaming event to the buffer for resume support
|
||||
func (s *ResponseStore) AppendEvent(responseID string, event *schema.ORStreamEvent) error {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
// Serialize the event
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal event: %w", err)
|
||||
}
|
||||
|
||||
stored.mu.Lock()
|
||||
stored.StreamEvents = append(stored.StreamEvents, StreamedEvent{
|
||||
SequenceNumber: event.SequenceNumber,
|
||||
EventType: event.Type,
|
||||
Data: data,
|
||||
})
|
||||
stored.mu.Unlock()
|
||||
|
||||
// Notify any subscribers of new event
|
||||
select {
|
||||
case stored.EventsChan <- struct{}{}:
|
||||
default:
|
||||
// Channel full, subscribers will catch up
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetEventsAfter returns all events with sequence number greater than startingAfter
|
||||
func (s *ResponseStore) GetEventsAfter(responseID string, startingAfter int) ([]StreamedEvent, error) {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
stored.mu.RLock()
|
||||
defer stored.mu.RUnlock()
|
||||
|
||||
var result []StreamedEvent
|
||||
for _, event := range stored.StreamEvents {
|
||||
if event.SequenceNumber > startingAfter {
|
||||
result = append(result, event)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Cancel cancels a background response if it's still in progress
|
||||
func (s *ResponseStore) Cancel(responseID string) (*schema.ORResponseResource, error) {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
stored.mu.Lock()
|
||||
defer stored.mu.Unlock()
|
||||
|
||||
// If already in a terminal state, just return the response (idempotent)
|
||||
status := stored.Response.Status
|
||||
if status == schema.ORStatusCompleted || status == schema.ORStatusFailed ||
|
||||
status == schema.ORStatusIncomplete || status == schema.ORStatusCancelled {
|
||||
xlog.Debug("Response already in terminal state", "response_id", responseID, "status", status)
|
||||
return stored.Response, nil
|
||||
}
|
||||
|
||||
// Cancel the context if available
|
||||
if stored.CancelFunc != nil {
|
||||
stored.CancelFunc()
|
||||
xlog.Debug("Cancelled background response", "response_id", responseID)
|
||||
}
|
||||
|
||||
// Update status to cancelled
|
||||
now := time.Now().Unix()
|
||||
stored.Response.Status = schema.ORStatusCancelled
|
||||
stored.Response.CompletedAt = &now
|
||||
|
||||
return stored.Response, nil
|
||||
}
|
||||
|
||||
// GetEventsChan returns the events notification channel for a response
|
||||
func (s *ResponseStore) GetEventsChan(responseID string) (chan struct{}, error) {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
return stored.EventsChan, nil
|
||||
}
|
||||
|
||||
// IsStreamEnabled checks if a response was created with streaming enabled
|
||||
func (s *ResponseStore) IsStreamEnabled(responseID string) (bool, error) {
|
||||
s.mu.RLock()
|
||||
stored, exists := s.responses[responseID]
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return false, fmt.Errorf("response not found: %s", responseID)
|
||||
}
|
||||
|
||||
stored.mu.RLock()
|
||||
defer stored.mu.RUnlock()
|
||||
|
||||
return stored.StreamEnabled, nil
|
||||
}
|
||||
13
core/http/endpoints/openresponses/store_suite_test.go
Normal file
13
core/http/endpoints/openresponses/store_suite_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package openresponses
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestStore(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "ResponseStore Suite")
|
||||
}
|
||||
626
core/http/endpoints/openresponses/store_test.go
Normal file
626
core/http/endpoints/openresponses/store_test.go
Normal file
@@ -0,0 +1,626 @@
|
||||
package openresponses
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("ResponseStore", func() {
|
||||
var store *ResponseStore
|
||||
|
||||
BeforeEach(func() {
|
||||
store = NewResponseStore(0) // No TTL for most tests
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Clean up
|
||||
})
|
||||
|
||||
Describe("Store and Get", func() {
|
||||
It("should store and retrieve a response", func() {
|
||||
responseID := "resp_test123"
|
||||
request := &schema.OpenResponsesRequest{
|
||||
Model: "test-model",
|
||||
Input: "Hello",
|
||||
}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
CreatedAt: time.Now().Unix(),
|
||||
Status: "completed",
|
||||
Model: "test-model",
|
||||
Output: []schema.ORItemField{
|
||||
{
|
||||
Type: "message",
|
||||
ID: "msg_123",
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
Content: []schema.ORContentPart{{
|
||||
Type: "output_text",
|
||||
Text: "Hello, world!",
|
||||
Annotations: []schema.ORAnnotation{},
|
||||
Logprobs: []schema.ORLogProb{},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored).ToNot(BeNil())
|
||||
Expect(stored.Response.ID).To(Equal(responseID))
|
||||
Expect(stored.Request.Model).To(Equal("test-model"))
|
||||
Expect(len(stored.Items)).To(Equal(1))
|
||||
Expect(stored.Items["msg_123"]).ToNot(BeNil())
|
||||
Expect(stored.Items["msg_123"].ID).To(Equal("msg_123"))
|
||||
})
|
||||
|
||||
It("should return error for non-existent response", func() {
|
||||
_, err := store.Get("nonexistent")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
|
||||
It("should index all items by ID", func() {
|
||||
responseID := "resp_test456"
|
||||
request := &schema.OpenResponsesRequest{
|
||||
Model: "test-model",
|
||||
Input: "Test",
|
||||
}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{
|
||||
Type: "message",
|
||||
ID: "msg_1",
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
},
|
||||
{
|
||||
Type: "function_call",
|
||||
ID: "fc_1",
|
||||
Status: "completed",
|
||||
CallID: "fc_1",
|
||||
Name: "test_function",
|
||||
Arguments: `{"arg": "value"}`,
|
||||
},
|
||||
{
|
||||
Type: "message",
|
||||
ID: "msg_2",
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(stored.Items)).To(Equal(3))
|
||||
Expect(stored.Items["msg_1"]).ToNot(BeNil())
|
||||
Expect(stored.Items["fc_1"]).ToNot(BeNil())
|
||||
Expect(stored.Items["msg_2"]).ToNot(BeNil())
|
||||
})
|
||||
|
||||
It("should handle items without IDs", func() {
|
||||
responseID := "resp_test789"
|
||||
request := &schema.OpenResponsesRequest{
|
||||
Model: "test-model",
|
||||
Input: "Test",
|
||||
}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{
|
||||
Type: "message",
|
||||
ID: "", // No ID
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
},
|
||||
{
|
||||
Type: "message",
|
||||
ID: "msg_with_id",
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// Only items with IDs are indexed
|
||||
Expect(len(stored.Items)).To(Equal(1))
|
||||
Expect(stored.Items["msg_with_id"]).ToNot(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GetItem", func() {
|
||||
It("should retrieve a specific item by ID", func() {
|
||||
responseID := "resp_item_test"
|
||||
itemID := "msg_specific"
|
||||
request := &schema.OpenResponsesRequest{
|
||||
Model: "test-model",
|
||||
Input: "Test",
|
||||
}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{
|
||||
Type: "message",
|
||||
ID: itemID,
|
||||
Status: "completed",
|
||||
Role: "assistant",
|
||||
Content: []schema.ORContentPart{{
|
||||
Type: "output_text",
|
||||
Text: "Specific message",
|
||||
Annotations: []schema.ORAnnotation{},
|
||||
Logprobs: []schema.ORLogProb{},
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
item, err := store.GetItem(responseID, itemID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(item).ToNot(BeNil())
|
||||
Expect(item.ID).To(Equal(itemID))
|
||||
Expect(item.Type).To(Equal("message"))
|
||||
})
|
||||
|
||||
It("should return error for non-existent item", func() {
|
||||
responseID := "resp_item_test2"
|
||||
request := &schema.OpenResponsesRequest{
|
||||
Model: "test-model",
|
||||
Input: "Test",
|
||||
}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{
|
||||
Type: "message",
|
||||
ID: "msg_existing",
|
||||
Status: "completed",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
_, err := store.GetItem(responseID, "nonexistent_item")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("item not found"))
|
||||
})
|
||||
|
||||
It("should return error for non-existent response when getting item", func() {
|
||||
_, err := store.GetItem("nonexistent_response", "any_item")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("response not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("FindItem", func() {
|
||||
It("should find an item across all stored responses", func() {
|
||||
// Store first response
|
||||
responseID1 := "resp_find_1"
|
||||
itemID1 := "msg_find_1"
|
||||
store.Store(responseID1, &schema.OpenResponsesRequest{Model: "test"}, &schema.ORResponseResource{
|
||||
ID: responseID1,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{Type: "message", ID: itemID1, Status: "completed"},
|
||||
},
|
||||
})
|
||||
|
||||
// Store second response
|
||||
responseID2 := "resp_find_2"
|
||||
itemID2 := "msg_find_2"
|
||||
store.Store(responseID2, &schema.OpenResponsesRequest{Model: "test"}, &schema.ORResponseResource{
|
||||
ID: responseID2,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{Type: "message", ID: itemID2, Status: "completed"},
|
||||
},
|
||||
})
|
||||
|
||||
// Find item from first response
|
||||
item, foundResponseID, err := store.FindItem(itemID1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(item).ToNot(BeNil())
|
||||
Expect(item.ID).To(Equal(itemID1))
|
||||
Expect(foundResponseID).To(Equal(responseID1))
|
||||
|
||||
// Find item from second response
|
||||
item, foundResponseID, err = store.FindItem(itemID2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(item).ToNot(BeNil())
|
||||
Expect(item.ID).To(Equal(itemID2))
|
||||
Expect(foundResponseID).To(Equal(responseID2))
|
||||
})
|
||||
|
||||
It("should return error when item not found in any response", func() {
|
||||
_, _, err := store.FindItem("nonexistent_item")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("item not found in any stored response"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Delete", func() {
|
||||
It("should delete a stored response", func() {
|
||||
responseID := "resp_delete_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
Expect(store.Count()).To(Equal(1))
|
||||
|
||||
store.Delete(responseID)
|
||||
Expect(store.Count()).To(Equal(0))
|
||||
|
||||
_, err := store.Get(responseID)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should handle deleting non-existent response gracefully", func() {
|
||||
// Should not panic
|
||||
store.Delete("nonexistent")
|
||||
Expect(store.Count()).To(Equal(0))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Count", func() {
|
||||
It("should return correct count of stored responses", func() {
|
||||
Expect(store.Count()).To(Equal(0))
|
||||
|
||||
store.Store("resp_1", &schema.OpenResponsesRequest{Model: "test"}, &schema.ORResponseResource{ID: "resp_1", Object: "response"})
|
||||
Expect(store.Count()).To(Equal(1))
|
||||
|
||||
store.Store("resp_2", &schema.OpenResponsesRequest{Model: "test"}, &schema.ORResponseResource{ID: "resp_2", Object: "response"})
|
||||
Expect(store.Count()).To(Equal(2))
|
||||
|
||||
store.Delete("resp_1")
|
||||
Expect(store.Count()).To(Equal(1))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("TTL and Expiration", func() {
|
||||
It("should set expiration when TTL is configured", func() {
|
||||
ttlStore := NewResponseStore(100 * time.Millisecond)
|
||||
responseID := "resp_ttl_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{ID: responseID, Object: "response"}
|
||||
|
||||
ttlStore.Store(responseID, request, response)
|
||||
|
||||
stored, err := ttlStore.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored.ExpiresAt).ToNot(BeNil())
|
||||
Expect(stored.ExpiresAt.After(time.Now())).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should not set expiration when TTL is 0", func() {
|
||||
responseID := "resp_no_ttl"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{ID: responseID, Object: "response"}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored.ExpiresAt).To(BeNil())
|
||||
})
|
||||
|
||||
It("should clean up expired responses", func() {
|
||||
ttlStore := NewResponseStore(50 * time.Millisecond)
|
||||
responseID := "resp_expire_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{ID: responseID, Object: "response"}
|
||||
|
||||
ttlStore.Store(responseID, request, response)
|
||||
Expect(ttlStore.Count()).To(Equal(1))
|
||||
|
||||
// Wait for expiration (longer than TTL and cleanup interval)
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
|
||||
// Cleanup should remove expired response (may have already been cleaned by goroutine)
|
||||
count := ttlStore.Cleanup()
|
||||
// Count might be 0 if cleanup goroutine already ran, or 1 if we're first
|
||||
Expect(count).To(BeNumerically(">=", 0))
|
||||
Expect(ttlStore.Count()).To(Equal(0))
|
||||
|
||||
_, err := ttlStore.Get(responseID)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should return error for expired response", func() {
|
||||
ttlStore := NewResponseStore(50 * time.Millisecond)
|
||||
responseID := "resp_expire_error"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{ID: responseID, Object: "response"}
|
||||
|
||||
ttlStore.Store(responseID, request, response)
|
||||
|
||||
// Wait for expiration (but not long enough for cleanup goroutine to remove it)
|
||||
time.Sleep(75 * time.Millisecond)
|
||||
|
||||
// Try to get before cleanup goroutine removes it
|
||||
_, err := ttlStore.Get(responseID)
|
||||
// Error could be "expired" or "not found" (if cleanup already ran)
|
||||
Expect(err).To(HaveOccurred())
|
||||
// Either error message is acceptable
|
||||
errMsg := err.Error()
|
||||
Expect(errMsg).To(Or(ContainSubstring("expired"), ContainSubstring("not found")))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Thread Safety", func() {
|
||||
It("should handle concurrent stores and gets", func() {
|
||||
// This is a basic concurrency test
|
||||
done := make(chan bool, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func(id int) {
|
||||
responseID := fmt.Sprintf("resp_concurrent_%d", id)
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Output: []schema.ORItemField{
|
||||
{Type: "message", ID: fmt.Sprintf("msg_%d", id), Status: "completed"},
|
||||
},
|
||||
}
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
// Retrieve immediately
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored).ToNot(BeNil())
|
||||
done <- true
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all goroutines
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
Expect(store.Count()).To(Equal(10))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GetGlobalStore", func() {
|
||||
It("should return singleton instance", func() {
|
||||
store1 := GetGlobalStore()
|
||||
store2 := GetGlobalStore()
|
||||
Expect(store1).To(Equal(store2))
|
||||
})
|
||||
|
||||
It("should persist data across GetGlobalStore calls", func() {
|
||||
globalStore := GetGlobalStore()
|
||||
responseID := "resp_global_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{ID: responseID, Object: "response"}
|
||||
|
||||
globalStore.Store(responseID, request, response)
|
||||
|
||||
// Get store again
|
||||
globalStore2 := GetGlobalStore()
|
||||
stored, err := globalStore2.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored).ToNot(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Background Mode Support", func() {
|
||||
It("should store background response with cancel function", func() {
|
||||
responseID := "resp_bg_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusQueued,
|
||||
}
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store.StoreBackground(responseID, request, response, cancel, true)
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored).ToNot(BeNil())
|
||||
Expect(stored.IsBackground).To(BeTrue())
|
||||
Expect(stored.StreamEnabled).To(BeTrue())
|
||||
Expect(stored.CancelFunc).ToNot(BeNil())
|
||||
})
|
||||
|
||||
It("should update status of stored response", func() {
|
||||
responseID := "resp_status_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusQueued,
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
err := store.UpdateStatus(responseID, schema.ORStatusInProgress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
stored, err := store.Get(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(stored.Response.Status).To(Equal(schema.ORStatusInProgress))
|
||||
})
|
||||
|
||||
It("should append and retrieve streaming events", func() {
|
||||
responseID := "resp_events_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusInProgress,
|
||||
}
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store.StoreBackground(responseID, request, response, cancel, true)
|
||||
|
||||
// Append events
|
||||
event1 := &schema.ORStreamEvent{
|
||||
Type: "response.created",
|
||||
SequenceNumber: 0,
|
||||
}
|
||||
event2 := &schema.ORStreamEvent{
|
||||
Type: "response.in_progress",
|
||||
SequenceNumber: 1,
|
||||
}
|
||||
event3 := &schema.ORStreamEvent{
|
||||
Type: "response.output_text.delta",
|
||||
SequenceNumber: 2,
|
||||
}
|
||||
|
||||
err := store.AppendEvent(responseID, event1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = store.AppendEvent(responseID, event2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = store.AppendEvent(responseID, event3)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Get all events after -1 (all events)
|
||||
events, err := store.GetEventsAfter(responseID, -1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(events).To(HaveLen(3))
|
||||
|
||||
// Get events after sequence 1
|
||||
events, err = store.GetEventsAfter(responseID, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(events).To(HaveLen(1))
|
||||
Expect(events[0].SequenceNumber).To(Equal(2))
|
||||
})
|
||||
|
||||
It("should cancel an in-progress response", func() {
|
||||
responseID := "resp_cancel_test"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusInProgress,
|
||||
}
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store.StoreBackground(responseID, request, response, cancel, false)
|
||||
|
||||
// Cancel the response
|
||||
cancelledResponse, err := store.Cancel(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cancelledResponse.Status).To(Equal(schema.ORStatusCancelled))
|
||||
Expect(cancelledResponse.CompletedAt).ToNot(BeNil())
|
||||
})
|
||||
|
||||
It("should be idempotent when cancelling already completed response", func() {
|
||||
responseID := "resp_idempotent_cancel"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
completedAt := time.Now().Unix()
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusCompleted,
|
||||
CompletedAt: &completedAt,
|
||||
}
|
||||
|
||||
store.Store(responseID, request, response)
|
||||
|
||||
// Try to cancel a completed response
|
||||
cancelledResponse, err := store.Cancel(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
// Status should remain completed (not changed to cancelled)
|
||||
Expect(cancelledResponse.Status).To(Equal(schema.ORStatusCompleted))
|
||||
})
|
||||
|
||||
It("should check if streaming is enabled", func() {
|
||||
responseID := "resp_stream_check"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusQueued,
|
||||
}
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store.StoreBackground(responseID, request, response, cancel, true)
|
||||
|
||||
enabled, err := store.IsStreamEnabled(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(enabled).To(BeTrue())
|
||||
|
||||
// Store another without streaming
|
||||
responseID2 := "resp_no_stream"
|
||||
store.StoreBackground(responseID2, request, response, cancel, false)
|
||||
|
||||
enabled2, err := store.IsStreamEnabled(responseID2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(enabled2).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should notify subscribers of new events", func() {
|
||||
responseID := "resp_events_chan"
|
||||
request := &schema.OpenResponsesRequest{Model: "test"}
|
||||
response := &schema.ORResponseResource{
|
||||
ID: responseID,
|
||||
Object: "response",
|
||||
Status: schema.ORStatusInProgress,
|
||||
}
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
store.StoreBackground(responseID, request, response, cancel, true)
|
||||
|
||||
eventsChan, err := store.GetEventsChan(responseID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(eventsChan).ToNot(BeNil())
|
||||
|
||||
// Append an event
|
||||
event := &schema.ORStreamEvent{
|
||||
Type: "response.output_text.delta",
|
||||
SequenceNumber: 0,
|
||||
}
|
||||
|
||||
go func() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
store.AppendEvent(responseID, event)
|
||||
}()
|
||||
|
||||
// Wait for notification
|
||||
select {
|
||||
case <-eventsChan:
|
||||
// Event received
|
||||
case <-time.After(1 * time.Second):
|
||||
Fail("Timeout waiting for event notification")
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,13 +1,33 @@
|
||||
package http_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var (
|
||||
tmpdir string
|
||||
modelDir string
|
||||
)
|
||||
|
||||
func TestLocalAI(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
var err error
|
||||
tmpdir, err = os.MkdirTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
modelDir = filepath.Join(tmpdir, "models")
|
||||
err = os.Mkdir(modelDir, 0750)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
AfterSuite(func() {
|
||||
err := os.RemoveAll(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
RunSpecs(t, "LocalAI HTTP test suite")
|
||||
}
|
||||
|
||||
@@ -484,3 +484,103 @@ func mergeOpenAIRequestAndModelConfig(config *config.ModelConfig, input *schema.
|
||||
}
|
||||
return fmt.Errorf("unable to validate configuration after merging")
|
||||
}
|
||||
|
||||
func (re *RequestExtractor) SetOpenResponsesRequest(c echo.Context) error {
|
||||
input, ok := c.Get(CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.OpenResponsesRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return echo.ErrBadRequest
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return echo.ErrBadRequest
|
||||
}
|
||||
|
||||
// Extract or generate the correlation ID (Open Responses uses x-request-id)
|
||||
correlationID := c.Request().Header.Get("x-request-id")
|
||||
if correlationID == "" {
|
||||
correlationID = uuid.New().String()
|
||||
}
|
||||
c.Response().Header().Set("x-request-id", correlationID)
|
||||
|
||||
// Use the request context directly - Echo properly supports context cancellation!
|
||||
reqCtx := c.Request().Context()
|
||||
c1, cancel := context.WithCancel(re.applicationConfig.Context)
|
||||
|
||||
// Cancel when request context is cancelled (client disconnects)
|
||||
go func() {
|
||||
select {
|
||||
case <-reqCtx.Done():
|
||||
cancel()
|
||||
case <-c1.Done():
|
||||
// Already cancelled
|
||||
}
|
||||
}()
|
||||
|
||||
// Add the correlation ID to the new context
|
||||
ctxWithCorrelationID := context.WithValue(c1, CorrelationIDKey, correlationID)
|
||||
|
||||
input.Context = ctxWithCorrelationID
|
||||
input.Cancel = cancel
|
||||
|
||||
err := mergeOpenResponsesRequestAndModelConfig(cfg, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.Model == "" {
|
||||
xlog.Debug("replacing empty cfg.Model with input value", "input.Model", input.Model)
|
||||
cfg.Model = input.Model
|
||||
}
|
||||
|
||||
c.Set(CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, input)
|
||||
c.Set(CONTEXT_LOCALS_KEY_MODEL_CONFIG, cfg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeOpenResponsesRequestAndModelConfig(config *config.ModelConfig, input *schema.OpenResponsesRequest) error {
|
||||
// Temperature
|
||||
if input.Temperature != nil {
|
||||
config.Temperature = input.Temperature
|
||||
}
|
||||
|
||||
// TopP
|
||||
if input.TopP != nil {
|
||||
config.TopP = input.TopP
|
||||
}
|
||||
|
||||
// MaxOutputTokens -> Maxtokens
|
||||
if input.MaxOutputTokens != nil {
|
||||
config.Maxtokens = input.MaxOutputTokens
|
||||
}
|
||||
|
||||
// Convert tools to functions - this will be handled in the endpoint handler
|
||||
// We just validate that tools are present if needed
|
||||
|
||||
// Handle tool_choice
|
||||
if input.ToolChoice != nil {
|
||||
switch tc := input.ToolChoice.(type) {
|
||||
case string:
|
||||
// "auto", "required", or "none"
|
||||
if tc == "required" {
|
||||
config.SetFunctionCallString("required")
|
||||
} else if tc == "none" {
|
||||
// Don't use tools - handled in endpoint
|
||||
}
|
||||
// "auto" is default - let model decide
|
||||
case map[string]interface{}:
|
||||
// Specific tool: {type:"function", name:"..."}
|
||||
if tcType, ok := tc["type"].(string); ok && tcType == "function" {
|
||||
if name, ok := tc["name"].(string); ok {
|
||||
config.SetFunctionCallString(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if valid, _ := config.Validate(); valid {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unable to validate configuration after merging")
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ type APIExchange struct {
|
||||
var traceBuffer *circularbuffer.Queue[APIExchange]
|
||||
var mu sync.Mutex
|
||||
var logChan = make(chan APIExchange, 100)
|
||||
var initOnce sync.Once
|
||||
|
||||
type bodyWriter struct {
|
||||
http.ResponseWriter
|
||||
@@ -53,26 +54,37 @@ func (w *bodyWriter) Flush() {
|
||||
}
|
||||
}
|
||||
|
||||
// TraceMiddleware intercepts and logs JSON API requests and responses
|
||||
func TraceMiddleware(app *application.Application) echo.MiddlewareFunc {
|
||||
if app.ApplicationConfig().EnableTracing && traceBuffer == nil {
|
||||
traceBuffer = circularbuffer.New[APIExchange](app.ApplicationConfig().TracingMaxItems)
|
||||
func initializeTracing(maxItems int) {
|
||||
initOnce.Do(func() {
|
||||
if maxItems <= 0 {
|
||||
maxItems = 100
|
||||
}
|
||||
mu.Lock()
|
||||
traceBuffer = circularbuffer.New[APIExchange](maxItems)
|
||||
mu.Unlock()
|
||||
|
||||
go func() {
|
||||
for exchange := range logChan {
|
||||
mu.Lock()
|
||||
traceBuffer.Enqueue(exchange)
|
||||
if traceBuffer != nil {
|
||||
traceBuffer.Enqueue(exchange)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TraceMiddleware intercepts and logs JSON API requests and responses
|
||||
func TraceMiddleware(app *application.Application) echo.MiddlewareFunc {
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if !app.ApplicationConfig().EnableTracing {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
initializeTracing(app.ApplicationConfig().TracingMaxItems)
|
||||
|
||||
if c.Request().Header.Get("Content-Type") != "application/json" {
|
||||
return next(c)
|
||||
}
|
||||
@@ -138,6 +150,10 @@ func TraceMiddleware(app *application.Application) echo.MiddlewareFunc {
|
||||
// GetTraces returns a copy of the logged API exchanges for display
|
||||
func GetTraces() []APIExchange {
|
||||
mu.Lock()
|
||||
if traceBuffer == nil {
|
||||
mu.Unlock()
|
||||
return []APIExchange{}
|
||||
}
|
||||
traces := traceBuffer.Values()
|
||||
mu.Unlock()
|
||||
|
||||
@@ -151,6 +167,8 @@ func GetTraces() []APIExchange {
|
||||
// ClearTraces clears the in-memory logs
|
||||
func ClearTraces() {
|
||||
mu.Lock()
|
||||
traceBuffer.Clear()
|
||||
if traceBuffer != nil {
|
||||
traceBuffer.Clear()
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
package http_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
openai "github.com/mudler/LocalAI/core/http/endpoints/openai"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("MapOpenAIToVideo", func() {
|
||||
It("maps size and seconds correctly", func() {
|
||||
cases := []struct {
|
||||
name string
|
||||
input *schema.OpenAIRequest
|
||||
raw map[string]interface{}
|
||||
expectsW int32
|
||||
expectsH int32
|
||||
expectsF int32
|
||||
expectsN int32
|
||||
}{
|
||||
{
|
||||
name: "size in input",
|
||||
input: &schema.OpenAIRequest{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
BasicModelRequest: schema.BasicModelRequest{Model: "m"},
|
||||
},
|
||||
Size: "256x128",
|
||||
},
|
||||
expectsW: 256,
|
||||
expectsH: 128,
|
||||
},
|
||||
{
|
||||
name: "size in raw and seconds as string",
|
||||
input: &schema.OpenAIRequest{PredictionOptions: schema.PredictionOptions{BasicModelRequest: schema.BasicModelRequest{Model: "m"}}},
|
||||
raw: map[string]interface{}{"size": "720x480", "seconds": "2"},
|
||||
expectsW: 720,
|
||||
expectsH: 480,
|
||||
expectsF: 30,
|
||||
expectsN: 60,
|
||||
},
|
||||
{
|
||||
name: "seconds as number and fps override",
|
||||
input: &schema.OpenAIRequest{PredictionOptions: schema.PredictionOptions{BasicModelRequest: schema.BasicModelRequest{Model: "m"}}},
|
||||
raw: map[string]interface{}{"seconds": 3.0, "fps": 24.0},
|
||||
expectsF: 24,
|
||||
expectsN: 72,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
By(c.name)
|
||||
vr := openai.MapOpenAIToVideo(c.input, c.raw)
|
||||
if c.expectsW != 0 {
|
||||
Expect(vr.Width).To(Equal(c.expectsW))
|
||||
}
|
||||
if c.expectsH != 0 {
|
||||
Expect(vr.Height).To(Equal(c.expectsH))
|
||||
}
|
||||
if c.expectsF != 0 {
|
||||
Expect(vr.FPS).To(Equal(c.expectsF))
|
||||
}
|
||||
if c.expectsN != 0 {
|
||||
Expect(vr.NumFrames).To(Equal(c.expectsN))
|
||||
}
|
||||
|
||||
b, err := json.Marshal(vr)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_ = b
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
package http_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/system"
|
||||
"github.com/mudler/LocalAI/pkg/grpc"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"fmt"
|
||||
. "github.com/mudler/LocalAI/core/http"
|
||||
"github.com/labstack/echo/v4"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const testAPIKey = "joshua"
|
||||
|
||||
type fakeAI struct{}
|
||||
|
||||
func (f *fakeAI) Busy() bool { return false }
|
||||
func (f *fakeAI) Lock() {}
|
||||
func (f *fakeAI) Unlock() {}
|
||||
func (f *fakeAI) Locking() bool { return false }
|
||||
func (f *fakeAI) Predict(*pb.PredictOptions) (string, error) { return "", nil }
|
||||
func (f *fakeAI) PredictStream(*pb.PredictOptions, chan string) error {
|
||||
return nil
|
||||
}
|
||||
func (f *fakeAI) Load(*pb.ModelOptions) error { return nil }
|
||||
func (f *fakeAI) Embeddings(*pb.PredictOptions) ([]float32, error) { return nil, nil }
|
||||
func (f *fakeAI) GenerateImage(*pb.GenerateImageRequest) error { return nil }
|
||||
func (f *fakeAI) GenerateVideo(*pb.GenerateVideoRequest) error { return nil }
|
||||
func (f *fakeAI) Detect(*pb.DetectOptions) (pb.DetectResponse, error) { return pb.DetectResponse{}, nil }
|
||||
func (f *fakeAI) AudioTranscription(*pb.TranscriptRequest) (pb.TranscriptResult, error) {
|
||||
return pb.TranscriptResult{}, nil
|
||||
}
|
||||
func (f *fakeAI) TTS(*pb.TTSRequest) error { return nil }
|
||||
func (f *fakeAI) SoundGeneration(*pb.SoundGenerationRequest) error { return nil }
|
||||
func (f *fakeAI) TokenizeString(*pb.PredictOptions) (pb.TokenizationResponse, error) {
|
||||
return pb.TokenizationResponse{}, nil
|
||||
}
|
||||
func (f *fakeAI) Status() (pb.StatusResponse, error) { return pb.StatusResponse{}, nil }
|
||||
func (f *fakeAI) StoresSet(*pb.StoresSetOptions) error { return nil }
|
||||
func (f *fakeAI) StoresDelete(*pb.StoresDeleteOptions) error { return nil }
|
||||
func (f *fakeAI) StoresGet(*pb.StoresGetOptions) (pb.StoresGetResult, error) {
|
||||
return pb.StoresGetResult{}, nil
|
||||
}
|
||||
func (f *fakeAI) StoresFind(*pb.StoresFindOptions) (pb.StoresFindResult, error) {
|
||||
return pb.StoresFindResult{}, nil
|
||||
}
|
||||
func (f *fakeAI) VAD(*pb.VADRequest) (pb.VADResponse, error) { return pb.VADResponse{}, nil }
|
||||
|
||||
var _ = Describe("OpenAI /v1/videos (embedded backend)", func() {
|
||||
var tmpdir string
|
||||
var appServer *application.Application
|
||||
var app *echo.Echo
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
tmpdir, err = os.MkdirTemp("", "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
modelDir := filepath.Join(tmpdir, "models")
|
||||
err = os.Mkdir(modelDir, 0750)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
|
||||
systemState, err := system.GetSystemState(
|
||||
system.WithModelPath(modelDir),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
grpc.Provide("embedded://fake", &fakeAI{})
|
||||
|
||||
appServer, err = application.New(
|
||||
config.WithContext(ctx),
|
||||
config.WithSystemState(systemState),
|
||||
config.WithApiKeys([]string{testAPIKey}),
|
||||
config.WithGeneratedContentDir(tmpdir),
|
||||
config.WithExternalBackend("fake", "embedded://fake"),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cancel()
|
||||
if app != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
_ = app.Shutdown(ctx)
|
||||
}
|
||||
_ = os.RemoveAll(tmpdir)
|
||||
})
|
||||
|
||||
It("accepts OpenAI-style video create and delegates to backend", func() {
|
||||
var err error
|
||||
app, err = API(appServer)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
go func() {
|
||||
if err := app.Start("127.0.0.1:9091"); err != nil && err != http.ErrServerClosed {
|
||||
// Log error if needed
|
||||
}
|
||||
}()
|
||||
|
||||
// wait for server
|
||||
client := &http.Client{Timeout: 5 * time.Second}
|
||||
Eventually(func() error {
|
||||
req, _ := http.NewRequest("GET", "http://127.0.0.1:9091/v1/models", nil)
|
||||
req.Header.Set("Authorization", "Bearer "+testAPIKey)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Errorf("bad status: %d", resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}, "30s", "500ms").Should(Succeed())
|
||||
|
||||
body := map[string]interface{}{
|
||||
"model": "fake-model",
|
||||
"backend": "fake",
|
||||
"prompt": "a test video",
|
||||
"size": "256x256",
|
||||
"seconds": "1",
|
||||
}
|
||||
payload, err := json.Marshal(body)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
req, err := http.NewRequest("POST", "http://127.0.0.1:9091/v1/videos", bytes.NewBuffer(payload))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+testAPIKey)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer resp.Body.Close()
|
||||
Expect(resp.StatusCode).To(Equal(200))
|
||||
|
||||
dat, err := io.ReadAll(resp.Body)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var out map[string]interface{}
|
||||
err = json.Unmarshal(dat, &out)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
data, ok := out["data"].([]interface{})
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(len(data)).To(BeNumerically(">", 0))
|
||||
first := data[0].(map[string]interface{})
|
||||
url, ok := first["url"].(string)
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(url).To(ContainSubstring("/generated-videos/"))
|
||||
Expect(url).To(ContainSubstring(".mp4"))
|
||||
})
|
||||
})
|
||||
1027
core/http/openresponses_test.go
Normal file
1027
core/http/openresponses_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -152,27 +152,6 @@ func RegisterOpenAIRoutes(app *echo.Echo,
|
||||
app.POST("/v1/images/inpainting", inpaintingHandler, imageMiddleware...)
|
||||
app.POST("/images/inpainting", inpaintingHandler, imageMiddleware...)
|
||||
|
||||
// videos (OpenAI-compatible endpoints mapped to LocalAI video handler)
|
||||
videoHandler := openai.VideoEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig())
|
||||
videoMiddleware := []echo.MiddlewareFunc{
|
||||
traceMiddleware,
|
||||
re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_VIDEO)),
|
||||
re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenAIRequest) }),
|
||||
func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if err := re.SetOpenAIRequest(c); err != nil {
|
||||
return err
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// OpenAI-style create video endpoint
|
||||
app.POST("/v1/videos", videoHandler, videoMiddleware...)
|
||||
app.POST("/v1/videos/generations", videoHandler, videoMiddleware...)
|
||||
app.POST("/videos", videoHandler, videoMiddleware...)
|
||||
|
||||
// List models
|
||||
app.GET("/v1/models", openai.ListModelsEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()))
|
||||
app.GET("/models", openai.ListModelsEndpoint(application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig()))
|
||||
|
||||
58
core/http/routes/openresponses.go
Normal file
58
core/http/routes/openresponses.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package routes
|
||||
|
||||
import (
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/openresponses"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
)
|
||||
|
||||
func RegisterOpenResponsesRoutes(app *echo.Echo,
|
||||
re *middleware.RequestExtractor,
|
||||
application *application.Application) {
|
||||
|
||||
// Open Responses API endpoint
|
||||
responsesHandler := openresponses.ResponsesEndpoint(
|
||||
application.ModelConfigLoader(),
|
||||
application.ModelLoader(),
|
||||
application.TemplatesEvaluator(),
|
||||
application.ApplicationConfig(),
|
||||
)
|
||||
|
||||
responsesMiddleware := []echo.MiddlewareFunc{
|
||||
middleware.TraceMiddleware(application),
|
||||
re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_CHAT)),
|
||||
re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.OpenResponsesRequest) }),
|
||||
setOpenResponsesRequestContext(re),
|
||||
}
|
||||
|
||||
// Main Open Responses endpoint
|
||||
app.POST("/v1/responses", responsesHandler, responsesMiddleware...)
|
||||
|
||||
// Also support without version prefix for compatibility
|
||||
app.POST("/responses", responsesHandler, responsesMiddleware...)
|
||||
|
||||
// GET /responses/:id - Retrieve a response (for polling background requests)
|
||||
getResponseHandler := openresponses.GetResponseEndpoint()
|
||||
app.GET("/v1/responses/:id", getResponseHandler, middleware.TraceMiddleware(application))
|
||||
app.GET("/responses/:id", getResponseHandler, middleware.TraceMiddleware(application))
|
||||
|
||||
// POST /responses/:id/cancel - Cancel a background response
|
||||
cancelResponseHandler := openresponses.CancelResponseEndpoint()
|
||||
app.POST("/v1/responses/:id/cancel", cancelResponseHandler, middleware.TraceMiddleware(application))
|
||||
app.POST("/responses/:id/cancel", cancelResponseHandler, middleware.TraceMiddleware(application))
|
||||
}
|
||||
|
||||
// setOpenResponsesRequestContext sets up the context and cancel function for Open Responses requests
|
||||
func setOpenResponsesRequestContext(re *middleware.RequestExtractor) echo.MiddlewareFunc {
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if err := re.SetOpenResponsesRequest(c); err != nil {
|
||||
return err
|
||||
}
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -135,9 +135,9 @@ async function promptVideo() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Make API request
|
||||
// Make API request to LocalAI endpoint
|
||||
try {
|
||||
const response = await fetch("v1/videos/generations", {
|
||||
const response = await fetch("video", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
@@ -219,9 +219,13 @@ async function promptVideo() {
|
||||
`;
|
||||
captionDiv.appendChild(detailsDiv);
|
||||
|
||||
// Button container
|
||||
const buttonContainer = document.createElement("div");
|
||||
buttonContainer.className = "mt-1.5 flex gap-2";
|
||||
|
||||
// Copy prompt button
|
||||
const copyBtn = document.createElement("button");
|
||||
copyBtn.className = "mt-1.5 px-2 py-0.5 text-[10px] bg-[var(--color-primary)] text-white rounded hover:opacity-80";
|
||||
copyBtn.className = "px-2 py-0.5 text-[10px] bg-[var(--color-primary)] text-white rounded hover:opacity-80";
|
||||
copyBtn.innerHTML = '<i class="fas fa-copy mr-1"></i>Copy Prompt';
|
||||
copyBtn.onclick = () => {
|
||||
navigator.clipboard.writeText(prompt).then(() => {
|
||||
@@ -231,7 +235,18 @@ async function promptVideo() {
|
||||
}, 2000);
|
||||
});
|
||||
};
|
||||
captionDiv.appendChild(copyBtn);
|
||||
buttonContainer.appendChild(copyBtn);
|
||||
|
||||
// Download video button
|
||||
const downloadBtn = document.createElement("button");
|
||||
downloadBtn.className = "px-2 py-0.5 text-[10px] bg-[var(--color-primary)] text-white rounded hover:opacity-80";
|
||||
downloadBtn.innerHTML = '<i class="fas fa-download mr-1"></i>Download Video';
|
||||
downloadBtn.onclick = () => {
|
||||
downloadVideo(item, downloadBtn);
|
||||
};
|
||||
buttonContainer.appendChild(downloadBtn);
|
||||
|
||||
captionDiv.appendChild(buttonContainer);
|
||||
|
||||
videoContainer.appendChild(captionDiv);
|
||||
resultDiv.appendChild(videoContainer);
|
||||
@@ -269,6 +284,67 @@ function escapeHtml(text) {
|
||||
return div.innerHTML;
|
||||
}
|
||||
|
||||
// Helper function to download video
|
||||
function downloadVideo(item, button) {
|
||||
try {
|
||||
let videoUrl;
|
||||
let filename = "generated-video.mp4";
|
||||
|
||||
if (item.url) {
|
||||
// If we have a URL, use it directly
|
||||
videoUrl = item.url;
|
||||
// Extract filename from URL if possible
|
||||
const urlParts = item.url.split("/");
|
||||
if (urlParts.length > 0) {
|
||||
const lastPart = urlParts[urlParts.length - 1];
|
||||
if (lastPart && lastPart.includes(".")) {
|
||||
filename = lastPart;
|
||||
}
|
||||
}
|
||||
} else if (item.b64_json) {
|
||||
// Convert base64 to blob
|
||||
const byteCharacters = atob(item.b64_json);
|
||||
const byteNumbers = new Array(byteCharacters.length);
|
||||
for (let i = 0; i < byteCharacters.length; i++) {
|
||||
byteNumbers[i] = byteCharacters.charCodeAt(i);
|
||||
}
|
||||
const byteArray = new Uint8Array(byteNumbers);
|
||||
const blob = new Blob([byteArray], { type: "video/mp4" });
|
||||
videoUrl = URL.createObjectURL(blob);
|
||||
} else {
|
||||
console.error("No video data available for download");
|
||||
return;
|
||||
}
|
||||
|
||||
// Create a temporary anchor element to trigger download
|
||||
const link = document.createElement("a");
|
||||
link.href = videoUrl;
|
||||
link.download = filename;
|
||||
link.style.display = "none";
|
||||
document.body.appendChild(link);
|
||||
link.click();
|
||||
document.body.removeChild(link);
|
||||
|
||||
// Clean up object URL if we created one
|
||||
if (item.b64_json && videoUrl.startsWith("blob:")) {
|
||||
setTimeout(() => URL.revokeObjectURL(videoUrl), 100);
|
||||
}
|
||||
|
||||
// Show feedback
|
||||
const originalHTML = button.innerHTML;
|
||||
button.innerHTML = '<i class="fas fa-check mr-1"></i>Downloaded!';
|
||||
setTimeout(() => {
|
||||
button.innerHTML = originalHTML;
|
||||
}, 2000);
|
||||
} catch (error) {
|
||||
console.error("Error downloading video:", error);
|
||||
button.innerHTML = '<i class="fas fa-exclamation-triangle mr-1"></i>Error';
|
||||
setTimeout(() => {
|
||||
button.innerHTML = '<i class="fas fa-download mr-1"></i>Download Video';
|
||||
}, 2000);
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize
|
||||
document.addEventListener("DOMContentLoaded", function() {
|
||||
const input = document.getElementById("input");
|
||||
|
||||
@@ -28,6 +28,9 @@
|
||||
<a href="image/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-image text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Images
|
||||
</a>
|
||||
<a href="video/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-video text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Video
|
||||
</a>
|
||||
<a href="tts/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fa-solid fa-music text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>TTS
|
||||
</a>
|
||||
@@ -88,6 +91,9 @@
|
||||
<a href="image/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-image text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Images
|
||||
</a>
|
||||
<a href="video/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-video text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Video
|
||||
</a>
|
||||
<a href="tts/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fa-solid fa-music text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>TTS
|
||||
</a>
|
||||
|
||||
@@ -485,6 +485,28 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Open Responses Settings Section -->
|
||||
<div class="bg-[var(--color-bg-secondary)] border border-[var(--color-accent)]/20 rounded-lg p-6">
|
||||
<h2 class="text-xl font-semibold text-[var(--color-text-primary)] mb-4 flex items-center">
|
||||
<i class="fas fa-database mr-2 text-[var(--color-accent)] text-sm"></i>
|
||||
Open Responses Settings
|
||||
</h2>
|
||||
<p class="text-xs text-[var(--color-text-secondary)] mb-4">
|
||||
Configure Open Responses API response storage
|
||||
</p>
|
||||
|
||||
<div class="space-y-4">
|
||||
<!-- Store TTL -->
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-[var(--color-text-primary)] mb-2">Response Store TTL</label>
|
||||
<p class="text-xs text-[var(--color-text-secondary)] mb-2">Time-to-live for stored responses (e.g., 1h, 30m, 0 = no expiration)</p>
|
||||
<input type="text" x-model="settings.open_responses_store_ttl"
|
||||
placeholder="0"
|
||||
class="w-full px-3 py-2 bg-[var(--color-bg-primary)] border border-[var(--color-accent)]/20 rounded text-sm text-[var(--color-text-primary)] focus:outline-none focus:ring-2 focus:ring-[var(--color-accent)]/50">
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- API Keys Settings Section -->
|
||||
<div class="bg-[var(--color-bg-secondary)] border border-[var(--color-error-light)] rounded-lg p-6">
|
||||
<h2 class="text-xl font-semibold text-[var(--color-text-primary)] mb-4 flex items-center">
|
||||
@@ -633,7 +655,8 @@ function settingsDashboard() {
|
||||
galleries_json: '[]',
|
||||
backend_galleries_json: '[]',
|
||||
api_keys_text: '',
|
||||
agent_job_retention_days: 30
|
||||
agent_job_retention_days: 30,
|
||||
open_responses_store_ttl: '0'
|
||||
},
|
||||
sourceInfo: '',
|
||||
saving: false,
|
||||
@@ -680,7 +703,8 @@ function settingsDashboard() {
|
||||
galleries_json: JSON.stringify(data.galleries || [], null, 2),
|
||||
backend_galleries_json: JSON.stringify(data.backend_galleries || [], null, 2),
|
||||
api_keys_text: (data.api_keys || []).join('\n'),
|
||||
agent_job_retention_days: data.agent_job_retention_days || 30
|
||||
agent_job_retention_days: data.agent_job_retention_days || 30,
|
||||
open_responses_store_ttl: data.open_responses_store_ttl || '0'
|
||||
};
|
||||
this.sourceInfo = data.source || 'default';
|
||||
} else {
|
||||
@@ -838,6 +862,9 @@ function settingsDashboard() {
|
||||
if (this.settings.agent_job_retention_days !== undefined) {
|
||||
payload.agent_job_retention_days = parseInt(this.settings.agent_job_retention_days) || 30;
|
||||
}
|
||||
if (this.settings.open_responses_store_ttl !== undefined) {
|
||||
payload.open_responses_store_ttl = this.settings.open_responses_store_ttl;
|
||||
}
|
||||
|
||||
const response = await fetch('/api/settings', {
|
||||
method: 'POST',
|
||||
|
||||
311
core/schema/openresponses.go
Normal file
311
core/schema/openresponses.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// Open Responses status constants
|
||||
const (
|
||||
ORStatusQueued = "queued"
|
||||
ORStatusInProgress = "in_progress"
|
||||
ORStatusCompleted = "completed"
|
||||
ORStatusFailed = "failed"
|
||||
ORStatusIncomplete = "incomplete"
|
||||
ORStatusCancelled = "cancelled"
|
||||
)
|
||||
|
||||
// OpenResponsesRequest represents a request to the Open Responses API
|
||||
// https://www.openresponses.org/specification
|
||||
type OpenResponsesRequest struct {
|
||||
Model string `json:"model"`
|
||||
Input interface{} `json:"input"` // string or []ORItemParam
|
||||
Tools []ORFunctionTool `json:"tools,omitempty"`
|
||||
ToolChoice interface{} `json:"tool_choice,omitempty"` // "auto"|"required"|"none"|{type:"function",name:"..."}
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
MaxOutputTokens *int `json:"max_output_tokens,omitempty"`
|
||||
Temperature *float64 `json:"temperature,omitempty"`
|
||||
TopP *float64 `json:"top_p,omitempty"`
|
||||
Truncation string `json:"truncation,omitempty"` // "auto"|"disabled"
|
||||
Instructions string `json:"instructions,omitempty"`
|
||||
Reasoning *ORReasoningParam `json:"reasoning,omitempty"`
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
PreviousResponseID string `json:"previous_response_id,omitempty"`
|
||||
|
||||
// Additional parameters from spec
|
||||
TextFormat interface{} `json:"text_format,omitempty"` // TextResponseFormat or JsonSchemaResponseFormatParam
|
||||
ServiceTier string `json:"service_tier,omitempty"` // "auto"|"default"|priority hint
|
||||
AllowedTools []string `json:"allowed_tools,omitempty"` // Restrict which tools can be invoked
|
||||
Store *bool `json:"store,omitempty"` // Whether to store the response
|
||||
Include []string `json:"include,omitempty"` // What to include in response
|
||||
ParallelToolCalls *bool `json:"parallel_tool_calls,omitempty"` // Allow parallel tool calls
|
||||
PresencePenalty *float64 `json:"presence_penalty,omitempty"` // Presence penalty (-2.0 to 2.0)
|
||||
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` // Frequency penalty (-2.0 to 2.0)
|
||||
TopLogprobs *int `json:"top_logprobs,omitempty"` // Number of top logprobs to return
|
||||
Background *bool `json:"background,omitempty"` // Run request in background
|
||||
MaxToolCalls *int `json:"max_tool_calls,omitempty"` // Maximum number of tool calls
|
||||
|
||||
// OpenAI-compatible extensions (not in Open Responses spec)
|
||||
LogitBias map[string]float64 `json:"logit_bias,omitempty"` // Map of token IDs to bias values (-100 to 100)
|
||||
|
||||
// Internal fields (like OpenAIRequest)
|
||||
Context context.Context `json:"-"`
|
||||
Cancel context.CancelFunc `json:"-"`
|
||||
}
|
||||
|
||||
// ModelName implements the LocalAIRequest interface
|
||||
func (r *OpenResponsesRequest) ModelName(s *string) string {
|
||||
if s != nil {
|
||||
r.Model = *s
|
||||
}
|
||||
return r.Model
|
||||
}
|
||||
|
||||
// ORFunctionTool represents a function tool definition
|
||||
type ORFunctionTool struct {
|
||||
Type string `json:"type"` // always "function"
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Parameters map[string]interface{} `json:"parameters,omitempty"`
|
||||
Strict bool `json:"strict"` // Always include in response
|
||||
}
|
||||
|
||||
// ORReasoningParam represents reasoning configuration
|
||||
type ORReasoningParam struct {
|
||||
Effort string `json:"effort,omitempty"` // "none"|"low"|"medium"|"high"|"xhigh"
|
||||
Summary string `json:"summary,omitempty"` // "auto"|"concise"|"detailed"
|
||||
}
|
||||
|
||||
// ORItemParam represents an input/output item (discriminated union by type)
|
||||
type ORItemParam struct {
|
||||
Type string `json:"type"` // message|function_call|function_call_output|reasoning|item_reference
|
||||
ID string `json:"id,omitempty"` // Present for all output items
|
||||
Status string `json:"status,omitempty"` // in_progress|completed|incomplete
|
||||
|
||||
// Message fields
|
||||
Role string `json:"role,omitempty"` // user|assistant|system|developer
|
||||
Content interface{} `json:"content,omitempty"` // string or []ORContentPart for messages
|
||||
|
||||
// Function call fields
|
||||
CallID string `json:"call_id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Arguments string `json:"arguments,omitempty"`
|
||||
|
||||
// Function call output fields
|
||||
Output interface{} `json:"output,omitempty"` // string or []ORContentPart
|
||||
|
||||
// Reasoning fields (for type == "reasoning")
|
||||
Summary []ORContentPart `json:"summary,omitempty"` // Array of summary parts
|
||||
EncryptedContent *string `json:"encrypted_content,omitempty"` // Provider-specific encrypted content
|
||||
|
||||
// Note: For item_reference type, use the ID field above to reference the item
|
||||
// Note: For reasoning type, Content field (from message fields) contains the raw reasoning content
|
||||
}
|
||||
|
||||
// ORContentPart represents a content block (discriminated union by type)
|
||||
// For output_text: type, text, annotations, logprobs are ALL REQUIRED per Open Responses spec
|
||||
type ORContentPart struct {
|
||||
Type string `json:"type"` // input_text|input_image|input_file|output_text|refusal
|
||||
Text string `json:"text"` // REQUIRED for output_text - must always be present (even if empty)
|
||||
Annotations []ORAnnotation `json:"annotations"` // REQUIRED for output_text - must always be present (use [])
|
||||
Logprobs []ORLogProb `json:"logprobs"` // REQUIRED for output_text - must always be present (use [])
|
||||
ImageURL string `json:"image_url,omitempty"`
|
||||
FileURL string `json:"file_url,omitempty"`
|
||||
Filename string `json:"filename,omitempty"`
|
||||
FileData string `json:"file_data,omitempty"`
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
Detail string `json:"detail,omitempty"` // low|high|auto for images
|
||||
}
|
||||
|
||||
// OROutputTextContentPart is an alias for ORContentPart used specifically for output_text
|
||||
type OROutputTextContentPart = ORContentPart
|
||||
|
||||
// ORItemField represents an output item (same structure as ORItemParam)
|
||||
type ORItemField = ORItemParam
|
||||
|
||||
// ORResponseResource represents the main response object
|
||||
type ORResponseResource struct {
|
||||
ID string `json:"id"`
|
||||
Object string `json:"object"` // always "response"
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
CompletedAt *int64 `json:"completed_at"` // Required: present as number or null
|
||||
Status string `json:"status"` // in_progress|completed|failed|incomplete
|
||||
Model string `json:"model"`
|
||||
Output []ORItemField `json:"output"`
|
||||
Error *ORError `json:"error"` // Always present, null if no error
|
||||
IncompleteDetails *ORIncompleteDetails `json:"incomplete_details"` // Always present, null if complete
|
||||
PreviousResponseID *string `json:"previous_response_id"`
|
||||
Instructions *string `json:"instructions"`
|
||||
|
||||
// Tool-related fields
|
||||
Tools []ORFunctionTool `json:"tools"` // Always present, empty array if no tools
|
||||
ToolChoice interface{} `json:"tool_choice"`
|
||||
ParallelToolCalls bool `json:"parallel_tool_calls"`
|
||||
MaxToolCalls *int `json:"max_tool_calls"` // nullable
|
||||
|
||||
// Sampling parameters (always required)
|
||||
Temperature float64 `json:"temperature"`
|
||||
TopP float64 `json:"top_p"`
|
||||
PresencePenalty float64 `json:"presence_penalty"`
|
||||
FrequencyPenalty float64 `json:"frequency_penalty"`
|
||||
TopLogprobs int `json:"top_logprobs"` // Default to 0
|
||||
MaxOutputTokens *int `json:"max_output_tokens"`
|
||||
|
||||
// Text format configuration
|
||||
Text *ORTextConfig `json:"text"`
|
||||
|
||||
// Truncation and reasoning
|
||||
Truncation string `json:"truncation"`
|
||||
Reasoning *ORReasoning `json:"reasoning"` // nullable
|
||||
|
||||
// Usage statistics
|
||||
Usage *ORUsage `json:"usage"` // nullable
|
||||
|
||||
// Metadata and operational flags
|
||||
Metadata map[string]string `json:"metadata"`
|
||||
Store bool `json:"store"`
|
||||
Background bool `json:"background"`
|
||||
ServiceTier string `json:"service_tier"`
|
||||
|
||||
// Safety and caching
|
||||
SafetyIdentifier *string `json:"safety_identifier"` // nullable
|
||||
PromptCacheKey *string `json:"prompt_cache_key"` // nullable
|
||||
}
|
||||
|
||||
// ORTextConfig represents text format configuration
|
||||
type ORTextConfig struct {
|
||||
Format *ORTextFormat `json:"format,omitempty"`
|
||||
}
|
||||
|
||||
// ORTextFormat represents the text format type
|
||||
type ORTextFormat struct {
|
||||
Type string `json:"type"` // "text" or "json_schema"
|
||||
}
|
||||
|
||||
// ORError represents an error in the response
|
||||
type ORError struct {
|
||||
Type string `json:"type"` // invalid_request|not_found|server_error|model_error|too_many_requests
|
||||
Code string `json:"code,omitempty"`
|
||||
Message string `json:"message"`
|
||||
Param string `json:"param,omitempty"`
|
||||
}
|
||||
|
||||
// ORUsage represents token usage statistics
|
||||
type ORUsage struct {
|
||||
InputTokens int `json:"input_tokens"`
|
||||
OutputTokens int `json:"output_tokens"`
|
||||
TotalTokens int `json:"total_tokens"`
|
||||
InputTokensDetails *ORInputTokensDetails `json:"input_tokens_details"` // Always present
|
||||
OutputTokensDetails *OROutputTokensDetails `json:"output_tokens_details"` // Always present
|
||||
}
|
||||
|
||||
// ORInputTokensDetails represents input token breakdown
|
||||
type ORInputTokensDetails struct {
|
||||
CachedTokens int `json:"cached_tokens"` // Always include, even if 0
|
||||
}
|
||||
|
||||
// OROutputTokensDetails represents output token breakdown
|
||||
type OROutputTokensDetails struct {
|
||||
ReasoningTokens int `json:"reasoning_tokens"` // Always include, even if 0
|
||||
}
|
||||
|
||||
// ORReasoning represents reasoning configuration and metadata
|
||||
type ORReasoning struct {
|
||||
Effort string `json:"effort,omitempty"`
|
||||
Summary string `json:"summary,omitempty"`
|
||||
}
|
||||
|
||||
// ORIncompleteDetails represents details about why a response was incomplete
|
||||
type ORIncompleteDetails struct {
|
||||
Reason string `json:"reason"`
|
||||
}
|
||||
|
||||
// ORStreamEvent represents a streaming event
|
||||
// Note: Fields like delta, text, logprobs should be set explicitly for events that require them
|
||||
// The sendSSEEvent function uses a custom serializer to handle conditional field inclusion
|
||||
type ORStreamEvent struct {
|
||||
Type string `json:"type"`
|
||||
SequenceNumber int `json:"sequence_number"`
|
||||
Response *ORResponseResource `json:"response,omitempty"`
|
||||
OutputIndex *int `json:"output_index,omitempty"`
|
||||
ContentIndex *int `json:"content_index,omitempty"`
|
||||
SummaryIndex *int `json:"summary_index,omitempty"`
|
||||
ItemID string `json:"item_id,omitempty"`
|
||||
Item *ORItemField `json:"item,omitempty"`
|
||||
Part *ORContentPart `json:"part,omitempty"`
|
||||
Delta *string `json:"delta,omitempty"` // Pointer to distinguish unset from empty
|
||||
Text *string `json:"text,omitempty"` // Pointer to distinguish unset from empty
|
||||
Arguments *string `json:"arguments,omitempty"` // Pointer to distinguish unset from empty
|
||||
Refusal string `json:"refusal,omitempty"`
|
||||
Error *ORErrorPayload `json:"error,omitempty"`
|
||||
Logprobs *[]ORLogProb `json:"logprobs,omitempty"` // Pointer to distinguish unset from empty
|
||||
Obfuscation string `json:"obfuscation,omitempty"`
|
||||
Annotation *ORAnnotation `json:"annotation,omitempty"`
|
||||
AnnotationIndex *int `json:"annotation_index,omitempty"`
|
||||
}
|
||||
|
||||
// ORErrorPayload represents an error payload in streaming events
|
||||
type ORErrorPayload struct {
|
||||
Type string `json:"type"`
|
||||
Code string `json:"code,omitempty"`
|
||||
Message string `json:"message"`
|
||||
Param string `json:"param,omitempty"`
|
||||
Headers map[string]string `json:"headers,omitempty"`
|
||||
}
|
||||
|
||||
// ORLogProb represents log probability information
|
||||
type ORLogProb struct {
|
||||
Token string `json:"token"`
|
||||
Logprob float64 `json:"logprob"`
|
||||
Bytes []int `json:"bytes"`
|
||||
TopLogprobs []ORTopLogProb `json:"top_logprobs,omitempty"`
|
||||
}
|
||||
|
||||
// ORTopLogProb represents a top log probability
|
||||
type ORTopLogProb struct {
|
||||
Token string `json:"token"`
|
||||
Logprob float64 `json:"logprob"`
|
||||
Bytes []int `json:"bytes"`
|
||||
}
|
||||
|
||||
// ORAnnotation represents an annotation (e.g., URL citation)
|
||||
type ORAnnotation struct {
|
||||
Type string `json:"type"` // url_citation
|
||||
StartIndex int `json:"start_index"`
|
||||
EndIndex int `json:"end_index"`
|
||||
URL string `json:"url"`
|
||||
Title string `json:"title"`
|
||||
}
|
||||
|
||||
// ORContentPartWithLogprobs creates an output_text content part with logprobs converted from OpenAI format
|
||||
func ORContentPartWithLogprobs(text string, logprobs *Logprobs) ORContentPart {
|
||||
orLogprobs := []ORLogProb{}
|
||||
|
||||
// Convert OpenAI-style logprobs to Open Responses format
|
||||
if logprobs != nil && len(logprobs.Content) > 0 {
|
||||
for _, lp := range logprobs.Content {
|
||||
// Convert top logprobs
|
||||
topLPs := []ORTopLogProb{}
|
||||
for _, tlp := range lp.TopLogprobs {
|
||||
topLPs = append(topLPs, ORTopLogProb{
|
||||
Token: tlp.Token,
|
||||
Logprob: tlp.Logprob,
|
||||
Bytes: tlp.Bytes,
|
||||
})
|
||||
}
|
||||
|
||||
orLogprobs = append(orLogprobs, ORLogProb{
|
||||
Token: lp.Token,
|
||||
Logprob: lp.Logprob,
|
||||
Bytes: lp.Bytes,
|
||||
TopLogprobs: topLPs,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return ORContentPart{
|
||||
Type: "output_text",
|
||||
Text: text,
|
||||
Annotations: []ORAnnotation{}, // REQUIRED - must always be present as array (empty if none)
|
||||
Logprobs: orLogprobs, // REQUIRED - must always be present as array (empty if none)
|
||||
}
|
||||
}
|
||||
@@ -397,6 +397,83 @@ Agent/autonomous agent configuration:
|
||||
| `agent.enable_mcp_prompts` | bool | Enable MCP prompts |
|
||||
| `agent.enable_plan_re_evaluator` | bool | Enable plan re-evaluation |
|
||||
|
||||
## Reasoning Configuration
|
||||
|
||||
Configure how reasoning tags are extracted and processed from model output. Reasoning tags are used by models like DeepSeek, Command-R, and others to include internal reasoning steps in their responses.
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| `reasoning.disable` | bool | `false` | When `true`, disables reasoning extraction entirely. The original content is returned without any processing. |
|
||||
| `reasoning.disable_reasoning_tag_prefill` | bool | `false` | When `true`, disables automatic prepending of thinking start tokens. Use this when your model already includes reasoning tags in its output format. |
|
||||
| `reasoning.strip_reasoning_only` | bool | `false` | When `true`, extracts and removes reasoning tags from content but discards the reasoning text. Useful when you want to clean reasoning tags from output without storing the reasoning content. |
|
||||
| `reasoning.thinking_start_tokens` | array | `[]` | List of custom thinking start tokens to detect in prompts. Custom tokens are checked before default tokens. |
|
||||
| `reasoning.tag_pairs` | array | `[]` | List of custom tag pairs for reasoning extraction. Each entry has `start` and `end` fields. Custom pairs are checked before default pairs. |
|
||||
|
||||
### Reasoning Tag Formats
|
||||
|
||||
The reasoning extraction supports multiple tag formats used by different models:
|
||||
|
||||
- `<thinking>...</thinking>` - General thinking tag
|
||||
- `<think>...</think>` - DeepSeek, Granite, ExaOne, GLM models
|
||||
- `<|START_THINKING|>...<|END_THINKING|>` - Command-R models
|
||||
- `<|inner_prefix|>...<|inner_suffix|>` - Apertus models
|
||||
- `<seed:think>...</seed:think>` - Seed models
|
||||
- `<|think|>...<|end|><|begin|>assistant<|content|>` - Solar Open models
|
||||
- `[THINK]...[/THINK]` - Magistral models
|
||||
|
||||
### Examples
|
||||
|
||||
**Disable reasoning extraction:**
|
||||
```yaml
|
||||
reasoning:
|
||||
disable: true
|
||||
```
|
||||
|
||||
**Extract reasoning but don't prepend tags:**
|
||||
```yaml
|
||||
reasoning:
|
||||
disable_reasoning_tag_prefill: true
|
||||
```
|
||||
|
||||
**Strip reasoning tags without storing reasoning content:**
|
||||
```yaml
|
||||
reasoning:
|
||||
strip_reasoning_only: true
|
||||
```
|
||||
|
||||
**Complete example with reasoning configuration:**
|
||||
```yaml
|
||||
name: deepseek-model
|
||||
backend: llama-cpp
|
||||
parameters:
|
||||
model: deepseek.gguf
|
||||
|
||||
reasoning:
|
||||
disable: false
|
||||
disable_reasoning_tag_prefill: false
|
||||
strip_reasoning_only: false
|
||||
```
|
||||
|
||||
**Example with custom tokens and tag pairs:**
|
||||
```yaml
|
||||
name: custom-reasoning-model
|
||||
backend: llama-cpp
|
||||
parameters:
|
||||
model: custom.gguf
|
||||
|
||||
reasoning:
|
||||
thinking_start_tokens:
|
||||
- "<custom:think>"
|
||||
- "<my:reasoning>"
|
||||
tag_pairs:
|
||||
- start: "<custom:think>"
|
||||
end: "</custom:think>"
|
||||
- start: "<my:reasoning>"
|
||||
end: "</my:reasoning>"
|
||||
```
|
||||
|
||||
**Note:** Custom tokens and tag pairs are checked before the default ones, giving them priority. This allows you to override default behavior or add support for new reasoning tag formats.
|
||||
|
||||
## Pipeline Configuration
|
||||
|
||||
Define pipelines for audio-to-audio processing:
|
||||
|
||||
@@ -72,6 +72,359 @@ You can list all the models available with:
|
||||
curl http://localhost:8080/v1/models
|
||||
```
|
||||
|
||||
### Anthropic Messages API
|
||||
|
||||
LocalAI supports the Anthropic Messages API, which is compatible with Claude clients. This endpoint provides a structured way to send messages and receive responses, with support for tools, streaming, and multimodal content.
|
||||
|
||||
**Endpoint:** `POST /v1/messages` or `POST /messages`
|
||||
|
||||
**Reference:** https://docs.anthropic.com/claude/reference/messages_post
|
||||
|
||||
#### Basic Usage
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "anthropic-version: 2023-06-01" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Say this is a test!"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
#### Request Parameters
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `model` | string | Yes | The model identifier |
|
||||
| `messages` | array | Yes | Array of message objects with `role` and `content` |
|
||||
| `max_tokens` | integer | Yes | Maximum number of tokens to generate (must be > 0) |
|
||||
| `system` | string | No | System message to set the assistant's behavior |
|
||||
| `temperature` | float | No | Sampling temperature (0.0 to 1.0) |
|
||||
| `top_p` | float | No | Nucleus sampling parameter |
|
||||
| `top_k` | integer | No | Top-k sampling parameter |
|
||||
| `stop_sequences` | array | No | Array of strings that will stop generation |
|
||||
| `stream` | boolean | No | Enable streaming responses |
|
||||
| `tools` | array | No | Array of tool definitions for function calling |
|
||||
| `tool_choice` | string/object | No | Tool choice strategy: "auto", "any", "none", or specific tool |
|
||||
| `metadata` | object | No | Custom metadata to attach to the request |
|
||||
|
||||
#### Message Format
|
||||
|
||||
Messages can contain text or structured content blocks:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "What is in this image?"
|
||||
},
|
||||
{
|
||||
"type": "image",
|
||||
"source": {
|
||||
"type": "base64",
|
||||
"media_type": "image/jpeg",
|
||||
"data": "base64_encoded_image_data"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
#### Tool Calling
|
||||
|
||||
The Anthropic API supports function calling through tools:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"max_tokens": 1024,
|
||||
"tools": [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state"
|
||||
}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"tool_choice": "auto",
|
||||
"messages": [
|
||||
{"role": "user", "content": "What is the weather in San Francisco?"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
#### Streaming
|
||||
|
||||
Enable streaming responses by setting `stream: true`:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"max_tokens": 1024,
|
||||
"stream": true,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Tell me a story"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
Streaming responses use Server-Sent Events (SSE) format with event types: `message_start`, `content_block_start`, `content_block_delta`, `content_block_stop`, `message_delta`, and `message_stop`.
|
||||
|
||||
#### Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "msg_abc123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": "This is a test!"
|
||||
}
|
||||
],
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"stop_reason": "end_turn",
|
||||
"usage": {
|
||||
"input_tokens": 10,
|
||||
"output_tokens": 5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Open Responses API
|
||||
|
||||
LocalAI supports the Open Responses API specification, which provides a standardized interface for AI model interactions with support for background processing, streaming, tool calling, and advanced features like reasoning.
|
||||
|
||||
**Endpoint:** `POST /v1/responses` or `POST /responses`
|
||||
|
||||
**Reference:** https://www.openresponses.org/specification
|
||||
|
||||
#### Basic Usage
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"input": "Say this is a test!",
|
||||
"max_output_tokens": 1024
|
||||
}'
|
||||
```
|
||||
|
||||
#### Request Parameters
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `model` | string | Yes | The model identifier |
|
||||
| `input` | string/array | Yes | Input text or array of input items |
|
||||
| `max_output_tokens` | integer | No | Maximum number of tokens to generate |
|
||||
| `temperature` | float | No | Sampling temperature |
|
||||
| `top_p` | float | No | Nucleus sampling parameter |
|
||||
| `instructions` | string | No | System instructions |
|
||||
| `tools` | array | No | Array of tool definitions |
|
||||
| `tool_choice` | string/object | No | Tool choice: "auto", "required", "none", or specific tool |
|
||||
| `stream` | boolean | No | Enable streaming responses |
|
||||
| `background` | boolean | No | Run request in background (returns immediately) |
|
||||
| `store` | boolean | No | Whether to store the response |
|
||||
| `reasoning` | object | No | Reasoning configuration with `effort` and `summary` |
|
||||
| `parallel_tool_calls` | boolean | No | Allow parallel tool calls |
|
||||
| `max_tool_calls` | integer | No | Maximum number of tool calls |
|
||||
| `presence_penalty` | float | No | Presence penalty (-2.0 to 2.0) |
|
||||
| `frequency_penalty` | float | No | Frequency penalty (-2.0 to 2.0) |
|
||||
| `top_logprobs` | integer | No | Number of top logprobs to return |
|
||||
| `truncation` | string | No | Truncation mode: "auto" or "disabled" |
|
||||
| `text_format` | object | No | Text format configuration |
|
||||
| `metadata` | object | No | Custom metadata |
|
||||
|
||||
#### Input Format
|
||||
|
||||
Input can be a simple string or an array of structured items:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"input": [
|
||||
{
|
||||
"type": "message",
|
||||
"role": "user",
|
||||
"content": "What is the weather?"
|
||||
}
|
||||
],
|
||||
"max_output_tokens": 1024
|
||||
}'
|
||||
```
|
||||
|
||||
#### Background Processing
|
||||
|
||||
Run requests in the background for long-running tasks:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"input": "Generate a long story",
|
||||
"max_output_tokens": 4096,
|
||||
"background": true
|
||||
}'
|
||||
```
|
||||
|
||||
The response will include a response ID that can be used to poll for completion:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "resp_abc123",
|
||||
"object": "response",
|
||||
"status": "in_progress",
|
||||
"created_at": 1234567890
|
||||
}
|
||||
```
|
||||
|
||||
#### Retrieving Background Responses
|
||||
|
||||
Use the GET endpoint to retrieve background responses:
|
||||
|
||||
```bash
|
||||
# Get response by ID
|
||||
curl http://localhost:8080/v1/responses/resp_abc123
|
||||
|
||||
# Resume streaming with query parameters
|
||||
curl "http://localhost:8080/v1/responses/resp_abc123?stream=true&starting_after=10"
|
||||
```
|
||||
|
||||
#### Canceling Background Responses
|
||||
|
||||
Cancel a background response that's still in progress:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/v1/responses/resp_abc123/cancel
|
||||
```
|
||||
|
||||
#### Tool Calling
|
||||
|
||||
Open Responses API supports function calling with tools:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"input": "What is the weather in San Francisco?",
|
||||
"tools": [
|
||||
{
|
||||
"type": "function",
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state"
|
||||
}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"tool_choice": "auto",
|
||||
"max_output_tokens": 1024
|
||||
}'
|
||||
```
|
||||
|
||||
#### Reasoning Configuration
|
||||
|
||||
Configure reasoning effort and summary style:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"input": "Solve this complex problem step by step",
|
||||
"reasoning": {
|
||||
"effort": "high",
|
||||
"summary": "detailed"
|
||||
},
|
||||
"max_output_tokens": 2048
|
||||
}'
|
||||
```
|
||||
|
||||
#### Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "resp_abc123",
|
||||
"object": "response",
|
||||
"created_at": 1234567890,
|
||||
"completed_at": 1234567895,
|
||||
"status": "completed",
|
||||
"model": "ggml-koala-7b-model-q4_0-r2.bin",
|
||||
"output": [
|
||||
{
|
||||
"type": "message",
|
||||
"id": "msg_001",
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{
|
||||
"type": "output_text",
|
||||
"text": "This is a test!",
|
||||
"annotations": [],
|
||||
"logprobs": []
|
||||
}
|
||||
],
|
||||
"status": "completed"
|
||||
}
|
||||
],
|
||||
"error": null,
|
||||
"incomplete_details": null,
|
||||
"temperature": 0.7,
|
||||
"top_p": 1.0,
|
||||
"presence_penalty": 0.0,
|
||||
"frequency_penalty": 0.0,
|
||||
"usage": {
|
||||
"input_tokens": 10,
|
||||
"output_tokens": 5,
|
||||
"total_tokens": 15,
|
||||
"input_tokens_details": {
|
||||
"cached_tokens": 0
|
||||
},
|
||||
"output_tokens_details": {
|
||||
"reasoning_tokens": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Backends
|
||||
|
||||
### RWKV
|
||||
|
||||
@@ -215,50 +215,90 @@ curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
|
||||
}' | aplay
|
||||
```
|
||||
|
||||
### Vall-E-X
|
||||
### Qwen3-TTS
|
||||
|
||||
[VALL-E-X](https://github.com/Plachtaa/VALL-E-X) is an open source implementation of Microsoft's VALL-E X zero-shot TTS model.
|
||||
[Qwen3-TTS](https://github.com/QwenLM/Qwen3-TTS) is a high-quality text-to-speech model that supports three modes: custom voice (predefined speakers), voice design (natural language instructions), and voice cloning (from reference audio).
|
||||
|
||||
#### Setup
|
||||
|
||||
The backend will automatically download the required files in order to run the model.
|
||||
|
||||
This is an extra backend - in the container is already available and there is nothing to do for the setup. If you are building manually, you need to install Vall-E-X manually first.
|
||||
Install the `qwen-tts` model in the Model gallery or run `local-ai run models install qwen-tts`.
|
||||
|
||||
#### Usage
|
||||
|
||||
Use the tts endpoint by specifying the vall-e-x backend:
|
||||
Use the tts endpoint by specifying the qwen-tts backend:
|
||||
|
||||
```
|
||||
curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
|
||||
"backend": "vall-e-x",
|
||||
"input":"Hello!"
|
||||
"model": "qwen-tts",
|
||||
"input":"Hello world, this is a test."
|
||||
}' | aplay
|
||||
```
|
||||
|
||||
#### Voice cloning
|
||||
#### Custom Voice Mode
|
||||
|
||||
In order to use voice cloning capabilities you must create a `YAML` configuration file to setup a model:
|
||||
Qwen3-TTS supports predefined speakers. You can specify a speaker using the `voice` parameter:
|
||||
|
||||
```yaml
|
||||
name: cloned-voice
|
||||
backend: vall-e-x
|
||||
name: qwen-tts
|
||||
backend: qwen-tts
|
||||
parameters:
|
||||
model: "cloned-voice"
|
||||
model: Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice
|
||||
tts:
|
||||
vall-e:
|
||||
# The path to the audio file to be cloned
|
||||
# relative to the models directory
|
||||
# Max 15s
|
||||
audio_path: "audio-sample.wav"
|
||||
voice: "Vivian" # Available speakers: Vivian, Serena, Uncle_Fu, Dylan, Eric, Ryan, Aiden, Ono_Anna, Sohee
|
||||
```
|
||||
|
||||
Then you can specify the model name in the requests:
|
||||
Available speakers:
|
||||
- **Chinese**: Vivian, Serena, Uncle_Fu, Dylan, Eric
|
||||
- **English**: Ryan, Aiden
|
||||
- **Japanese**: Ono_Anna
|
||||
- **Korean**: Sohee
|
||||
|
||||
#### Voice Design Mode
|
||||
|
||||
Voice Design allows you to create custom voices using natural language instructions. Configure the model with an `instruct` option:
|
||||
|
||||
```yaml
|
||||
name: qwen-tts-design
|
||||
backend: qwen-tts
|
||||
parameters:
|
||||
model: Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign
|
||||
options:
|
||||
- "instruct:体现撒娇稚嫩的萝莉女声,音调偏高且起伏明显,营造出黏人、做作又刻意卖萌的听觉效果。"
|
||||
```
|
||||
|
||||
Then use the model:
|
||||
|
||||
```
|
||||
curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
|
||||
"model": "cloned-voice",
|
||||
"input":"Hello!"
|
||||
"model": "qwen-tts-design",
|
||||
"input":"Hello world, this is a test."
|
||||
}' | aplay
|
||||
```
|
||||
|
||||
#### Voice Clone Mode
|
||||
|
||||
Voice Clone allows you to clone a voice from reference audio. Configure the model with an `AudioPath` and optional `ref_text`:
|
||||
|
||||
```yaml
|
||||
name: qwen-tts-clone
|
||||
backend: qwen-tts
|
||||
parameters:
|
||||
model: Qwen/Qwen3-TTS-12Hz-1.7B-Base
|
||||
tts:
|
||||
audio_path: "path/to/reference_audio.wav" # Reference audio file
|
||||
options:
|
||||
- "ref_text:This is the transcript of the reference audio."
|
||||
- "x_vector_only_mode:false" # Set to true to use only speaker embedding (ref_text not required)
|
||||
```
|
||||
|
||||
You can also use URLs or base64 strings for the reference audio. The backend automatically detects the mode based on available parameters (AudioPath → VoiceClone, instruct option → VoiceDesign, voice parameter → CustomVoice).
|
||||
|
||||
Then use the model:
|
||||
|
||||
```
|
||||
curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
|
||||
"model": "qwen-tts-clone",
|
||||
"input":"Hello world, this is a test."
|
||||
}' | aplay
|
||||
```
|
||||
|
||||
|
||||
@@ -112,6 +112,66 @@ curl http://localhost:8080/v1/chat/completions \
|
||||
|
||||
</details>
|
||||
|
||||
### Anthropic Messages API
|
||||
|
||||
LocalAI supports the Anthropic Messages API for Claude-compatible models. [Anthropic documentation](https://docs.anthropic.com/claude/reference/messages_post).
|
||||
|
||||
<details>
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/messages \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "anthropic-version: 2023-06-01" \
|
||||
-d '{
|
||||
"model": "gpt-4",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "How are you doing?"}
|
||||
],
|
||||
"temperature": 0.7
|
||||
}'
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Open Responses API
|
||||
|
||||
LocalAI supports the Open Responses API specification with support for background processing, streaming, and advanced features. [Open Responses documentation](https://www.openresponses.org/specification).
|
||||
|
||||
<details>
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "gpt-4",
|
||||
"input": "Say this is a test!",
|
||||
"max_output_tokens": 1024,
|
||||
"temperature": 0.7
|
||||
}'
|
||||
```
|
||||
|
||||
For background processing:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "gpt-4",
|
||||
"input": "Generate a long story",
|
||||
"max_output_tokens": 4096,
|
||||
"background": true
|
||||
}'
|
||||
```
|
||||
|
||||
Then retrieve the response:
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/v1/responses/<response_id>
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Image Generation
|
||||
|
||||
Creates an image given a prompt. [OpenAI documentation](https://platform.openai.com/docs/api-reference/images/create).
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
{
|
||||
"version": "v3.9.0"
|
||||
"version": "v3.10.0"
|
||||
}
|
||||
|
||||
@@ -1,4 +1,86 @@
|
||||
---
|
||||
- name: "huihui-glm-4.7-flash-abliterated-i1"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
- https://huggingface.co/mradermacher/Huihui-GLM-4.7-Flash-abliterated-i1-GGUF
|
||||
description: |
|
||||
The model is a quantized version of **huihui-ai/Huihui-GLM-4.7-Flash-abliterated**, optimized for efficiency and deployment. It uses GGUF files with various quantization levels (e.g., IQ1_M, IQ2_XXS, Q4_K_M) and is designed for tasks requiring low-resource deployment. Key features include:
|
||||
- **Base Model**: Huihui-GLM-4.7-Flash-abliterated (unmodified, original model).
|
||||
- **Quantization**: Supports IQ1_M to Q4_K_M, balancing accuracy and efficiency.
|
||||
- **Use Cases**: Suitable for applications needing lightweight inference, such as edge devices or resource-constrained environments.
|
||||
- **Downloads**: Available in GGUF format with varying quality and size (e.g., 0.2GB to 18.2GB).
|
||||
- **Tags**: Abliterated, uncensored, and optimized for specific tasks.
|
||||
|
||||
This model is a modified version of the original GLM-4.7, tailored for deployment with quantized weights.
|
||||
overrides:
|
||||
parameters:
|
||||
model: llama-cpp/models/Huihui-GLM-4.7-Flash-abliterated.i1-Q4_K_M.gguf
|
||||
name: Huihui-GLM-4.7-Flash-abliterated-i1-GGUF
|
||||
backend: llama-cpp
|
||||
template:
|
||||
use_tokenizer_template: true
|
||||
known_usecases:
|
||||
- chat
|
||||
function:
|
||||
grammar:
|
||||
disable: true
|
||||
description: Imported from https://huggingface.co/mradermacher/Huihui-GLM-4.7-Flash-abliterated-i1-GGUF
|
||||
options:
|
||||
- use_jinja:true
|
||||
files:
|
||||
- filename: llama-cpp/models/Huihui-GLM-4.7-Flash-abliterated.i1-Q4_K_M.gguf
|
||||
sha256: 2ec5fcf2aa882c0c55fc67a35ea7ed50c24016bc4a8a4ceacfcea103dc2f1cb8
|
||||
uri: https://huggingface.co/mradermacher/Huihui-GLM-4.7-Flash-abliterated-i1-GGUF/resolve/main/Huihui-GLM-4.7-Flash-abliterated.i1-Q4_K_M.gguf
|
||||
- name: "mox-small-1-i1"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
- https://huggingface.co/mradermacher/mox-small-1-i1-GGUF
|
||||
description: |
|
||||
The model, **vanta-research/mox-small-1**, is a small-scale text-generation model optimized for conversational AI tasks. It supports chat, persona research, and chatbot applications. The quantized versions (e.g., i1-Q4_K_M, i1-Q4_K_S) are available for efficient deployment, with the i1-Q4_K_S variant offering the best balance of size, speed, and quality. The model is designed for lightweight inference and is compatible with frameworks like HuggingFace Transformers.
|
||||
overrides:
|
||||
parameters:
|
||||
model: llama-cpp/models/mox-small-1.i1-Q4_K_M.gguf
|
||||
name: mox-small-1-i1-GGUF
|
||||
backend: llama-cpp
|
||||
template:
|
||||
use_tokenizer_template: true
|
||||
known_usecases:
|
||||
- chat
|
||||
function:
|
||||
grammar:
|
||||
disable: true
|
||||
description: Imported from https://huggingface.co/mradermacher/mox-small-1-i1-GGUF
|
||||
options:
|
||||
- use_jinja:true
|
||||
files:
|
||||
- filename: llama-cpp/models/mox-small-1.i1-Q4_K_M.gguf
|
||||
sha256: f25e9612e985adf01869f412f997a7aaace65e1ee0c97d4975070febdcbbb978
|
||||
uri: https://huggingface.co/mradermacher/mox-small-1-i1-GGUF/resolve/main/mox-small-1.i1-Q4_K_M.gguf
|
||||
- name: "glm-4.7-flash"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
- https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF
|
||||
description: |
|
||||
**GLM-4.7-Flash** is a 30B-A3B MoE (Model Organism Ensemble) model designed for efficient deployment. It outperforms competitors in benchmarks like AIME 25, GPQA, and τ²-Bench, offering strong accuracy while balancing performance and efficiency. Optimized for lightweight use cases, it supports inference via frameworks like vLLM and SGLang, with detailed deployment instructions in the official repository. Ideal for applications requiring high-quality text generation with minimal resource consumption.
|
||||
overrides:
|
||||
parameters:
|
||||
model: llama-cpp/models/GLM-4.7-Flash-Q4_K_M.gguf
|
||||
name: GLM-4.7-Flash-GGUF
|
||||
backend: llama-cpp
|
||||
template:
|
||||
use_tokenizer_template: true
|
||||
known_usecases:
|
||||
- chat
|
||||
function:
|
||||
grammar:
|
||||
disable: true
|
||||
description: Imported from https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF
|
||||
options:
|
||||
- use_jinja:true
|
||||
files:
|
||||
- filename: llama-cpp/models/GLM-4.7-Flash-Q4_K_M.gguf
|
||||
uri: https://huggingface.co/unsloth/GLM-4.7-Flash-GGUF/resolve/main/GLM-4.7-Flash-Q4_K_M.gguf
|
||||
sha256: 73ba18480e06ccda453a26263c0e2be2bd86294e827b1812ddea2f88bba2d924
|
||||
- name: "qwen3-vl-reranker-8b"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
@@ -29,6 +111,7 @@
|
||||
|
||||
This description emphasizes its capabilities, efficiency, and versatility for multimodal search tasks.
|
||||
overrides:
|
||||
reranking: true
|
||||
parameters:
|
||||
model: llama-cpp/models/Qwen3-VL-Reranker-8B.Q4_K_M.gguf
|
||||
name: Qwen3-VL-Reranker-8B-GGUF
|
||||
@@ -3822,6 +3905,41 @@
|
||||
- filename: boomerang-qwen3-4.9B.Q4_K_M.gguf
|
||||
sha256: 11e6c068351d104dee31dd63550e5e2fc9be70467c1cfc07a6f84030cb701537
|
||||
uri: huggingface://mradermacher/boomerang-qwen3-4.9B-GGUF/boomerang-qwen3-4.9B.Q4_K_M.gguf
|
||||
- !!merge <<: *qwen3
|
||||
name: "qwen3-coder-30b-a3b-instruct"
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
|
||||
url: "github:mudler/LocalAI/gallery/qwen3.yaml@master"
|
||||
urls:
|
||||
- https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct
|
||||
- https://huggingface.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF
|
||||
description: |
|
||||
Qwen3-Coder is available in multiple sizes. Today, we're excited to introduce Qwen3-Coder-30B-A3B-Instruct. This streamlined model maintains impressive performance and efficiency, featuring the following key enhancements:
|
||||
|
||||
- Significant Performance among open models on Agentic Coding, Agentic Browser-Use, and other foundational coding tasks.
|
||||
- Long-context Capabilities with native support for 256K tokens, extendable up to 1M tokens using Yarn, optimized for repository-scale understanding.
|
||||
- Agentic Coding supporting for most platform such as Qwen Code, CLINE, featuring a specially designed function call format.
|
||||
|
||||
|
||||
Model Overview:
|
||||
Qwen3-Coder-30B-A3B-Instruct has the following features:
|
||||
|
||||
- Type: Causal Language Models
|
||||
- Training Stage: Pretraining & Post-training
|
||||
- Number of Parameters: 30.5B in total and 3.3B activated
|
||||
- Number of Layers: 48
|
||||
- Number of Attention Heads (GQA): 32 for Q and 4 for KV
|
||||
- Number of Experts: 128
|
||||
- Number of Activated Experts: 8
|
||||
- Context Length: 262,144 natively.
|
||||
|
||||
NOTE: This model supports only non-thinking mode and does not generate <think></think> blocks in its output. Meanwhile, specifying enable_thinking=False is no longer required.
|
||||
overrides:
|
||||
parameters:
|
||||
model: Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf
|
||||
files:
|
||||
- filename: Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf
|
||||
sha256: fadc3e5f8d42bf7e894a785b05082e47daee4df26680389817e2093056f088ad
|
||||
uri: huggingface://unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf
|
||||
- &gemma3
|
||||
url: "github:mudler/LocalAI/gallery/gemma.yaml@master"
|
||||
name: "gemma-3-27b-it"
|
||||
@@ -13060,6 +13178,96 @@
|
||||
- filename: t5xxl_fp16.safetensors
|
||||
sha256: 6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635
|
||||
uri: https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors
|
||||
- !!merge <<: *flux
|
||||
name: flux.2-dev
|
||||
url: "github:mudler/LocalAI/gallery/flux-ggml.yaml@master"
|
||||
description: |
|
||||
FLUX.2 [dev] is a 32 billion parameter rectified flow transformer capable of generating, editing and combining images based on text instructions.
|
||||
urls:
|
||||
- https://huggingface.co/black-forest-labs/FLUX.2-dev
|
||||
overrides:
|
||||
step: 50
|
||||
options:
|
||||
- "diffusion_model"
|
||||
- "vae_path:stablediffusion-cpp/models/flux2-vae.safetensors"
|
||||
- "sampler:euler"
|
||||
- llm_path:stablediffusion-cpp/models/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf
|
||||
- offload_params_to_cpu:true
|
||||
cfg_scale: 1
|
||||
parameters:
|
||||
model: stablediffusion-cpp/models/flux2-dev-Q4_K_M.gguf
|
||||
files:
|
||||
- filename: "stablediffusion-cpp/models/flux2-dev-Q4_K_M.gguf"
|
||||
sha256: "fca680c7b221a713b5cf7db6cf6b33474875320ee61f4c585bc33fe391dab9a6"
|
||||
uri: "https://huggingface.co/city96/FLUX.2-dev-gguf/resolve/main/flux2-dev-Q4_K_M.gguf"
|
||||
- filename: stablediffusion-cpp/models/flux2-vae.safetensors
|
||||
sha256: d64f3a68e1cc4f9f4e29b6e0da38a0204fe9a49f2d4053f0ec1fa1ca02f9c4b5
|
||||
uri: https://huggingface.co/Comfy-Org/flux2-dev/resolve/main/split_files/vae/flux2-vae.safetensors
|
||||
- filename: stablediffusion-cpp/models/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf
|
||||
sha256: a3cc56310807ed0d145eaf9f018ccda9ae7ad8edb41ec870aa2454b0d4700b3c
|
||||
uri: https://huggingface.co/unsloth/Mistral-Small-3.2-24B-Instruct-2506-GGUF/resolve/main/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf
|
||||
- !!merge <<: *flux
|
||||
name: flux.2-klein-4b
|
||||
url: "github:mudler/LocalAI/gallery/flux-ggml.yaml@master"
|
||||
license: apache-2.0
|
||||
description: |
|
||||
The FLUX.2 [klein] model family are our fastest image models to date. FLUX.2 [klein] unifies generation and editing in a single compact architecture, delivering state-of-the-art quality with end-to-end inference in as low as under a second. Built for applications that require real-time image generation without sacrificing quality, and runs on consumer hardware, with as little as 13GB VRAM.
|
||||
|
||||
FLUX.2 [klein] 4B is a 4 billion parameter rectified flow transformer capable of generating images from text descriptions and supports multi-reference editing capabilities.
|
||||
urls:
|
||||
- https://huggingface.co/black-forest-labs/FLUX.2-klein-4B
|
||||
overrides:
|
||||
step: 4
|
||||
options:
|
||||
- "diffusion_model"
|
||||
- "vae_path:stablediffusion-cpp/models/flux2-vae.safetensors"
|
||||
- "sampler:euler"
|
||||
- llm_path:stablediffusion-cpp/models/Qwen3-4B-Q4_K_M.gguf
|
||||
- offload_params_to_cpu:true
|
||||
cfg_scale: 1
|
||||
parameters:
|
||||
model: stablediffusion-cpp/models/flux-2-klein-4b-Q4_0.gguf
|
||||
files:
|
||||
- filename: "stablediffusion-cpp/models/flux-2-klein-4b-Q4_0.gguf"
|
||||
sha256: "d1023499ef3f2f82ff7c50e6778495195c1b6cc34835741778868428111f9ff4"
|
||||
uri: "https://huggingface.co/leejet/FLUX.2-klein-4B-GGUF/resolve/main/flux-2-klein-4b-Q4_0.gguf"
|
||||
- filename: stablediffusion-cpp/models/flux2-vae.safetensors
|
||||
sha256: d64f3a68e1cc4f9f4e29b6e0da38a0204fe9a49f2d4053f0ec1fa1ca02f9c4b5
|
||||
uri: https://huggingface.co/Comfy-Org/flux2-dev/resolve/main/split_files/vae/flux2-vae.safetensors
|
||||
- filename: stablediffusion-cpp/models/Qwen3-4B-Q4_K_M.gguf
|
||||
sha256: f6f851777709861056efcdad3af01da38b31223a3ba26e61a4f8bf3a2195813a
|
||||
uri: https://huggingface.co/unsloth/Qwen3-4B-GGUF/resolve/main/Qwen3-4B-Q4_K_M.gguf
|
||||
- !!merge <<: *flux
|
||||
name: flux.2-klein-9b
|
||||
url: "github:mudler/LocalAI/gallery/flux-ggml.yaml@master"
|
||||
license: apache-2.0
|
||||
description: |
|
||||
The FLUX.2 [klein] model family are our fastest image models to date. FLUX.2 [klein] unifies generation and editing in a single compact architecture, delivering state-of-the-art quality with end-to-end inference in as low as under a second. Built for applications that require real-time image generation without sacrificing quality, and runs on consumer hardware, with as little as 13GB VRAM.
|
||||
|
||||
FLUX.2 [klein] 9B is a 9 billion parameter rectified flow transformer capable of generating images from text descriptions and supports multi-reference editing capabilities.
|
||||
urls:
|
||||
- https://huggingface.co/black-forest-labs/FLUX.2-klein-4B
|
||||
overrides:
|
||||
step: 4
|
||||
options:
|
||||
- "diffusion_model"
|
||||
- "vae_path:stablediffusion-cpp/models/flux2-vae.safetensors"
|
||||
- "sampler:euler"
|
||||
- llm_path:stablediffusion-cpp/models/Qwen3-4B-Q4_K_M.gguf
|
||||
- offload_params_to_cpu:true
|
||||
cfg_scale: 1
|
||||
parameters:
|
||||
model: stablediffusion-cpp/models/flux-2-klein-9b-Q4_0.gguf
|
||||
files:
|
||||
- filename: "stablediffusion-cpp/models/flux-2-klein-9b-Q4_0.gguf"
|
||||
sha256: "a7e77afa96871d16679ff7b949bd25f20c8179f219c4b662cac91e81ed99b944"
|
||||
uri: "https://huggingface.co/leejet/FLUX.2-klein-9B-GGUF/resolve/main/flux-2-klein-9b-Q4_0.gguf"
|
||||
- filename: stablediffusion-cpp/models/flux2-vae.safetensors
|
||||
sha256: d64f3a68e1cc4f9f4e29b6e0da38a0204fe9a49f2d4053f0ec1fa1ca02f9c4b5
|
||||
uri: https://huggingface.co/Comfy-Org/flux2-dev/resolve/main/split_files/vae/flux2-vae.safetensors
|
||||
- filename: stablediffusion-cpp/models/Qwen3-4B-Q4_K_M.gguf
|
||||
sha256: f6f851777709861056efcdad3af01da38b31223a3ba26e61a4f8bf3a2195813a
|
||||
uri: https://huggingface.co/unsloth/Qwen3-4B-GGUF/resolve/main/Qwen3-4B-Q4_K_M.gguf
|
||||
- &zimage
|
||||
name: Z-Image-Turbo
|
||||
icon: https://z-image.ai/logo.png
|
||||
|
||||
2
go.mod
2
go.mod
@@ -32,7 +32,7 @@ require (
|
||||
github.com/mholt/archiver/v3 v3.5.1
|
||||
github.com/microcosm-cc/bluemonday v1.0.27
|
||||
github.com/modelcontextprotocol/go-sdk v1.2.0
|
||||
github.com/mudler/cogito v0.7.2
|
||||
github.com/mudler/cogito v0.8.1
|
||||
github.com/mudler/edgevpn v0.31.1
|
||||
github.com/mudler/go-processmanager v0.1.0
|
||||
github.com/mudler/memory v0.0.0-20251216220809-d1256471a6c2
|
||||
|
||||
4
go.sum
4
go.sum
@@ -507,8 +507,8 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mudler/cogito v0.7.2 h1:J5eHZPsxpoKcnYUfogje5u0nnzGww7ytv7nSn1DMpms=
|
||||
github.com/mudler/cogito v0.7.2/go.mod h1:6sfja3lcu2nWRzEc0wwqGNu/eCG3EWgij+8s7xyUeQ4=
|
||||
github.com/mudler/cogito v0.8.1 h1:66qPJkAMrq/Vo8AC/PvXWuVxYPhi7X2DQuJIilL8+3I=
|
||||
github.com/mudler/cogito v0.8.1/go.mod h1:6sfja3lcu2nWRzEc0wwqGNu/eCG3EWgij+8s7xyUeQ4=
|
||||
github.com/mudler/edgevpn v0.31.1 h1:7qegiDWd0kAg6ljhNHxqvp8hbo/6BbzSdbb7/2WZfiY=
|
||||
github.com/mudler/edgevpn v0.31.1/go.mod h1:ftV5B0nKFzm4R8vR80UYnCb2nf7lxCRgAALxUEEgCf8=
|
||||
github.com/mudler/go-piper v0.0.0-20241023091659-2494246fd9fc h1:RxwneJl1VgvikiX28EkpdAyL4yQVnJMrbquKospjHyA=
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
package functions
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExtractReasoning extracts reasoning content from thinking tags and returns
|
||||
// both the extracted reasoning and the cleaned content (with tags removed).
|
||||
// It handles <thinking>...</thinking> and <think>...</think> tags.
|
||||
// Multiple reasoning blocks are concatenated with newlines.
|
||||
func ExtractReasoning(content string) (reasoning string, cleanedContent string) {
|
||||
if content == "" {
|
||||
return "", content
|
||||
}
|
||||
|
||||
var reasoningParts []string
|
||||
var cleanedParts []string
|
||||
remaining := content
|
||||
|
||||
// Define tag pairs to look for
|
||||
tagPairs := []struct {
|
||||
start string
|
||||
end string
|
||||
}{
|
||||
{"<thinking>", "</thinking>"},
|
||||
{"<think>", "</think>"},
|
||||
}
|
||||
|
||||
// Track the last position we've processed
|
||||
lastPos := 0
|
||||
|
||||
for {
|
||||
// Find the earliest tag start
|
||||
earliestStart := -1
|
||||
earliestEnd := -1
|
||||
isUnclosed := false
|
||||
var matchedTag struct {
|
||||
start string
|
||||
end string
|
||||
}
|
||||
|
||||
for _, tagPair := range tagPairs {
|
||||
startIdx := strings.Index(remaining[lastPos:], tagPair.start)
|
||||
if startIdx == -1 {
|
||||
continue
|
||||
}
|
||||
startIdx += lastPos
|
||||
|
||||
// Find the corresponding end tag
|
||||
endIdx := strings.Index(remaining[startIdx+len(tagPair.start):], tagPair.end)
|
||||
if endIdx == -1 {
|
||||
// Unclosed tag - extract what we have
|
||||
if earliestStart == -1 || startIdx < earliestStart {
|
||||
earliestStart = startIdx
|
||||
earliestEnd = len(remaining)
|
||||
isUnclosed = true
|
||||
matchedTag = tagPair
|
||||
}
|
||||
continue
|
||||
}
|
||||
endIdx += startIdx + len(tagPair.start)
|
||||
|
||||
// Found a complete tag pair
|
||||
if earliestStart == -1 || startIdx < earliestStart {
|
||||
earliestStart = startIdx
|
||||
earliestEnd = endIdx + len(tagPair.end)
|
||||
isUnclosed = false
|
||||
matchedTag = tagPair
|
||||
}
|
||||
}
|
||||
|
||||
if earliestStart == -1 {
|
||||
// No more tags found, add remaining content
|
||||
if lastPos < len(remaining) {
|
||||
cleanedParts = append(cleanedParts, remaining[lastPos:])
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Add content before the tag
|
||||
if earliestStart > lastPos {
|
||||
cleanedParts = append(cleanedParts, remaining[lastPos:earliestStart])
|
||||
}
|
||||
|
||||
// Extract reasoning content
|
||||
reasoningStart := earliestStart + len(matchedTag.start)
|
||||
// For unclosed tags, earliestEnd is already at the end of the string
|
||||
// For closed tags, earliestEnd points to after the closing tag, so we subtract the end tag length
|
||||
var reasoningEnd int
|
||||
if isUnclosed {
|
||||
// Unclosed tag - extract everything to the end
|
||||
reasoningEnd = len(remaining)
|
||||
} else {
|
||||
// Closed tag - exclude the end tag
|
||||
reasoningEnd = earliestEnd - len(matchedTag.end)
|
||||
}
|
||||
if reasoningEnd > reasoningStart {
|
||||
reasoningContent := strings.TrimSpace(remaining[reasoningStart:reasoningEnd])
|
||||
if reasoningContent != "" {
|
||||
reasoningParts = append(reasoningParts, reasoningContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Move past this tag
|
||||
lastPos = earliestEnd
|
||||
}
|
||||
|
||||
// Combine reasoning parts
|
||||
reasoning = strings.Join(reasoningParts, "\n\n")
|
||||
// Combine cleaned content parts
|
||||
cleanedContent = strings.Join(cleanedParts, "")
|
||||
|
||||
return reasoning, cleanedContent
|
||||
}
|
||||
@@ -1,261 +0,0 @@
|
||||
package functions_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
. "github.com/mudler/LocalAI/pkg/functions"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("ExtractReasoning", func() {
|
||||
Context("when content has no reasoning tags", func() {
|
||||
It("should return empty reasoning and original content", func() {
|
||||
content := "This is regular content without any tags."
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(content))
|
||||
})
|
||||
|
||||
It("should handle empty string", func() {
|
||||
content := ""
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should handle content with only whitespace", func() {
|
||||
content := " \n\t "
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(content))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has <thinking> tags", func() {
|
||||
It("should extract reasoning from single thinking block", func() {
|
||||
content := "Some text <thinking>This is my reasoning</thinking> More text"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("This is my reasoning"))
|
||||
Expect(cleaned).To(Equal("Some text More text"))
|
||||
})
|
||||
|
||||
It("should extract reasoning and preserve surrounding content", func() {
|
||||
content := "Before <thinking>Reasoning here</thinking> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning here"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle thinking block at the start", func() {
|
||||
content := "<thinking>Start reasoning</thinking> Regular content"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Start reasoning"))
|
||||
Expect(cleaned).To(Equal(" Regular content"))
|
||||
})
|
||||
|
||||
It("should handle thinking block at the end", func() {
|
||||
content := "Regular content <thinking>End reasoning</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("End reasoning"))
|
||||
Expect(cleaned).To(Equal("Regular content "))
|
||||
})
|
||||
|
||||
It("should handle only thinking block", func() {
|
||||
content := "<thinking>Only reasoning</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Only reasoning"))
|
||||
Expect(cleaned).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should trim whitespace from reasoning content", func() {
|
||||
content := "Text <thinking> \n Reasoning with spaces \n </thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with spaces"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has <think> tags", func() {
|
||||
It("should extract reasoning from redacted_reasoning block", func() {
|
||||
content := "Text <think>Redacted reasoning</think> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Redacted reasoning"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle redacted_reasoning with multiline content", func() {
|
||||
content := "Before <think>Line 1\nLine 2\nLine 3</think> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Line 1\nLine 2\nLine 3"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle redacted_reasoning with complex content", func() {
|
||||
content := "Start <think>Complex reasoning\nwith\nmultiple\nlines</think> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Complex reasoning\nwith\nmultiple\nlines"))
|
||||
Expect(cleaned).To(Equal("Start End"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has multiple reasoning blocks", func() {
|
||||
It("should concatenate multiple thinking blocks with newlines", func() {
|
||||
content := "Text <thinking>First</thinking> Middle <thinking>Second</thinking> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("First\n\nSecond"))
|
||||
Expect(cleaned).To(Equal("Text Middle End"))
|
||||
})
|
||||
|
||||
It("should handle multiple different tag types", func() {
|
||||
content := "A <thinking>One</thinking> B <think>Two</think> C <think>Three</think> D"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(ContainSubstring("One"))
|
||||
Expect(reasoning).To(ContainSubstring("Two"))
|
||||
Expect(reasoning).To(ContainSubstring("Three"))
|
||||
Expect(cleaned).To(Equal("A B C D"))
|
||||
})
|
||||
|
||||
It("should handle nested tags correctly (extracts first match)", func() {
|
||||
content := "Text <thinking>Outer <think>Inner</think></thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// Should extract the outer thinking block
|
||||
Expect(reasoning).To(ContainSubstring("Outer"))
|
||||
Expect(reasoning).To(ContainSubstring("Inner"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has unclosed reasoning tags", func() {
|
||||
It("should extract unclosed thinking block", func() {
|
||||
content := "Text <thinking>Unclosed reasoning"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Unclosed reasoning"))
|
||||
Expect(cleaned).To(Equal("Text "))
|
||||
})
|
||||
|
||||
It("should extract unclosed think block", func() {
|
||||
content := "Before <think>Incomplete"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Incomplete"))
|
||||
Expect(cleaned).To(Equal("Before "))
|
||||
})
|
||||
|
||||
It("should extract unclosed redacted_reasoning block", func() {
|
||||
content := "Start <think>Partial reasoning content"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Partial reasoning content"))
|
||||
Expect(cleaned).To(Equal("Start "))
|
||||
})
|
||||
|
||||
It("should handle unclosed tag at the end", func() {
|
||||
content := "Regular content <thinking>Unclosed at end"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Unclosed at end"))
|
||||
Expect(cleaned).To(Equal("Regular content "))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has empty reasoning blocks", func() {
|
||||
It("should ignore empty thinking block", func() {
|
||||
content := "Text <thinking></thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should ignore thinking block with only whitespace", func() {
|
||||
content := "Text <thinking> \n\t </thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has reasoning tags with special characters", func() {
|
||||
It("should handle reasoning with newlines", func() {
|
||||
content := "Before <thinking>Line 1\nLine 2\nLine 3</thinking> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Line 1\nLine 2\nLine 3"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with code blocks", func() {
|
||||
content := "Text <thinking>Reasoning with ```code``` blocks</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with ```code``` blocks"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with JSON", func() {
|
||||
content := "Before <think>{\"key\": \"value\"}</think> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("{\"key\": \"value\"}"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with HTML-like content", func() {
|
||||
content := "Text <thinking>Reasoning with <tags> inside</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with <tags> inside"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has reasoning mixed with regular content", func() {
|
||||
It("should preserve content order correctly", func() {
|
||||
content := "Start <thinking>Reasoning</thinking> Middle <think>More reasoning</think> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(ContainSubstring("Reasoning"))
|
||||
Expect(reasoning).To(ContainSubstring("More reasoning"))
|
||||
Expect(cleaned).To(Equal("Start Middle End"))
|
||||
})
|
||||
|
||||
It("should handle reasoning in the middle of a sentence", func() {
|
||||
content := "This is a <thinking>reasoning</thinking> sentence."
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("reasoning"))
|
||||
Expect(cleaned).To(Equal("This is a sentence."))
|
||||
})
|
||||
})
|
||||
|
||||
Context("edge cases", func() {
|
||||
It("should handle content with only opening tag", func() {
|
||||
content := "<thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(""))
|
||||
})
|
||||
|
||||
It("should handle content with only closing tag", func() {
|
||||
content := "</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("</thinking>"))
|
||||
})
|
||||
|
||||
It("should handle mismatched tags", func() {
|
||||
content := "<thinking>Content</think>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// Should extract unclosed thinking block
|
||||
Expect(reasoning).To(ContainSubstring("Content"))
|
||||
Expect(cleaned).To(Equal(""))
|
||||
})
|
||||
|
||||
It("should handle very long reasoning content", func() {
|
||||
longReasoning := strings.Repeat("This is reasoning content. ", 100)
|
||||
content := "Text <thinking>" + longReasoning + "</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// TrimSpace is applied, so we need to account for that
|
||||
Expect(reasoning).To(Equal(strings.TrimSpace(longReasoning)))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with unicode characters", func() {
|
||||
content := "Text <thinking>Reasoning with 中文 and emoji 🧠</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with 中文 and emoji 🧠"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -57,4 +57,6 @@ type Backend interface {
|
||||
GetTokenMetrics(ctx context.Context, in *pb.MetricsRequest, opts ...grpc.CallOption) (*pb.MetricsResponse, error)
|
||||
|
||||
VAD(ctx context.Context, in *pb.VADRequest, opts ...grpc.CallOption) (*pb.VADResponse, error)
|
||||
|
||||
ModelMetadata(ctx context.Context, in *pb.ModelOptions, opts ...grpc.CallOption) (*pb.ModelMetadataResponse, error)
|
||||
}
|
||||
|
||||
@@ -77,6 +77,10 @@ func (llm *Base) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationRespons
|
||||
return pb.TokenizationResponse{}, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
// ModelMetadata returns metadata about the loaded model. The Base
// implementation is a stub that always errors; concrete backends override
// it when they can report metadata.
func (llm *Base) ModelMetadata(opts *pb.ModelOptions) (*pb.ModelMetadataResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
|
||||
|
||||
// backends may wish to call this to capture the gopsutil info, then enhance with additional memory usage details?
|
||||
func (llm *Base) Status() (pb.StatusResponse, error) {
|
||||
return pb.StatusResponse{
|
||||
|
||||
@@ -537,3 +537,25 @@ func (c *Client) Detect(ctx context.Context, in *pb.DetectOptions, opts ...grpc.
|
||||
client := pb.NewBackendClient(conn)
|
||||
return client.Detect(ctx, in, opts...)
|
||||
}
|
||||
|
||||
// ModelMetadata queries the backend process over gRPC for metadata about the
// model described by in. It follows the same call pattern as the other Client
// methods visible here: optional serialization when parallel calls are
// disabled, busy-flag bookkeeping, and watchdog marking for hang detection.
func (c *Client) ModelMetadata(ctx context.Context, in *pb.ModelOptions, opts ...grpc.CallOption) (*pb.ModelMetadataResponse, error) {
	// Serialize calls unless the client was configured for parallel use.
	if !c.parallel {
		c.opMutex.Lock()
		defer c.opMutex.Unlock()
	}
	c.setBusy(true)
	defer c.setBusy(false)
	// Mark the watchdog so a hung backend can be detected during this call.
	c.wdMark()
	defer c.wdUnMark()
	// NOTE(review): a fresh connection is dialed (and closed) per call with a
	// 50MB message-size cap — presumably mirroring the sibling methods such as
	// Detect; confirm before changing to a pooled connection.
	conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(50*1024*1024), // 50MB
			grpc.MaxCallSendMsgSize(50*1024*1024), // 50MB
		))
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	client := pb.NewBackendClient(conn)
	return client.ModelMetadata(ctx, in, opts...)
}
|
||||
|
||||
@@ -99,6 +99,10 @@ func (e *embedBackend) VAD(ctx context.Context, in *pb.VADRequest, opts ...grpc.
|
||||
return e.s.VAD(ctx, in)
|
||||
}
|
||||
|
||||
// ModelMetadata forwards the metadata request directly to the embedded
// in-process server, bypassing the gRPC transport.
func (e *embedBackend) ModelMetadata(ctx context.Context, in *pb.ModelOptions, opts ...grpc.CallOption) (*pb.ModelMetadataResponse, error) {
	return e.s.ModelMetadata(ctx, in)
}
|
||||
|
||||
// GetTokenMetrics forwards the metrics request to the embedded server.
// Note the name mapping: the client-facing GetTokenMetrics delegates to the
// server's GetMetrics method.
func (e *embedBackend) GetTokenMetrics(ctx context.Context, in *pb.MetricsRequest, opts ...grpc.CallOption) (*pb.MetricsResponse, error) {
	return e.s.GetMetrics(ctx, in)
}
|
||||
|
||||
@@ -28,6 +28,8 @@ type AIModel interface {
|
||||
StoresFind(*pb.StoresFindOptions) (pb.StoresFindResult, error)
|
||||
|
||||
VAD(*pb.VADRequest) (pb.VADResponse, error)
|
||||
|
||||
ModelMetadata(*pb.ModelOptions) (*pb.ModelMetadataResponse, error)
|
||||
}
|
||||
|
||||
func newReply(s string) *pb.Reply {
|
||||
|
||||
@@ -263,6 +263,18 @@ func (s *server) VAD(ctx context.Context, in *pb.VADRequest) (*pb.VADResponse, e
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// ModelMetadata serves the ModelMetadata RPC by delegating to the underlying
// model implementation, honouring its locking policy.
func (s *server) ModelMetadata(ctx context.Context, in *pb.ModelOptions) (*pb.ModelMetadataResponse, error) {
	// Some backends are not safe for concurrent use; take their lock if asked.
	if s.llm.Locking() {
		s.llm.Lock()
		defer s.llm.Unlock()
	}
	res, err := s.llm.ModelMetadata(in)
	if err != nil {
		return nil, err
	}
	return res, nil
}
|
||||
|
||||
func StartServer(address string, model AIModel) error {
|
||||
lis, err := net.Listen("tcp", address)
|
||||
if err != nil {
|
||||
|
||||
@@ -393,7 +393,7 @@ func (wd *WatchDog) checkMemory() {
|
||||
memoryType = "RAM"
|
||||
}
|
||||
|
||||
xlog.Debug("[WatchDog] Memory check", "type", memoryType, "usage_percent", aggregate.UsagePercent, "threshold_percent", thresholdPercent, "loaded_models", modelCount)
|
||||
//xlog.Debug("[WatchDog] Memory check", "type", memoryType, "usage_percent", aggregate.UsagePercent, "threshold_percent", thresholdPercent, "loaded_models", modelCount)
|
||||
|
||||
// Check if usage exceeds threshold
|
||||
if aggregate.UsagePercent > thresholdPercent {
|
||||
|
||||
15
pkg/reasoning/config.go
Normal file
15
pkg/reasoning/config.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package reasoning
|
||||
|
||||
// TagPair represents a start/end tag pair for reasoning extraction
// (for example "<think>" / "</think>").
type TagPair struct {
	Start string `yaml:"start" json:"start"`
	End   string `yaml:"end" json:"end"`
}

// Config controls how reasoning content is detected and extracted from model
// output. All pointer fields are tri-state: nil means "use the default
// behavior" (reasoning enabled, prefill enabled, reasoning returned).
type Config struct {
	// DisableReasoningTagPrefill, when true, disables prepending a detected
	// thinking start token to the model output before extraction.
	DisableReasoningTagPrefill *bool `yaml:"disable_reasoning_tag_prefill,omitempty" json:"disable_reasoning_tag_prefill,omitempty"`
	// DisableReasoning, when true, turns off reasoning extraction entirely:
	// content is returned unchanged.
	DisableReasoning *bool `yaml:"disable,omitempty" json:"disable,omitempty"`
	// StripReasoningOnly, when true, still removes reasoning tags from the
	// content but discards the extracted reasoning instead of returning it.
	StripReasoningOnly *bool `yaml:"strip_reasoning_only,omitempty" json:"strip_reasoning_only,omitempty"`
	// ThinkingStartTokens lists extra start tokens checked (before the
	// built-in defaults) when detecting prompts that end in a thinking token.
	ThinkingStartTokens []string `yaml:"thinking_start_tokens,omitempty" json:"thinking_start_tokens,omitempty"`
	// TagPairs lists extra start/end tag pairs checked (before the built-in
	// defaults) during extraction. Pairs with an empty Start or End are
	// ignored.
	TagPairs []TagPair `yaml:"tag_pairs,omitempty" json:"tag_pairs,omitempty"`
}
|
||||
258
pkg/reasoning/reasoning.go
Normal file
258
pkg/reasoning/reasoning.go
Normal file
@@ -0,0 +1,258 @@
|
||||
package reasoning
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DetectThinkingStartToken checks if the prompt or template contains a thinking start token
|
||||
// and returns the detected token. This indicates that the model's prompt template
|
||||
// already includes the thinking token, so the model output will start with reasoning
|
||||
// content without an explicit opening tag.
|
||||
// Returns the detected token if found, empty string otherwise.
|
||||
// Common tokens checked (in order of specificity - longer first):
|
||||
// Based on llama.cpp's chat-parser.cpp implementations:
|
||||
// - <|START_THINKING|> (Command-R models)
|
||||
// - <|inner_prefix|> (Apertus models)
|
||||
// - <seed:think> (Seed models)
|
||||
// - <think> (DeepSeek, Granite, ExaOne models)
|
||||
// - <|think|> (Solar Open models)
|
||||
// - <thinking> (General thinking tag)
|
||||
// - [THINK] (Magistral models)
|
||||
// Custom tokens from config are checked first, then default tokens.
|
||||
func DetectThinkingStartToken(prompt string, config *Config) string {
|
||||
// Common thinking start tokens (in order of specificity - longer first)
|
||||
// Based on llama.cpp's chat-parser.cpp implementations
|
||||
defaultTokens := []string{
|
||||
"<|START_THINKING|>", // Command-R models
|
||||
"<|inner_prefix|>", // Apertus models
|
||||
"<seed:think>", // Seed models
|
||||
"<think>", // DeepSeek, Granite, ExaOne models
|
||||
"<|think|>", // Solar Open models
|
||||
"<thinking>", // General thinking tag
|
||||
"[THINK]", // Magistral models
|
||||
}
|
||||
|
||||
// Merge custom tokens with default tokens (custom tokens first for priority)
|
||||
var thinkingStartTokens []string
|
||||
if config != nil && len(config.ThinkingStartTokens) > 0 {
|
||||
thinkingStartTokens = append(thinkingStartTokens, config.ThinkingStartTokens...)
|
||||
}
|
||||
thinkingStartTokens = append(thinkingStartTokens, defaultTokens...)
|
||||
|
||||
// Check if prompt ends with any of these tokens (allowing for trailing whitespace/newlines)
|
||||
trimmedPrompt := strings.TrimRight(prompt, " \t\n\r")
|
||||
for _, token := range thinkingStartTokens {
|
||||
if strings.Contains(trimmedPrompt, token) {
|
||||
return token
|
||||
}
|
||||
}
|
||||
|
||||
// Also check if any of these tokens appear near the end (within last 100 chars)
|
||||
// This handles cases where there might be stop tokens or other content after
|
||||
if len(trimmedPrompt) > 100 {
|
||||
lastPart := trimmedPrompt[len(trimmedPrompt)-100:]
|
||||
for _, token := range thinkingStartTokens {
|
||||
if idx := strings.LastIndex(lastPart, token); idx != -1 {
|
||||
// Check if this is the last meaningful content (only whitespace after)
|
||||
afterToken := lastPart[idx+len(token):]
|
||||
if strings.TrimSpace(afterToken) == "" {
|
||||
return token
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// ExtractReasoningWithConfig extracts reasoning from content with the given config.
|
||||
// If reasoning is disabled, it returns the original content.
|
||||
// If thinking start token prefill is enabled, it prepends the thinking start token to the content.
|
||||
// It returns the extracted reasoning and the cleaned content.
|
||||
func ExtractReasoningWithConfig(content, thinkingStartToken string, config Config) (reasoning string, cleanedContent string) {
|
||||
cleanedContent = content
|
||||
// If reasoning is not disabled, prepend the thinking start token if needed and extract reasoning
|
||||
if config.DisableReasoning == nil || !*config.DisableReasoning {
|
||||
// If thinking start token prefill is not disabled, prepend the thinking start token
|
||||
if config.DisableReasoningTagPrefill == nil || !*config.DisableReasoningTagPrefill {
|
||||
cleanedContent = PrependThinkingTokenIfNeeded(cleanedContent, thinkingStartToken)
|
||||
}
|
||||
// Extract reasoning from the cleaned content
|
||||
reasoning, cleanedContent = ExtractReasoning(cleanedContent, &config)
|
||||
if config.StripReasoningOnly != nil && *config.StripReasoningOnly {
|
||||
reasoning = ""
|
||||
}
|
||||
}
|
||||
|
||||
return reasoning, cleanedContent
|
||||
}
|
||||
|
||||
// PrependThinkingTokenIfNeeded inserts startToken in front of content (after
// any leading whitespace) when the token was detected in the prompt, so the
// standard extraction logic sees a normally-tagged reasoning block. Content
// that already contains the token anywhere is returned unchanged, as is any
// content when startToken is empty.
func PrependThinkingTokenIfNeeded(content string, startToken string) string {
	if startToken == "" {
		return content
	}

	// Split off the leading whitespace; the token is inserted right after it.
	body := strings.TrimLeft(content, " \t\n\r")

	// If the token already appears in the content, don't insert a duplicate.
	if strings.Contains(body, startToken) {
		return content
	}

	// The trimmed characters are all single-byte, so the prefix length is
	// simply the difference in lengths.
	leading := content[:len(content)-len(body)]
	return leading + startToken + body
}
|
||||
|
||||
// ExtractReasoning extracts reasoning content from thinking tags and returns
// both the extracted reasoning and the cleaned content (with tags removed).
// It handles the default tag pairs below (e.g. <thinking>...</thinking> and
// <think>...</think>) plus any custom pairs from config. Multiple reasoning
// blocks are concatenated with blank lines ("\n\n"); empty or
// whitespace-only blocks are dropped. An unclosed start tag consumes
// everything to the end of the string as reasoning. Custom tag pairs from
// config are checked first, then default tag pairs.
func ExtractReasoning(content string, config *Config) (reasoning string, cleanedContent string) {
	if content == "" {
		return "", content
	}

	var reasoningParts []string
	var cleanedParts []string
	remaining := content

	// Define default tag pairs to look for (matching llama.cpp's chat-parser.cpp)
	defaultTagPairs := []struct {
		start string
		end   string
	}{
		{"<|START_THINKING|>", "<|END_THINKING|>"}, // Command-R models
		{"<|inner_prefix|>", "<|inner_suffix|>"},   // Apertus models
		{"<seed:think>", "</seed:think>"},          // Seed models
		{"<think>", "</think>"},                    // DeepSeek, Granite, ExaOne models
		{"<|think|>", "<|end|><|begin|>assistant<|content|>"}, // Solar Open models (complex end)
		{"<thinking>", "</thinking>"},                         // General thinking tag
		{"[THINK]", "[/THINK]"},                               // Magistral models
	}

	// Merge custom tag pairs with default tag pairs (custom pairs first for
	// priority). Pairs with an empty start or end are skipped.
	var tagPairs []struct {
		start string
		end   string
	}
	if config != nil && len(config.TagPairs) > 0 {
		for _, pair := range config.TagPairs {
			if pair.Start != "" && pair.End != "" {
				tagPairs = append(tagPairs, struct {
					start string
					end   string
				}{pair.Start, pair.End})
			}
		}
	}
	// Add default tag pairs
	for _, pair := range defaultTagPairs {
		tagPairs = append(tagPairs, pair)
	}

	// Track the last position we've processed. The input string itself is
	// never mutated; lastPos advances past each consumed tag region.
	lastPos := 0

	for {
		// Find the earliest tag start among all pairs. Ties at the same
		// position go to the pair listed first (strict '<' comparison below),
		// which is why custom pairs precede defaults.
		earliestStart := -1
		earliestEnd := -1
		isUnclosed := false
		var matchedTag struct {
			start string
			end   string
		}

		for _, tagPair := range tagPairs {
			startIdx := strings.Index(remaining[lastPos:], tagPair.start)
			if startIdx == -1 {
				continue
			}
			startIdx += lastPos

			// Find the corresponding end tag
			endIdx := strings.Index(remaining[startIdx+len(tagPair.start):], tagPair.end)
			if endIdx == -1 {
				// Unclosed tag - extract what we have (to end of string)
				if earliestStart == -1 || startIdx < earliestStart {
					earliestStart = startIdx
					earliestEnd = len(remaining)
					isUnclosed = true
					matchedTag = tagPair
				}
				continue
			}
			endIdx += startIdx + len(tagPair.start)

			// Found a complete tag pair; earliestEnd points just past the
			// closing tag.
			if earliestStart == -1 || startIdx < earliestStart {
				earliestStart = startIdx
				earliestEnd = endIdx + len(tagPair.end)
				isUnclosed = false
				matchedTag = tagPair
			}
		}

		if earliestStart == -1 {
			// No more tags found, add remaining content
			if lastPos < len(remaining) {
				cleanedParts = append(cleanedParts, remaining[lastPos:])
			}
			break
		}

		// Add content before the tag
		if earliestStart > lastPos {
			cleanedParts = append(cleanedParts, remaining[lastPos:earliestStart])
		}

		// Extract reasoning content
		reasoningStart := earliestStart + len(matchedTag.start)
		// For unclosed tags, earliestEnd is already at the end of the string
		// For closed tags, earliestEnd points to after the closing tag, so we subtract the end tag length
		var reasoningEnd int
		if isUnclosed {
			// Unclosed tag - extract everything to the end
			reasoningEnd = len(remaining)
		} else {
			// Closed tag - exclude the end tag
			reasoningEnd = earliestEnd - len(matchedTag.end)
		}
		if reasoningEnd > reasoningStart {
			reasoningContent := strings.TrimSpace(remaining[reasoningStart:reasoningEnd])
			if reasoningContent != "" {
				reasoningParts = append(reasoningParts, reasoningContent)
			}
		}

		// Move past this tag
		lastPos = earliestEnd
	}

	// Combine reasoning parts
	reasoning = strings.Join(reasoningParts, "\n\n")
	// Combine cleaned content parts
	cleanedContent = strings.Join(cleanedParts, "")

	return reasoning, cleanedContent
}
|
||||
13
pkg/reasoning/reasoning_suite_test.go
Normal file
13
pkg/reasoning/reasoning_suite_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package reasoning_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// TestReasoning is the Ginkgo entry point for this package: it registers
// gomega's failure handler and runs the "Reasoning test suite" specs.
func TestReasoning(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Reasoning test suite")
}
|
||||
1144
pkg/reasoning/reasoning_test.go
Normal file
1144
pkg/reasoning/reasoning_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -344,8 +344,6 @@ func getNVIDIAGPUMemory() []GPUMemoryInfo {
|
||||
if totalBytes > 0 {
|
||||
usagePercent = float64(usedBytes) / float64(totalBytes) * 100
|
||||
}
|
||||
|
||||
xlog.Debug("using system RAM for unified memory GPU", "device", name, "system_ram_bytes", totalBytes)
|
||||
} else if isNA {
|
||||
// Unknown device with N/A values - skip memory info
|
||||
xlog.Debug("nvidia-smi returned N/A for unknown device", "device", name)
|
||||
@@ -569,7 +567,7 @@ func getIntelGPUTop() []GPUMemoryInfo {
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
xlog.Debug("intel_gpu_top failed", "error", err, "stderr", stderr.String())
|
||||
xlog.Debug("intel_gpu_top failed", "error", err, "stderr", stderr.String(), "stdout", stdout.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package xsysinfo
|
||||
|
||||
import (
|
||||
"github.com/mudler/memory"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
// SystemRAMInfo contains system RAM usage information
|
||||
@@ -25,7 +24,6 @@ func GetSystemRAMInfo() (*SystemRAMInfo, error) {
|
||||
if total > 0 {
|
||||
usagePercent = float64(used) / float64(total) * 100
|
||||
}
|
||||
xlog.Debug("System RAM Info", "total", total, "used", used, "free", free, "usage_percent", usagePercent)
|
||||
return &SystemRAMInfo{
|
||||
Total: total,
|
||||
Used: used,
|
||||
|
||||
652
swagger/docs.go
652
swagger/docs.go
@@ -1259,6 +1259,116 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses": {
|
||||
"post": {
|
||||
"summary": "Create a response using the Open Responses API",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Request body",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.OpenResponsesRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses/{id}": {
|
||||
"get": {
|
||||
"description": "Retrieve a response by ID. Can be used for polling background responses or resuming streaming responses.",
|
||||
"summary": "Get a response by ID",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Response ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Set to 'true' to resume streaming",
|
||||
"name": "stream",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Sequence number to resume from (for streaming)",
|
||||
"name": "starting_after",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses/{id}/cancel": {
|
||||
"post": {
|
||||
"description": "Cancel a background response if it's still in progress",
|
||||
"summary": "Cancel a response",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Response ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/sound-generation": {
|
||||
"post": {
|
||||
"summary": "Generates audio from the input text.",
|
||||
@@ -2507,6 +2617,440 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORAnnotation": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"end_index": {
|
||||
"type": "integer"
|
||||
},
|
||||
"start_index": {
|
||||
"type": "integer"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "url_citation",
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORContentPart": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"description": "REQUIRED for output_text - must always be present (use [])",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORAnnotation"
|
||||
}
|
||||
},
|
||||
"detail": {
|
||||
"description": "low|high|auto for images",
|
||||
"type": "string"
|
||||
},
|
||||
"file_data": {
|
||||
"type": "string"
|
||||
},
|
||||
"file_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"filename": {
|
||||
"type": "string"
|
||||
},
|
||||
"image_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"logprobs": {
|
||||
"description": "REQUIRED for output_text - must always be present (use [])",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORLogProb"
|
||||
}
|
||||
},
|
||||
"refusal": {
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"description": "REQUIRED for output_text - must always be present (even if empty)",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "input_text|input_image|input_file|output_text|refusal",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORError": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"param": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "invalid_request|not_found|server_error|model_error|too_many_requests",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORFunctionTool": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
},
|
||||
"strict": {
|
||||
"description": "Always include in response",
|
||||
"type": "boolean"
|
||||
},
|
||||
"type": {
|
||||
"description": "always \"function\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORIncompleteDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reason": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORInputTokensDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cached_tokens": {
|
||||
"description": "Always include, even if 0",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORItemField": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"arguments": {
|
||||
"type": "string"
|
||||
},
|
||||
"call_id": {
|
||||
"description": "Function call fields",
|
||||
"type": "string"
|
||||
},
|
||||
"content": {
|
||||
"description": "string or []ORContentPart for messages"
|
||||
},
|
||||
"encrypted_content": {
|
||||
"description": "Provider-specific encrypted content",
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "Present for all output items",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"output": {
|
||||
"description": "Function call output fields"
|
||||
},
|
||||
"role": {
|
||||
"description": "Message fields",
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "in_progress|completed|incomplete",
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"description": "Reasoning fields (for type == \"reasoning\")",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORContentPart"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"description": "message|function_call|function_call_output|reasoning|item_reference",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORLogProb": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"bytes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"logprob": {
|
||||
"type": "number"
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
},
|
||||
"top_logprobs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORTopLogProb"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OROutputTokensDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reasoning_tokens": {
|
||||
"description": "Always include, even if 0",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORReasoning": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"effort": {
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORReasoningParam": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"effort": {
|
||||
"description": "\"none\"|\"low\"|\"medium\"|\"high\"|\"xhigh\"",
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"description": "\"auto\"|\"concise\"|\"detailed\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORResponseResource": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"background": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"completed_at": {
|
||||
"description": "Required: present as number or null",
|
||||
"type": "integer"
|
||||
},
|
||||
"created_at": {
|
||||
"type": "integer"
|
||||
},
|
||||
"error": {
|
||||
"description": "Always present, null if no error",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORError"
|
||||
}
|
||||
]
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"incomplete_details": {
|
||||
"description": "Always present, null if complete",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORIncompleteDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
"max_output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"max_tool_calls": {
|
||||
"description": "nullable",
|
||||
"type": "integer"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Metadata and operational flags",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"object": {
|
||||
"description": "always \"response\"",
|
||||
"type": "string"
|
||||
},
|
||||
"output": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORItemField"
|
||||
}
|
||||
},
|
||||
"parallel_tool_calls": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number"
|
||||
},
|
||||
"previous_response_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"prompt_cache_key": {
|
||||
"description": "nullable",
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"description": "nullable",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORReasoning"
|
||||
}
|
||||
]
|
||||
},
|
||||
"safety_identifier": {
|
||||
"description": "Safety and caching",
|
||||
"type": "string"
|
||||
},
|
||||
"service_tier": {
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "in_progress|completed|failed|incomplete",
|
||||
"type": "string"
|
||||
},
|
||||
"store": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"temperature": {
|
||||
"description": "Sampling parameters (always required)",
|
||||
"type": "number"
|
||||
},
|
||||
"text": {
|
||||
"description": "Text format configuration",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORTextConfig"
|
||||
}
|
||||
]
|
||||
},
|
||||
"tool_choice": {},
|
||||
"tools": {
|
||||
"description": "Tool-related fields",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORFunctionTool"
|
||||
}
|
||||
},
|
||||
"top_logprobs": {
|
||||
"description": "Default to 0",
|
||||
"type": "integer"
|
||||
},
|
||||
"top_p": {
|
||||
"type": "number"
|
||||
},
|
||||
"truncation": {
|
||||
"description": "Truncation and reasoning",
|
||||
"type": "string"
|
||||
},
|
||||
"usage": {
|
||||
"description": "Usage statistics",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORUsage"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTextConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"format": {
|
||||
"$ref": "#/definitions/schema.ORTextFormat"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTextFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"description": "\"text\" or \"json_schema\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTopLogProb": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"bytes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"logprob": {
|
||||
"type": "number"
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORUsage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"input_tokens_details": {
|
||||
"description": "Always present",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORInputTokensDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"output_tokens_details": {
|
||||
"description": "Always present",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.OROutputTokensDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"total_tokens": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OpenAIModel": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2781,6 +3325,114 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OpenResponsesRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"allowed_tools": {
|
||||
"description": "Restrict which tools can be invoked",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"background": {
|
||||
"description": "Run request in background",
|
||||
"type": "boolean"
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"description": "Frequency penalty (-2.0 to 2.0)",
|
||||
"type": "number"
|
||||
},
|
||||
"include": {
|
||||
"description": "What to include in response",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"input": {
|
||||
"description": "string or []ORItemParam"
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
"logit_bias": {
|
||||
"description": "OpenAI-compatible extensions (not in Open Responses spec)",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
},
|
||||
"max_output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"max_tool_calls": {
|
||||
"description": "Maximum number of tool calls",
|
||||
"type": "integer"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"parallel_tool_calls": {
|
||||
"description": "Allow parallel tool calls",
|
||||
"type": "boolean"
|
||||
},
|
||||
"presence_penalty": {
|
||||
"description": "Presence penalty (-2.0 to 2.0)",
|
||||
"type": "number"
|
||||
},
|
||||
"previous_response_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"$ref": "#/definitions/schema.ORReasoningParam"
|
||||
},
|
||||
"service_tier": {
|
||||
"description": "\"auto\"|\"default\"|priority hint",
|
||||
"type": "string"
|
||||
},
|
||||
"store": {
|
||||
"description": "Whether to store the response",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stream": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number"
|
||||
},
|
||||
"text_format": {
|
||||
"description": "Additional parameters from spec"
|
||||
},
|
||||
"tool_choice": {
|
||||
"description": "\"auto\"|\"required\"|\"none\"|{type:\"function\",name:\"...\"}"
|
||||
},
|
||||
"tools": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORFunctionTool"
|
||||
}
|
||||
},
|
||||
"top_logprobs": {
|
||||
"description": "Number of top logprobs to return",
|
||||
"type": "integer"
|
||||
},
|
||||
"top_p": {
|
||||
"type": "number"
|
||||
},
|
||||
"truncation": {
|
||||
"description": "\"auto\"|\"disabled\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.P2PNodesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@@ -1252,6 +1252,116 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses": {
|
||||
"post": {
|
||||
"summary": "Create a response using the Open Responses API",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Request body",
|
||||
"name": "request",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.OpenResponsesRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses/{id}": {
|
||||
"get": {
|
||||
"description": "Retrieve a response by ID. Can be used for polling background responses or resuming streaming responses.",
|
||||
"summary": "Get a response by ID",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Response ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Set to 'true' to resume streaming",
|
||||
"name": "stream",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"description": "Sequence number to resume from (for streaming)",
|
||||
"name": "starting_after",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/responses/{id}/cancel": {
|
||||
"post": {
|
||||
"description": "Cancel a background response if it's still in progress",
|
||||
"summary": "Cancel a response",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Response ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Response",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/schema.ORResponseResource"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad Request",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Not Found",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/sound-generation": {
|
||||
"post": {
|
||||
"summary": "Generates audio from the input text.",
|
||||
@@ -2500,6 +2610,440 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORAnnotation": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"end_index": {
|
||||
"type": "integer"
|
||||
},
|
||||
"start_index": {
|
||||
"type": "integer"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "url_citation",
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORContentPart": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"description": "REQUIRED for output_text - must always be present (use [])",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORAnnotation"
|
||||
}
|
||||
},
|
||||
"detail": {
|
||||
"description": "low|high|auto for images",
|
||||
"type": "string"
|
||||
},
|
||||
"file_data": {
|
||||
"type": "string"
|
||||
},
|
||||
"file_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"filename": {
|
||||
"type": "string"
|
||||
},
|
||||
"image_url": {
|
||||
"type": "string"
|
||||
},
|
||||
"logprobs": {
|
||||
"description": "REQUIRED for output_text - must always be present (use [])",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORLogProb"
|
||||
}
|
||||
},
|
||||
"refusal": {
|
||||
"type": "string"
|
||||
},
|
||||
"text": {
|
||||
"description": "REQUIRED for output_text - must always be present (even if empty)",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "input_text|input_image|input_file|output_text|refusal",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORError": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"param": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "invalid_request|not_found|server_error|model_error|too_many_requests",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORFunctionTool": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"additionalProperties": true
|
||||
},
|
||||
"strict": {
|
||||
"description": "Always include in response",
|
||||
"type": "boolean"
|
||||
},
|
||||
"type": {
|
||||
"description": "always \"function\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORIncompleteDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reason": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORInputTokensDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cached_tokens": {
|
||||
"description": "Always include, even if 0",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORItemField": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"arguments": {
|
||||
"type": "string"
|
||||
},
|
||||
"call_id": {
|
||||
"description": "Function call fields",
|
||||
"type": "string"
|
||||
},
|
||||
"content": {
|
||||
"description": "string or []ORContentPart for messages"
|
||||
},
|
||||
"encrypted_content": {
|
||||
"description": "Provider-specific encrypted content",
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"description": "Present for all output items",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"output": {
|
||||
"description": "Function call output fields"
|
||||
},
|
||||
"role": {
|
||||
"description": "Message fields",
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "in_progress|completed|incomplete",
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"description": "Reasoning fields (for type == \"reasoning\")",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORContentPart"
|
||||
}
|
||||
},
|
||||
"type": {
|
||||
"description": "message|function_call|function_call_output|reasoning|item_reference",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORLogProb": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"bytes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"logprob": {
|
||||
"type": "number"
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
},
|
||||
"top_logprobs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORTopLogProb"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OROutputTokensDetails": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reasoning_tokens": {
|
||||
"description": "Always include, even if 0",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORReasoning": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"effort": {
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORReasoningParam": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"effort": {
|
||||
"description": "\"none\"|\"low\"|\"medium\"|\"high\"|\"xhigh\"",
|
||||
"type": "string"
|
||||
},
|
||||
"summary": {
|
||||
"description": "\"auto\"|\"concise\"|\"detailed\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORResponseResource": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"background": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"completed_at": {
|
||||
"description": "Required: present as number or null",
|
||||
"type": "integer"
|
||||
},
|
||||
"created_at": {
|
||||
"type": "integer"
|
||||
},
|
||||
"error": {
|
||||
"description": "Always present, null if no error",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORError"
|
||||
}
|
||||
]
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"incomplete_details": {
|
||||
"description": "Always present, null if complete",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORIncompleteDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
"max_output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"max_tool_calls": {
|
||||
"description": "nullable",
|
||||
"type": "integer"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Metadata and operational flags",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"object": {
|
||||
"description": "always \"response\"",
|
||||
"type": "string"
|
||||
},
|
||||
"output": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORItemField"
|
||||
}
|
||||
},
|
||||
"parallel_tool_calls": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number"
|
||||
},
|
||||
"previous_response_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"prompt_cache_key": {
|
||||
"description": "nullable",
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"description": "nullable",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORReasoning"
|
||||
}
|
||||
]
|
||||
},
|
||||
"safety_identifier": {
|
||||
"description": "Safety and caching",
|
||||
"type": "string"
|
||||
},
|
||||
"service_tier": {
|
||||
"type": "string"
|
||||
},
|
||||
"status": {
|
||||
"description": "in_progress|completed|failed|incomplete",
|
||||
"type": "string"
|
||||
},
|
||||
"store": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"temperature": {
|
||||
"description": "Sampling parameters (always required)",
|
||||
"type": "number"
|
||||
},
|
||||
"text": {
|
||||
"description": "Text format configuration",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORTextConfig"
|
||||
}
|
||||
]
|
||||
},
|
||||
"tool_choice": {},
|
||||
"tools": {
|
||||
"description": "Tool-related fields",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORFunctionTool"
|
||||
}
|
||||
},
|
||||
"top_logprobs": {
|
||||
"description": "Default to 0",
|
||||
"type": "integer"
|
||||
},
|
||||
"top_p": {
|
||||
"type": "number"
|
||||
},
|
||||
"truncation": {
|
||||
"description": "Truncation and reasoning",
|
||||
"type": "string"
|
||||
},
|
||||
"usage": {
|
||||
"description": "Usage statistics",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORUsage"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTextConfig": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"format": {
|
||||
"$ref": "#/definitions/schema.ORTextFormat"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTextFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"description": "\"text\" or \"json_schema\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORTopLogProb": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"bytes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"logprob": {
|
||||
"type": "number"
|
||||
},
|
||||
"token": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.ORUsage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"input_tokens_details": {
|
||||
"description": "Always present",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.ORInputTokensDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"output_tokens_details": {
|
||||
"description": "Always present",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/schema.OROutputTokensDetails"
|
||||
}
|
||||
]
|
||||
},
|
||||
"total_tokens": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OpenAIModel": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2774,6 +3318,114 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.OpenResponsesRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"allowed_tools": {
|
||||
"description": "Restrict which tools can be invoked",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"background": {
|
||||
"description": "Run request in background",
|
||||
"type": "boolean"
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"description": "Frequency penalty (-2.0 to 2.0)",
|
||||
"type": "number"
|
||||
},
|
||||
"include": {
|
||||
"description": "What to include in response",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"input": {
|
||||
"description": "string or []ORItemParam"
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
"logit_bias": {
|
||||
"description": "OpenAI-compatible extensions (not in Open Responses spec)",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "number",
|
||||
"format": "float64"
|
||||
}
|
||||
},
|
||||
"max_output_tokens": {
|
||||
"type": "integer"
|
||||
},
|
||||
"max_tool_calls": {
|
||||
"description": "Maximum number of tool calls",
|
||||
"type": "integer"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"model": {
|
||||
"type": "string"
|
||||
},
|
||||
"parallel_tool_calls": {
|
||||
"description": "Allow parallel tool calls",
|
||||
"type": "boolean"
|
||||
},
|
||||
"presence_penalty": {
|
||||
"description": "Presence penalty (-2.0 to 2.0)",
|
||||
"type": "number"
|
||||
},
|
||||
"previous_response_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"reasoning": {
|
||||
"$ref": "#/definitions/schema.ORReasoningParam"
|
||||
},
|
||||
"service_tier": {
|
||||
"description": "\"auto\"|\"default\"|priority hint",
|
||||
"type": "string"
|
||||
},
|
||||
"store": {
|
||||
"description": "Whether to store the response",
|
||||
"type": "boolean"
|
||||
},
|
||||
"stream": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number"
|
||||
},
|
||||
"text_format": {
|
||||
"description": "Additional parameters from spec"
|
||||
},
|
||||
"tool_choice": {
|
||||
"description": "\"auto\"|\"required\"|\"none\"|{type:\"function\",name:\"...\"}"
|
||||
},
|
||||
"tools": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/schema.ORFunctionTool"
|
||||
}
|
||||
},
|
||||
"top_logprobs": {
|
||||
"description": "Number of top logprobs to return",
|
||||
"type": "integer"
|
||||
},
|
||||
"top_p": {
|
||||
"type": "number"
|
||||
},
|
||||
"truncation": {
|
||||
"description": "\"auto\"|\"disabled\"",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"schema.P2PNodesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@@ -742,6 +742,292 @@ definitions:
|
||||
tunnelAddress:
|
||||
type: string
|
||||
type: object
|
||||
schema.ORAnnotation:
|
||||
properties:
|
||||
end_index:
|
||||
type: integer
|
||||
start_index:
|
||||
type: integer
|
||||
title:
|
||||
type: string
|
||||
type:
|
||||
description: url_citation
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
type: object
|
||||
schema.ORContentPart:
|
||||
properties:
|
||||
annotations:
|
||||
description: REQUIRED for output_text - must always be present (use [])
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORAnnotation'
|
||||
type: array
|
||||
detail:
|
||||
description: low|high|auto for images
|
||||
type: string
|
||||
file_data:
|
||||
type: string
|
||||
file_url:
|
||||
type: string
|
||||
filename:
|
||||
type: string
|
||||
image_url:
|
||||
type: string
|
||||
logprobs:
|
||||
description: REQUIRED for output_text - must always be present (use [])
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORLogProb'
|
||||
type: array
|
||||
refusal:
|
||||
type: string
|
||||
text:
|
||||
description: REQUIRED for output_text - must always be present (even if empty)
|
||||
type: string
|
||||
type:
|
||||
description: input_text|input_image|input_file|output_text|refusal
|
||||
type: string
|
||||
type: object
|
||||
schema.ORError:
|
||||
properties:
|
||||
code:
|
||||
type: string
|
||||
message:
|
||||
type: string
|
||||
param:
|
||||
type: string
|
||||
type:
|
||||
description: invalid_request|not_found|server_error|model_error|too_many_requests
|
||||
type: string
|
||||
type: object
|
||||
schema.ORFunctionTool:
|
||||
properties:
|
||||
description:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
parameters:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
strict:
|
||||
description: Always include in response
|
||||
type: boolean
|
||||
type:
|
||||
description: always "function"
|
||||
type: string
|
||||
type: object
|
||||
schema.ORIncompleteDetails:
|
||||
properties:
|
||||
reason:
|
||||
type: string
|
||||
type: object
|
||||
schema.ORInputTokensDetails:
|
||||
properties:
|
||||
cached_tokens:
|
||||
description: Always include, even if 0
|
||||
type: integer
|
||||
type: object
|
||||
schema.ORItemField:
|
||||
properties:
|
||||
arguments:
|
||||
type: string
|
||||
call_id:
|
||||
description: Function call fields
|
||||
type: string
|
||||
content:
|
||||
description: string or []ORContentPart for messages
|
||||
encrypted_content:
|
||||
description: Provider-specific encrypted content
|
||||
type: string
|
||||
id:
|
||||
description: Present for all output items
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
output:
|
||||
description: Function call output fields
|
||||
role:
|
||||
description: Message fields
|
||||
type: string
|
||||
status:
|
||||
description: in_progress|completed|incomplete
|
||||
type: string
|
||||
summary:
|
||||
description: Reasoning fields (for type == "reasoning")
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORContentPart'
|
||||
type: array
|
||||
type:
|
||||
description: message|function_call|function_call_output|reasoning|item_reference
|
||||
type: string
|
||||
type: object
|
||||
schema.ORLogProb:
|
||||
properties:
|
||||
bytes:
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
logprob:
|
||||
type: number
|
||||
token:
|
||||
type: string
|
||||
top_logprobs:
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORTopLogProb'
|
||||
type: array
|
||||
type: object
|
||||
schema.OROutputTokensDetails:
|
||||
properties:
|
||||
reasoning_tokens:
|
||||
description: Always include, even if 0
|
||||
type: integer
|
||||
type: object
|
||||
schema.ORReasoning:
|
||||
properties:
|
||||
effort:
|
||||
type: string
|
||||
summary:
|
||||
type: string
|
||||
type: object
|
||||
schema.ORReasoningParam:
|
||||
properties:
|
||||
effort:
|
||||
description: '"none"|"low"|"medium"|"high"|"xhigh"'
|
||||
type: string
|
||||
summary:
|
||||
description: '"auto"|"concise"|"detailed"'
|
||||
type: string
|
||||
type: object
|
||||
schema.ORResponseResource:
|
||||
properties:
|
||||
background:
|
||||
type: boolean
|
||||
completed_at:
|
||||
description: 'Required: present as number or null'
|
||||
type: integer
|
||||
created_at:
|
||||
type: integer
|
||||
error:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORError'
|
||||
description: Always present, null if no error
|
||||
frequency_penalty:
|
||||
type: number
|
||||
id:
|
||||
type: string
|
||||
incomplete_details:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORIncompleteDetails'
|
||||
description: Always present, null if complete
|
||||
instructions:
|
||||
type: string
|
||||
max_output_tokens:
|
||||
type: integer
|
||||
max_tool_calls:
|
||||
description: nullable
|
||||
type: integer
|
||||
metadata:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Metadata and operational flags
|
||||
type: object
|
||||
model:
|
||||
type: string
|
||||
object:
|
||||
description: always "response"
|
||||
type: string
|
||||
output:
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORItemField'
|
||||
type: array
|
||||
parallel_tool_calls:
|
||||
type: boolean
|
||||
presence_penalty:
|
||||
type: number
|
||||
previous_response_id:
|
||||
type: string
|
||||
prompt_cache_key:
|
||||
description: nullable
|
||||
type: string
|
||||
reasoning:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORReasoning'
|
||||
description: nullable
|
||||
safety_identifier:
|
||||
description: Safety and caching
|
||||
type: string
|
||||
service_tier:
|
||||
type: string
|
||||
status:
|
||||
description: in_progress|completed|failed|incomplete
|
||||
type: string
|
||||
store:
|
||||
type: boolean
|
||||
temperature:
|
||||
description: Sampling parameters (always required)
|
||||
type: number
|
||||
text:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORTextConfig'
|
||||
description: Text format configuration
|
||||
tool_choice: {}
|
||||
tools:
|
||||
description: Tool-related fields
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORFunctionTool'
|
||||
type: array
|
||||
top_logprobs:
|
||||
description: Default to 0
|
||||
type: integer
|
||||
top_p:
|
||||
type: number
|
||||
truncation:
|
||||
description: Truncation and reasoning
|
||||
type: string
|
||||
usage:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORUsage'
|
||||
description: Usage statistics
|
||||
type: object
|
||||
schema.ORTextConfig:
|
||||
properties:
|
||||
format:
|
||||
$ref: '#/definitions/schema.ORTextFormat'
|
||||
type: object
|
||||
schema.ORTextFormat:
|
||||
properties:
|
||||
type:
|
||||
description: '"text" or "json_schema"'
|
||||
type: string
|
||||
type: object
|
||||
schema.ORTopLogProb:
|
||||
properties:
|
||||
bytes:
|
||||
items:
|
||||
type: integer
|
||||
type: array
|
||||
logprob:
|
||||
type: number
|
||||
token:
|
||||
type: string
|
||||
type: object
|
||||
schema.ORUsage:
|
||||
properties:
|
||||
input_tokens:
|
||||
type: integer
|
||||
input_tokens_details:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.ORInputTokensDetails'
|
||||
description: Always present
|
||||
output_tokens:
|
||||
type: integer
|
||||
output_tokens_details:
|
||||
allOf:
|
||||
- $ref: '#/definitions/schema.OROutputTokensDetails'
|
||||
description: Always present
|
||||
total_tokens:
|
||||
type: integer
|
||||
type: object
|
||||
schema.OpenAIModel:
|
||||
properties:
|
||||
id:
|
||||
@@ -936,6 +1222,82 @@ definitions:
|
||||
total_tokens:
|
||||
type: integer
|
||||
type: object
|
||||
schema.OpenResponsesRequest:
|
||||
properties:
|
||||
allowed_tools:
|
||||
description: Restrict which tools can be invoked
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
background:
|
||||
description: Run request in background
|
||||
type: boolean
|
||||
frequency_penalty:
|
||||
description: Frequency penalty (-2.0 to 2.0)
|
||||
type: number
|
||||
include:
|
||||
description: What to include in response
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
input:
|
||||
description: string or []ORItemParam
|
||||
instructions:
|
||||
type: string
|
||||
logit_bias:
|
||||
additionalProperties:
|
||||
format: float64
|
||||
type: number
|
||||
description: OpenAI-compatible extensions (not in Open Responses spec)
|
||||
type: object
|
||||
max_output_tokens:
|
||||
type: integer
|
||||
max_tool_calls:
|
||||
description: Maximum number of tool calls
|
||||
type: integer
|
||||
metadata:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
model:
|
||||
type: string
|
||||
parallel_tool_calls:
|
||||
description: Allow parallel tool calls
|
||||
type: boolean
|
||||
presence_penalty:
|
||||
description: Presence penalty (-2.0 to 2.0)
|
||||
type: number
|
||||
previous_response_id:
|
||||
type: string
|
||||
reasoning:
|
||||
$ref: '#/definitions/schema.ORReasoningParam'
|
||||
service_tier:
|
||||
description: '"auto"|"default"|priority hint'
|
||||
type: string
|
||||
store:
|
||||
description: Whether to store the response
|
||||
type: boolean
|
||||
stream:
|
||||
type: boolean
|
||||
temperature:
|
||||
type: number
|
||||
text_format:
|
||||
description: Additional parameters from spec
|
||||
tool_choice:
|
||||
description: '"auto"|"required"|"none"|{type:"function",name:"..."}'
|
||||
tools:
|
||||
items:
|
||||
$ref: '#/definitions/schema.ORFunctionTool'
|
||||
type: array
|
||||
top_logprobs:
|
||||
description: Number of top logprobs to return
|
||||
type: integer
|
||||
top_p:
|
||||
type: number
|
||||
truncation:
|
||||
description: '"auto"|"disabled"'
|
||||
type: string
|
||||
type: object
|
||||
schema.P2PNodesResponse:
|
||||
properties:
|
||||
federated_nodes:
|
||||
@@ -1962,6 +2324,80 @@ paths:
|
||||
schema:
|
||||
$ref: '#/definitions/schema.JINARerankResponse'
|
||||
summary: Reranks a list of phrases by relevance to a given text query.
|
||||
/v1/responses:
|
||||
post:
|
||||
parameters:
|
||||
- description: Request body
|
||||
in: body
|
||||
name: request
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/schema.OpenResponsesRequest'
|
||||
responses:
|
||||
"200":
|
||||
description: Response
|
||||
schema:
|
||||
$ref: '#/definitions/schema.ORResponseResource'
|
||||
summary: Create a response using the Open Responses API
|
||||
/v1/responses/{id}:
|
||||
get:
|
||||
description: Retrieve a response by ID. Can be used for polling background responses
|
||||
or resuming streaming responses.
|
||||
parameters:
|
||||
- description: Response ID
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: Set to 'true' to resume streaming
|
||||
in: query
|
||||
name: stream
|
||||
type: string
|
||||
- description: Sequence number to resume from (for streaming)
|
||||
in: query
|
||||
name: starting_after
|
||||
type: integer
|
||||
responses:
|
||||
"200":
|
||||
description: Response
|
||||
schema:
|
||||
$ref: '#/definitions/schema.ORResponseResource'
|
||||
"400":
|
||||
description: Bad Request
|
||||
schema:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
"404":
|
||||
description: Not Found
|
||||
schema:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
summary: Get a response by ID
|
||||
/v1/responses/{id}/cancel:
|
||||
post:
|
||||
description: Cancel a background response if it's still in progress
|
||||
parameters:
|
||||
- description: Response ID
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: Response
|
||||
schema:
|
||||
$ref: '#/definitions/schema.ORResponseResource'
|
||||
"400":
|
||||
description: Bad Request
|
||||
schema:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
"404":
|
||||
description: Not Found
|
||||
schema:
|
||||
additionalProperties: true
|
||||
type: object
|
||||
summary: Cancel a response
|
||||
/v1/sound-generation:
|
||||
post:
|
||||
parameters:
|
||||
|
||||
Reference in New Issue
Block a user