Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: fix/mcp ... copilot/co (66 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 5ecda78be4 | |
| | 8da5ef7231 | |
| | 4758996936 | |
| | 9a50215867 | |
| | 4435c8af57 | |
| | 65a57daba6 | |
| | b5465cbc3a | |
| | 2de30440fe | |
| | 673a80a578 | |
| | 2554e9fabe | |
| | 5bfc3eebf8 | |
| | ab893fe302 | |
| | c88074a19e | |
| | 5ca8f0aea0 | |
| | 84234e531f | |
| | 4cbf9abfef | |
| | fdc2c0737c | |
| | f4b0a304d7 | |
| | d16ec7aa9e | |
| | d699b7ccdc | |
| | a4d224dd1b | |
| | 917c7aa9f3 | |
| | 5aa66842dd | |
| | f5dee90962 | |
| | 06323df457 | |
| | 98f28bf583 | |
| | 383312b50e | |
| | b736db4bbe | |
| | 09bc2e4a00 | |
| | c03e532a18 | |
| | fcb58ee243 | |
| | b2ff1cea2a | |
| | b964b3d53e | |
| | 0b26669d0b | |
| | 5a9698bc69 | |
| | 1fe0e9f74f | |
| | ffb2dc4666 | |
| | cfc2225fc7 | |
| | fd53978a7b | |
| | 7abc0242bb | |
| | 23df29fbd3 | |
| | fb9879949c | |
| | 1642b39cb8 | |
| | e6ba26c3e7 | |
| | 26c4f80d1b | |
| | 5add7b47f5 | |
| | 3244ccc224 | |
| | 4f7b6b0bff | |
| | 3a629cea2f | |
| | f917feda29 | |
| | e2018cdc8f | |
| | a3b8a94187 | |
| | 41de7d32ad | |
| | 93364df0a8 | |
| | 21c84f432f | |
| | 9d3da0bed5 | |
| | 1b063b5595 | |
| | 560bf50299 | |
| | a7e155240b | |
| | 793e4907a2 | |
| | d38811560c | |
| | 33cc0b8e13 | |
| | 4cd95b8a9d | |
| | 8c504113a2 | |
| | 666d110714 | |
| | 641606ae93 | |

.github/workflows/backend.yml (vendored): 670 changed lines

File diff suppressed because it is too large.

.github/workflows/dependabot_auto.yml (vendored): 2 changed lines

@@ -14,7 +14,7 @@ jobs:
steps:
- name: Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@v2.4.0
uses: dependabot/fetch-metadata@v2.5.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
skip-commit-verification: true

.github/workflows/generate_grpc_cache.yaml (vendored): 2 changed lines

@@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
include:
- grpc-base-image: ubuntu:22.04
- grpc-base-image: ubuntu:24.04
runs-on: 'ubuntu-latest'
platforms: 'linux/amd64,linux/arm64'
runs-on: ${{matrix.runs-on}}

.github/workflows/generate_intel_image.yaml (vendored): 4 changed lines

@@ -15,7 +15,7 @@ jobs:
strategy:
matrix:
include:
- base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
- base-image: intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04
runs-on: 'arc-runner-set'
platforms: 'linux/amd64'
runs-on: ${{matrix.runs-on}}
@@ -53,7 +53,7 @@ jobs:
BASE_IMAGE=${{ matrix.base-image }}
context: .
file: ./Dockerfile
tags: quay.io/go-skynet/intel-oneapi-base:latest
tags: quay.io/go-skynet/intel-oneapi-base:24.04
push: true
target: intel
platforms: ${{ matrix.platforms }}

.github/workflows/image-pr.yml (vendored): 187 changed lines

@@ -1,94 +1,95 @@
---
name: 'build container images tests'

on:
pull_request:

concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
fail-fast: false
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2204'
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2204'
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2204'
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'false'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'

name: 'build container images tests'

on:
pull_request:

concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
fail-fast: false
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
grpc-base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
grpc-base-image: "ubuntu:24.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'vulkan'
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
makeflags: "--jobs=4 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'false'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'

.github/workflows/image.yml (vendored): 372 changed lines

@@ -1,187 +1,187 @@
---
name: 'build container images'

on:
push:
branches:
- master
tags:
- '*'

concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
hipblas-jobs:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas"
ubuntu-version: '2204'

core-image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: ''
base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
aio: "-aio-cpu"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
aio: "-aio-gpu-nvidia-cuda-11"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-12"
ubuntu-version: '2204'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-13"
ubuntu-version: '2204'
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan"
ubuntu-version: '2204'
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel"
ubuntu-version: '2204'

gh-runner:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'
ubuntu-version: "2204"
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'

name: 'build container images'

on:
push:
branches:
- master
tags:
- '*'

concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
hipblas-jobs:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
grpc-base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas"
ubuntu-version: '2404'
ubuntu-codename: 'noble'

core-image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: ''
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
aio: "-aio-cpu"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-12"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-13"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'vulkan'
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
grpc-base-image: "ubuntu:24.04"
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel"
ubuntu-version: '2404'
ubuntu-codename: 'noble'

gh-runner:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'
ubuntu-version: "2204"
ubuntu-codename: 'jammy'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
ubuntu-codename: 'noble'

.github/workflows/image_build.yml (vendored): 9 changed lines

@@ -23,7 +23,7 @@ on:
type: string
cuda-minor-version:
description: 'CUDA minor version'
default: "4"
default: "9"
type: string
platforms:
description: 'Platforms'
@@ -61,6 +61,11 @@ on:
required: false
default: '2204'
type: string
ubuntu-codename:
description: 'Ubuntu codename'
required: false
default: 'noble'
type: string
secrets:
dockerUsername:
required: true
@@ -244,6 +249,7 @@ jobs:
MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: .
file: ./Dockerfile
cache-from: type=gha
@@ -272,6 +278,7 @@ jobs:
MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: .
file: ./Dockerfile
cache-from: type=gha

.github/workflows/test-extra.yml (vendored): 19 changed lines

@@ -247,3 +247,22 @@ jobs:
run: |
make --jobs=5 --output-sync=target -C backend/python/coqui
make --jobs=5 --output-sync=target -C backend/python/coqui test
tests-moonshine:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test moonshine
run: |
make --jobs=5 --output-sync=target -C backend/python/moonshine
make --jobs=5 --output-sync=target -C backend/python/moonshine test

.gitignore (vendored): 1 changed line

@@ -25,6 +25,7 @@ go-bert
# LocalAI build binary
LocalAI
/local-ai
/local-ai-launcher
# prevent above rules from omitting the helm chart
!charts/*
# prevent above rules from omitting the api/localai folder

AGENTS.md: 203 changed lines

@@ -2,6 +2,163 @@
Building and testing the project depends on the components involved and the platform where development is taking place. Due to the amount of context required it's usually best not to try building or testing the project unless the user requests it. If you must build the project then inspect the Makefile in the project root and the Makefiles of any backends that are effected by changes you are making. In addition the workflows in .github/workflows can be used as a reference when it is unclear how to build or test a component. The primary Makefile contains targets for building inside or outside Docker, if the user has not previously specified a preference then ask which they would like to use.

## Building a specified backend

Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build bark for ROCM/hipblas

- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference it lists the needed args in the `include` job strategy matrix
- l4t and cublas also requires the CUDA major and minor version
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
- Unless the user specifies that they want you to run the command, then just print it because not all agent frontends handle long running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCM instead of hipblas, or Intel instead of SYCL or NVIDIA insted of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.

## Adding a New Backend

When adding a new backend to LocalAI, you need to update several files to ensure the backend is properly built, tested, and registered. Here's a step-by-step guide based on the pattern used for adding backends like `moonshine`:

### 1. Create Backend Directory Structure

Create the backend directory under the appropriate location:
- **Python backends**: `backend/python/<backend-name>/`
- **Go backends**: `backend/go/<backend-name>/`
- **C++ backends**: `backend/cpp/<backend-name>/`

For Python backends, you'll typically need:
- `backend.py` - Main gRPC server implementation
- `Makefile` - Build configuration
- `install.sh` - Installation script for dependencies
- `protogen.sh` - Protocol buffer generation script
- `requirements.txt` - Python dependencies
- `run.sh` - Runtime script
- `test.py` / `test.sh` - Test files

### 2. Add Build Configurations to `.github/workflows/backend.yml`

Add build matrix entries for each platform/GPU type you want to support. Look at similar backends (e.g., `chatterbox`, `faster-whisper`) for reference.

**Placement in file:**
- CPU builds: Add after other CPU builds (e.g., after `cpu-chatterbox`)
- CUDA 12 builds: Add after other CUDA 12 builds (e.g., after `gpu-nvidia-cuda-12-chatterbox`)
- CUDA 13 builds: Add after other CUDA 13 builds (e.g., after `gpu-nvidia-cuda-13-chatterbox`)

**Additional build types you may need:**
- ROCm/HIP: Use `build-type: 'hipblas'` with `base-image: "rocm/dev-ubuntu-24.04:6.4.4"`
- Intel/SYCL: Use `build-type: 'intel'` or `build-type: 'sycl_f16'`/`sycl_f32` with `base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"`
- L4T (ARM): Use `build-type: 'l4t'` with `platforms: 'linux/arm64'` and `runs-on: 'ubuntu-24.04-arm'`

### 3. Add Backend Metadata to `backend/index.yaml`

**Step 3a: Add Meta Definition**

Add a YAML anchor definition in the `## metas` section (around line 2-300). Look for similar backends to use as a template such as `diffusers` or `chatterbox`

**Step 3b: Add Image Entries**

Add image entries at the end of the file, following the pattern of similar backends such as `diffusers` or `chatterbox`. Include both `latest` (production) and `master` (development) tags.

### 4. Update the Makefile

The Makefile needs to be updated in several places to support building and testing the new backend:

**Step 4a: Add to `.NOTPARALLEL`**

Add `backends/<backend-name>` to the `.NOTPARALLEL` line (around line 2) to prevent parallel execution conflicts:

```makefile
.NOTPARALLEL: ... backends/<backend-name>
```

**Step 4b: Add to `prepare-test-extra`**

Add the backend to the `prepare-test-extra` target (around line 312) to prepare it for testing:

```makefile
prepare-test-extra: protogen-python
	...
	$(MAKE) -C backend/python/<backend-name>
```

**Step 4c: Add to `test-extra`**

Add the backend to the `test-extra` target (around line 319) to run its tests:

```makefile
test-extra: prepare-test-extra
	...
	$(MAKE) -C backend/python/<backend-name> test
```

**Step 4d: Add Backend Definition**

Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:

**For Python backends with root context** (like `faster-whisper`, `bark`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```

**For Python backends with `./backend` context** (like `chatterbox`, `moonshine`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|./backend|false|true
```

**For Go backends**:
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|golang|.|false|true
```

**Step 4e: Generate Docker Build Target**

Add an eval call to generate the docker-build target (around line 480-501):

```makefile
$(eval $(call generate-docker-build-target,$(BACKEND_<BACKEND_NAME>)))
```

**Step 4f: Add to `docker-build-backends`**

Add `docker-build-<backend-name>` to the `docker-build-backends` target (around line 507):

```makefile
docker-build-backends: ... docker-build-<backend-name>
```

**Determining the Context:**

- If the backend is in `backend/python/<backend-name>/` and uses `./backend` as context in the workflow file, use `./backend` context
- If the backend is in `backend/python/<backend-name>/` but uses `.` as context in the workflow file, use `.` context
- Check similar backends to determine the correct context

### 5. Verification Checklist

After adding a new backend, verify:

- [ ] Backend directory structure is complete with all necessary files
- [ ] Build configurations added to `.github/workflows/backend.yml` for all desired platforms
- [ ] Meta definition added to `backend/index.yaml` in the `## metas` section
- [ ] Image entries added to `backend/index.yaml` for all build variants (latest + development)
- [ ] Tag suffixes match between workflow file and index.yaml
- [ ] Makefile updated with all 6 required changes (`.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, backend definition, docker-build target eval, `docker-build-backends`)
- [ ] No YAML syntax errors (check with linter)
- [ ] No Makefile syntax errors (check with linter)
- [ ] Follows the same pattern as similar backends (e.g., if it's a transcription backend, follow `faster-whisper` pattern)

### 6. Example: Adding a Python Backend

For reference, when `moonshine` was added:
- **Files created**: `backend/python/moonshine/{backend.py, Makefile, install.sh, protogen.sh, requirements.txt, run.sh, test.py, test.sh}`
- **Workflow entries**: 3 build configurations (CPU, CUDA 12, CUDA 13)
- **Index entries**: 1 meta definition + 6 image entries (cpu, cuda12, cuda13 × latest/development)
- **Makefile updates**:
  - Added to `.NOTPARALLEL` line
  - Added to `prepare-test-extra` and `test-extra` targets
  - Added `BACKEND_MOONSHINE = moonshine|python|./backend|false|true`
  - Added eval for docker-build target generation
  - Added `docker-build-moonshine` to `docker-build-backends`

# Coding style

- The project has the following .editorconfig
@@ -77,3 +234,49 @@ When fixing compilation errors after upstream changes:
- HTTP server uses `server_routes` with HTTP handlers
- Both use the same `server_context` and task queue infrastructure
- gRPC methods: `LoadModel`, `Predict`, `PredictStream`, `Embedding`, `Rerank`, `TokenizeString`, `GetMetrics`, `Health`

## Tool Call Parsing Maintenance

When working on JSON/XML tool call parsing functionality, always check llama.cpp for reference implementation and updates:

### Checking for XML Parsing Changes

1. **Review XML Format Definitions**: Check `llama.cpp/common/chat-parser-xml-toolcall.h` for `xml_tool_call_format` struct changes
2. **Review Parsing Logic**: Check `llama.cpp/common/chat-parser-xml-toolcall.cpp` for parsing algorithm updates
3. **Review Format Presets**: Check `llama.cpp/common/chat-parser.cpp` for new XML format presets (search for `xml_tool_call_format form`)
4. **Review Model Lists**: Check `llama.cpp/common/chat.h` for `COMMON_CHAT_FORMAT_*` enum values that use XML parsing:
   - `COMMON_CHAT_FORMAT_GLM_4_5`
   - `COMMON_CHAT_FORMAT_MINIMAX_M2`
   - `COMMON_CHAT_FORMAT_KIMI_K2`
   - `COMMON_CHAT_FORMAT_QWEN3_CODER_XML`
   - `COMMON_CHAT_FORMAT_APRIEL_1_5`
   - `COMMON_CHAT_FORMAT_XIAOMI_MIMO`
   - Any new formats added

### Model Configuration Options

Always check `llama.cpp` for new model configuration options that should be supported in LocalAI:

1. **Check Server Context**: Review `llama.cpp/tools/server/server-context.cpp` for new parameters
2. **Check Chat Params**: Review `llama.cpp/common/chat.h` for `common_chat_params` struct changes
3. **Check Server Options**: Review `llama.cpp/tools/server/server.cpp` for command-line argument changes
4. **Examples of options to check**:
   - `ctx_shift` - Context shifting support
   - `parallel_tool_calls` - Parallel tool calling
   - `reasoning_format` - Reasoning format options
   - Any new flags or parameters

### Implementation Guidelines

1. **Feature Parity**: Always aim for feature parity with llama.cpp's implementation
2. **Test Coverage**: Add tests for new features matching llama.cpp's behavior
3. **Documentation**: Update relevant documentation when adding new formats or options
4. **Backward Compatibility**: Ensure changes don't break existing functionality

### Files to Monitor

- `llama.cpp/common/chat-parser-xml-toolcall.h` - Format definitions
- `llama.cpp/common/chat-parser-xml-toolcall.cpp` - Parsing logic
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options

@@ -78,6 +78,20 @@ LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio make run-e2e-aio

We are welcome the contribution of the documents, please open new PR or create a new issue. The documentation is available under `docs/` https://github.com/mudler/LocalAI/tree/master/docs

### Gallery YAML Schema

LocalAI provides a JSON Schema for gallery model YAML files at:

`core/schema/gallery-model.schema.json`

This schema mirrors the internal gallery model configuration and can be used by editors (such as VS Code) to enable autocomplete, validation, and inline documentation when creating or modifying gallery files.

To use it with the YAML language server, add the following comment at the top of a gallery YAML file:

```yaml
# yaml-language-server: $schema=../core/schema/gallery-model.schema.json
```

## Community and Communication

- You can reach out via the Github issue tracker.

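The "Adding a New Backend" guide above boils down to six Makefile touch points. As a minimal sketch only, for a hypothetical backend named `example-backend` (a placeholder, not part of this change set), they would look roughly like this:

```makefile
# Sketch for a hypothetical Python backend "example-backend" built from the
# ./backend context; names are placeholders, and in the real Makefile these
# entries are appended to the existing lists and targets shown in the guide.
.NOTPARALLEL: backends/example-backend

prepare-test-extra: protogen-python
	$(MAKE) -C backend/python/example-backend

test-extra: prepare-test-extra
	$(MAKE) -C backend/python/example-backend test

# BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG
BACKEND_EXAMPLE_BACKEND = example-backend|python|./backend|false|true
$(eval $(call generate-docker-build-target,$(BACKEND_EXAMPLE_BACKEND)))

docker-build-backends: docker-build-example-backend
```
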
Dockerfile: 59 changed lines

@@ -1,6 +1,7 @@
ARG BASE_IMAGE=ubuntu:22.04
ARG BASE_IMAGE=ubuntu:24.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
ARG INTEL_BASE_IMAGE=${BASE_IMAGE}
ARG UBUNTU_CODENAME=noble

FROM ${BASE_IMAGE} AS requirements

@@ -9,7 +10,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg && \
ffmpeg libopenblas0 libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

@@ -23,7 +24,7 @@ ARG SKIP_DRIVERS=false
ARG TARGETARCH
ARG TARGETVARIANT
ENV BUILD_TYPE=${BUILD_TYPE}
ARG UBUNTU_VERSION=2204
ARG UBUNTU_VERSION=2404

RUN mkdir -p /run/localai
RUN echo "default" > /run/localai/capability
@@ -34,11 +35,45 @@ RUN <<EOT bash
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils mesa-vulkan-drivers
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "vulkan" > /run/localai/capability
@@ -141,13 +176,12 @@ ENV PATH=/opt/rocm/bin:${PATH}
# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
FROM requirements-drivers AS build-requirements

ARG GO_VERSION=1.22.6
ARG CMAKE_VERSION=3.26.4
ARG GO_VERSION=1.25.4
ARG CMAKE_VERSION=3.31.10
ARG CMAKE_FROM_SOURCE=false
ARG TARGETARCH
ARG TARGETVARIANT

RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
@@ -204,9 +238,10 @@ WORKDIR /build
# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
# This is a temporary workaround until Intel fixes their repository
FROM ${INTEL_BASE_IMAGE} AS intel
ARG UBUNTU_CODENAME=noble
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu ${UBUNTU_CODENAME}/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
RUN apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \

@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:22.04
ARG BASE_IMAGE=ubuntu:24.04

FROM ${BASE_IMAGE}

293
Makefile
293
Makefile
@@ -1,3 +1,6 @@
|
||||
# Disable parallel execution for backend builds
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine
|
||||
|
||||
GOCMD=go
|
||||
GOTEST=$(GOCMD) test
|
||||
GOVET=$(GOCMD) vet
|
||||
@@ -6,10 +9,14 @@ LAUNCHER_BINARY_NAME=local-ai-launcher
|
||||
|
||||
CUDA_MAJOR_VERSION?=13
|
||||
CUDA_MINOR_VERSION?=0
|
||||
UBUNTU_VERSION?=2204
|
||||
UBUNTU_CODENAME?=noble
|
||||
|
||||
GORELEASER?=
|
||||
|
||||
export BUILD_TYPE?=
|
||||
export CUDA_MAJOR_VERSION?=12
|
||||
export CUDA_MINOR_VERSION?=9
|
||||
|
||||
GO_TAGS?=
|
||||
BUILD_ID?=
|
||||
@@ -155,7 +162,17 @@ test: test-models/testmodel.ggml protogen-go
|
||||
########################################################
|
||||
|
||||
docker-build-aio:
|
||||
docker build --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
|
||||
docker build \
|
||||
--build-arg MAKEFLAGS="--jobs=5 --output-sync=target" \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
-t local-ai:tests -f Dockerfile .
|
||||
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test $(MAKE) docker-aio
|
||||
|
||||
e2e-aio:
|
||||
@@ -177,7 +194,17 @@ prepare-e2e:
|
||||
mkdir -p $(TEST_DIR)
|
||||
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
|
||||
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
|
||||
docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 -t localai-tests .
|
||||
docker build \
|
||||
--build-arg IMAGE_TYPE=core \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
-t localai-tests .
|
||||
|
||||
run-e2e-image:
|
||||
ls -liah $(abspath ./tests/e2e-fixtures)
|
||||
@@ -288,6 +315,7 @@ prepare-test-extra: protogen-python
|
||||
$(MAKE) -C backend/python/chatterbox
|
||||
$(MAKE) -C backend/python/vllm
|
||||
$(MAKE) -C backend/python/vibevoice
|
||||
$(MAKE) -C backend/python/moonshine
|
||||
|
||||
test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/transformers test
|
||||
@@ -295,11 +323,12 @@ test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/chatterbox test
|
||||
$(MAKE) -C backend/python/vllm test
|
||||
$(MAKE) -C backend/python/vibevoice test
|
||||
$(MAKE) -C backend/python/moonshine test
|
||||
|
||||
DOCKER_IMAGE?=local-ai
|
||||
DOCKER_AIO_IMAGE?=local-ai-aio
|
||||
IMAGE_TYPE?=core
|
||||
BASE_IMAGE?=ubuntu:22.04
|
||||
BASE_IMAGE?=ubuntu:24.04
|
||||
|
||||
docker:
|
||||
docker build \
|
||||
@@ -308,24 +337,34 @@ docker:
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE) .
|
||||
|
||||
docker-cuda11:
|
||||
docker-cuda12:
|
||||
docker build \
|
||||
--build-arg CUDA_MAJOR_VERSION=11 \
|
||||
--build-arg CUDA_MINOR_VERSION=8 \
|
||||
--build-arg CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION} \
|
||||
--build-arg CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION} \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
-t $(DOCKER_IMAGE)-cuda-11 .
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE)-cuda-12 .
|
||||
|
||||
docker-aio:
|
||||
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio .
|
||||
|
||||
docker-aio-all:
|
||||
@@ -334,66 +373,31 @@ docker-aio-all:
|
||||
|
||||
docker-image-intel:
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \
|
||||
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04 \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) .
|
||||
--build-arg BUILD_TYPE=intel \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE) .
|
||||
|
||||
########################################################
|
||||
## Backends
|
||||
########################################################
|
||||
|
||||
# Pattern rule for standard backends (docker-based)
|
||||
# This matches all backends that use docker-build-* and docker-save-*
|
||||
backends/%: docker-build-% docker-save-% build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/$*.tar)"
|
||||
|
||||
backends/diffusers: docker-build-diffusers docker-save-diffusers build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
|
||||
|
||||
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
|
||||
|
||||
backends/piper: docker-build-piper docker-save-piper build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
|
||||
|
||||
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
|
||||
|
||||
backends/whisper: docker-build-whisper docker-save-whisper build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
|
||||
|
||||
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
|
||||
|
||||
backends/local-store: docker-build-local-store docker-save-local-store build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
|
||||
|
||||
backends/huggingface: docker-build-huggingface docker-save-huggingface build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
|
||||
|
||||
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
|
||||
|
||||
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
|
||||
|
||||
backends/kokoro: docker-build-kokoro docker-save-kokoro build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
|
||||
|
||||
backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
|
||||
|
||||
# Darwin-specific backends (keep as explicit targets since they have special build logic)
|
||||
backends/llama-cpp-darwin: build
|
||||
bash ./scripts/build/llama-cpp-darwin.sh
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
|
||||
|
||||
backends/neutts: docker-build-neutts docker-save-neutts build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"
|
||||
|
||||
backends/vllm: docker-build-vllm docker-save-vllm build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/vllm.tar)"
|
||||
|
||||
backends/vibevoice: docker-build-vibevoice docker-save-vibevoice build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/vibevoice.tar)"
|
||||
|
||||
build-darwin-python-backend: build
|
||||
bash ./scripts/build/python-darwin.sh
|
||||
|
||||
@@ -423,121 +427,88 @@ backends/stablediffusion-ggml-darwin:
|
||||
backend-images:
|
||||
mkdir -p backend-images
|
||||
|
||||
docker-build-llama-cpp:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
|
||||
# Backend metadata: BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG
|
||||
# llama-cpp is special - uses llama-cpp Dockerfile and doesn't need BACKEND arg
|
||||
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false
|
||||
|
||||
docker-build-bark-cpp:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp .
|
||||
# Golang backends
|
||||
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
|
||||
BACKEND_PIPER = piper|golang|.|false|true
|
||||
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
|
||||
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
|
||||
BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
|
||||
BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
|
||||
BACKEND_WHISPER = whisper|golang|.|false|true
|
||||
|
||||
docker-build-piper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper .
|
||||
# Python backends with root context
|
||||
BACKEND_RERANKERS = rerankers|python|.|false|true
|
||||
BACKEND_TRANSFORMERS = transformers|python|.|false|true
|
||||
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
|
||||
BACKEND_COQUI = coqui|python|.|false|true
|
||||
BACKEND_BARK = bark|python|.|false|true
|
||||
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
|
||||
|
||||
docker-build-local-store:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store .
|
||||
# Python backends with ./backend context
|
||||
BACKEND_RFDETR = rfdetr|python|./backend|false|true
|
||||
BACKEND_KITTEN_TTS = kitten-tts|python|./backend|false|true
|
||||
BACKEND_NEUTTS = neutts|python|./backend|false|true
|
||||
BACKEND_KOKORO = kokoro|python|./backend|false|true
|
||||
BACKEND_VLLM = vllm|python|./backend|false|true
|
||||
BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true
|
||||
BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true
|
||||
BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true
|
||||
BACKEND_MOONSHINE = moonshine|python|./backend|false|true
|
||||
|
||||
docker-build-huggingface:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface .
|
||||
# Helper function to build docker image for a backend
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
define docker-build-backend
docker build $(if $(filter-out false,$(4)),$(4)) \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
$(if $(filter true,$(5)),--build-arg BACKEND=$(1)) \
-t local-ai-backend:$(1) -f backend/Dockerfile.$(2) $(3)
endef

docker-build-rfdetr:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend

# Generate docker-build targets from backend definitions
define generate-docker-build-target
docker-build-$(word 1,$(subst |, ,$(1))):
$$(call docker-build-backend,$(word 1,$(subst |, ,$(1))),$(word 2,$(subst |, ,$(1))),$(word 3,$(subst |, ,$(1))),$(word 4,$(subst |, ,$(1))),$(word 5,$(subst |, ,$(1))))
endef
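To make the metadata encoding concrete, here is an approximate sketch of what the generated `docker-build-whisper` target ends up invoking, given `BACKEND_WHISPER = whisper|golang|.|false|true` (no progress flag, `BACKEND` build-arg enabled, root build context). The actual values of the `BUILD_TYPE`, `BASE_IMAGE`, CUDA and Ubuntu variables depend on the build environment:

```bash
# Approximate expansion of `make docker-build-whisper` with the metadata above.
docker build \
  --build-arg BUILD_TYPE="${BUILD_TYPE}" \
  --build-arg BASE_IMAGE="${BASE_IMAGE}" \
  --build-arg CUDA_MAJOR_VERSION="${CUDA_MAJOR_VERSION}" \
  --build-arg CUDA_MINOR_VERSION="${CUDA_MINOR_VERSION}" \
  --build-arg UBUNTU_VERSION="${UBUNTU_VERSION}" \
  --build-arg UBUNTU_CODENAME="${UBUNTU_CODENAME}" \
  --build-arg BACKEND=whisper \
  -t local-ai-backend:whisper -f backend/Dockerfile.golang .
```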
|
||||
|
||||
docker-build-kitten-tts:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
|
||||
# Generate all docker-build targets
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
|
||||
|
||||
docker-save-kitten-tts: backend-images
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar

# Pattern rule for docker-save targets
docker-save-%: backend-images
docker save local-ai-backend:$* -o backend-images/$*.tar
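As a usage sketch, the `%` stem lets any backend image be exported with the same rule once its image has been built; for example:

```bash
make docker-build-piper   # builds the local-ai-backend:piper image
make docker-save-piper    # runs: docker save local-ai-backend:piper -o backend-images/piper.tar
```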
|
||||
|
||||
docker-save-chatterbox: backend-images
|
||||
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
|
||||
|
||||
docker-save-vibevoice: backend-images
|
||||
docker save local-ai-backend:vibevoice -o backend-images/vibevoice.tar
|
||||
|
||||
docker-build-neutts:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend
|
||||
|
||||
docker-save-neutts: backend-images
|
||||
docker save local-ai-backend:neutts -o backend-images/neutts.tar
|
||||
|
||||
docker-build-kokoro:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
|
||||
|
||||
docker-build-vllm:
|
||||
docker build --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm ./backend
|
||||
|
||||
docker-save-vllm: backend-images
|
||||
docker save local-ai-backend:vllm -o backend-images/vllm.tar
|
||||
|
||||
docker-save-kokoro: backend-images
|
||||
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
|
||||
|
||||
docker-save-rfdetr: backend-images
|
||||
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
|
||||
|
||||
docker-save-huggingface: backend-images
|
||||
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
|
||||
|
||||
docker-save-local-store: backend-images
|
||||
docker save local-ai-backend:local-store -o backend-images/local-store.tar
|
||||
|
||||
docker-build-silero-vad:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad .
|
||||
|
||||
docker-save-silero-vad: backend-images
|
||||
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
|
||||
|
||||
docker-save-piper: backend-images
|
||||
docker save local-ai-backend:piper -o backend-images/piper.tar
|
||||
|
||||
docker-save-llama-cpp: backend-images
|
||||
docker save local-ai-backend:llama-cpp -o backend-images/llama-cpp.tar
|
||||
|
||||
docker-save-bark-cpp: backend-images
|
||||
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
|
||||
|
||||
docker-build-stablediffusion-ggml:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml .
|
||||
|
||||
docker-save-stablediffusion-ggml: backend-images
|
||||
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
|
||||
|
||||
docker-build-rerankers:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
|
||||
|
||||
docker-build-transformers:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
|
||||
|
||||
docker-build-diffusers:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend
|
||||
|
||||
docker-save-diffusers: backend-images
|
||||
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar
|
||||
|
||||
docker-build-whisper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
|
||||
|
||||
docker-save-whisper: backend-images
|
||||
docker save local-ai-backend:whisper -o backend-images/whisper.tar
|
||||
|
||||
docker-build-faster-whisper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
|
||||
|
||||
docker-build-coqui:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
|
||||
|
||||
docker-build-bark:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
|
||||
|
||||
docker-build-chatterbox:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend
|
||||
|
||||
docker-build-vibevoice:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vibevoice -f backend/Dockerfile.python --build-arg BACKEND=vibevoice ./backend
|
||||
|
||||
docker-build-exllama2:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
|
||||
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine
|
||||
|
||||
########################################################
|
||||
### END Backends
|
||||
|
||||
README.md
@@ -152,9 +152,6 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gp
|
||||
# CUDA 12.0
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
|
||||
|
||||
# CUDA 11.7
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
|
||||
|
||||
# NVIDIA Jetson (L4T) ARM64
|
||||
# CUDA 12 (for Nvidia AGX Orin and similar platforms)
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
|
||||
@@ -193,9 +190,6 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-ai
|
||||
# NVIDIA CUDA 12 version
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
# NVIDIA CUDA 11 version
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
|
||||
|
||||
# Intel GPU version
|
||||
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel
|
||||
|
||||
@@ -279,9 +273,9 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
### Text Generation & Language Models
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
|
||||
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
|
||||
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
|
||||
| **transformers** | HuggingFace transformers framework | CUDA 11/12/13, ROCm, Intel, CPU |
|
||||
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **exllama2** | GPTQ inference library | CUDA 12/13 |
|
||||
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
|
||||
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
|
||||
@@ -295,7 +289,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
|
||||
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **chatterbox** | Production-grade TTS | CUDA 11/12/13, CPU |
|
||||
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
|
||||
| **piper** | Fast neural TTS system | CPU |
|
||||
| **kitten-tts** | Kitten TTS models | CPU |
|
||||
| **silero-vad** | Voice Activity Detection | CPU |
|
||||
@@ -306,13 +300,13 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12/13, Intel SYCL, Vulkan, CPU |
|
||||
| **diffusers** | HuggingFace diffusion models | CUDA 11/12/13, ROCm, Intel, Metal, CPU |
|
||||
| **diffusers** | HuggingFace diffusion models | CUDA 12/13, ROCm, Intel, Metal, CPU |
|
||||
|
||||
### Specialized AI Tasks
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **rfdetr** | Real-time object detection | CUDA 12/13, Intel, CPU |
|
||||
| **rerankers** | Document reranking API | CUDA 11/12/13, ROCm, Intel, CPU |
|
||||
| **rerankers** | Document reranking API | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **local-store** | Vector database | CPU |
|
||||
| **huggingface** | HuggingFace API integration | API-based |
|
||||
|
||||
@@ -320,7 +314,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|
||||
| Acceleration Type | Supported Backends | Hardware Support |
|
||||
|-------------------|-------------------|------------------|
|
||||
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
|
||||
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice | AMD Graphics |
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG BACKEND=rerankers
|
||||
@@ -12,8 +12,8 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG GO_VERSION=1.22.6
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG GO_VERSION=1.25.4
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -40,11 +40,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -148,6 +182,8 @@ EOT
|
||||
|
||||
COPY . /LocalAI
|
||||
|
||||
RUN git config --global --add safe.directory /LocalAI
|
||||
|
||||
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
|
||||
|
||||
FROM scratch
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ FROM ${GRPC_BASE_IMAGE} AS grpc
|
||||
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
|
||||
ARG GRPC_VERSION=v1.65.0
|
||||
ARG CMAKE_FROM_SOURCE=false
|
||||
ARG CMAKE_VERSION=3.26.4
|
||||
# CUDA Toolkit 13.x compatibility: CMake 3.31.9+ fixes toolchain detection/arch table issues
|
||||
ARG CMAKE_VERSION=3.31.10
|
||||
|
||||
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
|
||||
|
||||
@@ -26,7 +27,7 @@ RUN apt-get update && \
|
||||
|
||||
# Install CMake (the version in 22.04 is too old)
|
||||
RUN <<EOT bash
|
||||
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
|
||||
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
|
||||
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
|
||||
else
|
||||
apt-get update && \
|
||||
@@ -50,6 +51,13 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
|
||||
rm -rf /build
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG CMAKE_FROM_SOURCE=false
|
||||
ARG CMAKE_VERSION=3.31.10
|
||||
# We can target specific CUDA ARCHITECTURES like --build-arg CUDA_DOCKER_ARCH='75;86;89;120'
|
||||
ARG CUDA_DOCKER_ARCH
|
||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||
ARG CMAKE_ARGS
|
||||
ENV CMAKE_ARGS=${CMAKE_ARGS}
|
||||
ARG BACKEND=rerankers
|
||||
ARG BUILD_TYPE
|
||||
ENV BUILD_TYPE=${BUILD_TYPE}
|
||||
@@ -61,8 +69,8 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG GO_VERSION=1.22.6
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG GO_VERSION=1.25.4
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -70,6 +78,7 @@ RUN apt-get update && \
|
||||
ccache git \
|
||||
ca-certificates \
|
||||
make \
|
||||
pkg-config libcurl4-openssl-dev \
|
||||
curl unzip \
|
||||
libssl-dev wget && \
|
||||
apt-get clean && \
|
||||
@@ -88,11 +97,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -189,7 +232,7 @@ EOT
|
||||
|
||||
# Install CMake (the version in 22.04 is too old)
|
||||
RUN <<EOT bash
|
||||
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
|
||||
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
|
||||
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
|
||||
else
|
||||
apt-get update && \
|
||||
@@ -205,19 +248,30 @@ COPY --from=grpc /opt/grpc /usr/local
|
||||
|
||||
COPY . /LocalAI
|
||||
|
||||
## Otherwise just run the normal build
RUN <<EOT bash
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
make llama-cpp-grpc && make llama-cpp-rpc-server; \
else \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
make llama-cpp-avx2 && \
make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server; \
fi

RUN <<'EOT' bash
set -euxo pipefail

if [[ -n "${CUDA_DOCKER_ARCH:-}" ]]; then
CUDA_ARCH_ESC="${CUDA_DOCKER_ARCH//;/\\;}"
export CMAKE_ARGS="${CMAKE_ARGS:-} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH_ESC}"
echo "CMAKE_ARGS(env) = ${CMAKE_ARGS}"
rm -rf /LocalAI/backend/cpp/llama-cpp-*-build
fi

if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
else
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-avx
make llama-cpp-avx2
make llama-cpp-avx512
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
fi
EOT
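For context, the `CUDA_DOCKER_ARCH` handling above is driven by a Docker build argument (the Dockerfile declares `ARG CUDA_DOCKER_ARCH`, and the hunk earlier in this diff shows the `'75;86;89;120'` example). A minimal sketch of passing it when building the llama-cpp backend image, with illustrative values for the other arguments (GPU builds would normally also set `BASE_IMAGE` to a CUDA base image):

```bash
# Restrict the CUDA architectures compiled into the llama-cpp backend image.
# Inside the image the semicolons are escaped and the list is appended to
# CMAKE_ARGS as -DCMAKE_CUDA_ARCHITECTURES=<list>.
docker build \
  --build-arg BUILD_TYPE=cublas12 \
  --build-arg CUDA_DOCKER_ARCH='75;86;89;120' \
  -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
```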
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG BACKEND=rerankers
|
||||
@@ -12,7 +12,7 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -54,11 +54,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -142,7 +176,8 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
|
||||
# Install uv as a system package
|
||||
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Increase timeout for uv installs behind slow networks
|
||||
ENV UV_HTTP_TIMEOUT=180
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
# Install grpcio-tools (the version in 22.04 is too old)
|
||||
@@ -155,12 +190,18 @@ RUN <<EOT bash
|
||||
EOT
|
||||
|
||||
|
||||
COPY python/${BACKEND} /${BACKEND}
|
||||
COPY backend.proto /${BACKEND}/backend.proto
|
||||
COPY python/common/ /${BACKEND}/common
|
||||
COPY backend/python/${BACKEND} /${BACKEND}
|
||||
COPY backend/backend.proto /${BACKEND}/backend.proto
|
||||
COPY backend/python/common/ /${BACKEND}/common
|
||||
COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh
|
||||
|
||||
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
|
||||
|
||||
# Package GPU libraries into the backend's lib directory
|
||||
RUN mkdir -p /${BACKEND}/lib && \
|
||||
TARGET_LIB_DIR="/${BACKEND}/lib" BUILD_TYPE="${BUILD_TYPE}" CUDA_MAJOR_VERSION="${CUDA_MAJOR_VERSION}" \
|
||||
bash /package-gpu-libs.sh "/${BACKEND}/lib"
|
||||
|
||||
FROM scratch
|
||||
ARG BACKEND=rerankers
|
||||
COPY --from=builder /${BACKEND}/ /
|
||||
@@ -65,7 +65,7 @@ The backend system provides language-specific Dockerfiles that handle the build
|
||||
## Hardware Acceleration Support
|
||||
|
||||
### CUDA (NVIDIA)
|
||||
- **Versions**: CUDA 11.x, 12.x
|
||||
- **Versions**: CUDA 12.x, 13.x
|
||||
- **Features**: cuBLAS, cuDNN, TensorRT optimization
|
||||
- **Targets**: x86_64, ARM64 (Jetson)
|
||||
|
||||
@@ -132,8 +132,7 @@ For ARM64/Mac builds, docker can't be used, and the makefile in the respective b
|
||||
### Build Types
|
||||
|
||||
- **`cpu`**: CPU-only optimization
|
||||
- **`cublas11`**: CUDA 11.x with cuBLAS
|
||||
- **`cublas12`**: CUDA 12.x with cuBLAS
|
||||
- **`cublas12`**, **`cublas13`**: CUDA 12.x, 13.x with cuBLAS
|
||||
- **`hipblas`**: ROCm with rocBLAS
|
||||
- **`intel`**: Intel oneAPI optimization
|
||||
- **`vulkan`**: Vulkan-based acceleration
|
||||
@@ -210,4 +209,4 @@ When contributing to the backend system:
|
||||
2. **Add Tests**: Include comprehensive test coverage
|
||||
3. **Document**: Provide clear usage examples
|
||||
4. **Optimize**: Consider performance and resource usage
|
||||
5. **Validate**: Test across different hardware targets
|
||||
5. **Validate**: Test across different hardware targets
|
||||
|
||||
@@ -70,4 +70,4 @@ target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_IN
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
if(TARGET BUILD_INFO)
|
||||
add_dependencies(${TARGET} BUILD_INFO)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=ced765be44ce173c374f295b3c6f4175f8fd109b
|
||||
LLAMA_VERSION?=b1377188784f9aea26b8abde56d4aee8c733eec7
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
@@ -7,7 +7,8 @@ BUILD_TYPE?=
|
||||
NATIVE?=false
|
||||
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
|
||||
TARGET?=--target grpc-server
|
||||
JOBS?=$(shell nproc)
|
||||
JOBS?=$(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 1)
|
||||
ARCH?=$(shell uname -m)
|
||||
|
||||
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
|
||||
@@ -106,21 +107,21 @@ llama-cpp-avx: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:avx${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
|
||||
|
||||
llama-cpp-fallback: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
|
||||
|
||||
llama-cpp-grpc: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
|
||||
|
||||
llama-cpp-rpc-server: llama-cpp-grpc
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <grpcpp/health_check_service_interface.h>
|
||||
#include <regex>
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <signal.h>
|
||||
#include <thread>
|
||||
|
||||
@@ -390,8 +391,9 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
// Initialize fit_params options (can be overridden by options)
|
||||
// fit_params: whether to auto-adjust params to fit device memory (default: true as in llama.cpp)
|
||||
params.fit_params = true;
|
||||
// fit_params_target: target margin per device in bytes (default: 1GB)
|
||||
params.fit_params_target = 1024 * 1024 * 1024;
|
||||
// fit_params_target: target margin per device in bytes (default: 1GB per device)
|
||||
// Initialize as vector with default value for all devices
|
||||
params.fit_params_target = std::vector<size_t>(llama_max_devices(), 1024 * 1024 * 1024);
|
||||
// fit_params_min_ctx: minimum context size for fit (default: 4096)
|
||||
params.fit_params_min_ctx = 4096;
|
||||
|
||||
@@ -468,10 +470,28 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
} else if (!strcmp(optname, "fit_params_target") || !strcmp(optname, "fit_target")) {
|
||||
if (optval != NULL) {
|
||||
try {
|
||||
// Value is in MiB, convert to bytes
|
||||
params.fit_params_target = static_cast<size_t>(std::stoi(optval_str)) * 1024 * 1024;
|
||||
// Value is in MiB, can be comma-separated list for multiple devices
|
||||
// Single value is broadcast across all devices
|
||||
std::string arg_next = optval_str;
|
||||
const std::regex regex{ R"([,/]+)" };
|
||||
std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
|
||||
std::vector<std::string> split_arg{ it, {} };
|
||||
if (split_arg.size() >= llama_max_devices()) {
|
||||
// Too many values provided
|
||||
continue;
|
||||
}
|
||||
if (split_arg.size() == 1) {
|
||||
// Single value: broadcast to all devices
|
||||
size_t value_mib = std::stoul(split_arg[0]);
|
||||
std::fill(params.fit_params_target.begin(), params.fit_params_target.end(), value_mib * 1024 * 1024);
|
||||
} else {
|
||||
// Multiple values: set per device
|
||||
for (size_t i = 0; i < split_arg.size() && i < params.fit_params_target.size(); i++) {
|
||||
params.fit_params_target[i] = std::stoul(split_arg[i]) * 1024 * 1024;
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
// If conversion fails, keep default value (1GB)
|
||||
// If conversion fails, keep default value (1GB per device)
|
||||
}
|
||||
}
|
||||
} else if (!strcmp(optname, "fit_params_min_ctx") || !strcmp(optname, "fit_ctx")) {
|
||||
@@ -686,13 +706,13 @@ private:
|
||||
public:
|
||||
BackendServiceImpl(server_context& ctx) : ctx_server(ctx) {}
|
||||
|
||||
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) {
|
||||
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) override {
|
||||
// Implement Health RPC
|
||||
reply->set_message("OK");
|
||||
return Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) {
|
||||
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) override {
|
||||
// Implement LoadModel RPC
|
||||
common_params params;
|
||||
params_parse(ctx_server, request, params);
|
||||
@@ -709,11 +729,72 @@ public:
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
LOG_INF("\n");
|
||||
|
||||
// Capture error messages during model loading
|
||||
struct error_capture {
|
||||
std::string captured_error;
|
||||
std::mutex error_mutex;
|
||||
ggml_log_callback original_callback;
|
||||
void* original_user_data;
|
||||
} error_capture_data;
|
||||
|
||||
// Get original log callback
|
||||
llama_log_get(&error_capture_data.original_callback, &error_capture_data.original_user_data);
|
||||
|
||||
// Set custom callback to capture errors
|
||||
llama_log_set([](ggml_log_level level, const char * text, void * user_data) {
|
||||
auto* capture = static_cast<error_capture*>(user_data);
|
||||
|
||||
// Capture error messages
|
||||
if (level == GGML_LOG_LEVEL_ERROR) {
|
||||
std::lock_guard<std::mutex> lock(capture->error_mutex);
|
||||
// Append error message, removing trailing newlines
|
||||
std::string msg(text);
|
||||
while (!msg.empty() && (msg.back() == '\n' || msg.back() == '\r')) {
|
||||
msg.pop_back();
|
||||
}
|
||||
if (!msg.empty()) {
|
||||
if (!capture->captured_error.empty()) {
|
||||
capture->captured_error.append("; ");
|
||||
}
|
||||
capture->captured_error.append(msg);
|
||||
}
|
||||
}
|
||||
|
||||
// Also call original callback to preserve logging
|
||||
if (capture->original_callback) {
|
||||
capture->original_callback(level, text, capture->original_user_data);
|
||||
}
|
||||
}, &error_capture_data);
|
||||
|
||||
// load the model
|
||||
if (!ctx_server.load_model(params)) {
|
||||
result->set_message("Failed loading model");
|
||||
bool load_success = ctx_server.load_model(params);
|
||||
|
||||
// Restore original log callback
|
||||
llama_log_set(error_capture_data.original_callback, error_capture_data.original_user_data);
|
||||
|
||||
if (!load_success) {
|
||||
std::string error_msg = "Failed to load model: " + params.model.path;
|
||||
if (!params.mmproj.path.empty()) {
|
||||
error_msg += " (with mmproj: " + params.mmproj.path + ")";
|
||||
}
|
||||
if (params.has_speculative() && !params.speculative.model.path.empty()) {
|
||||
error_msg += " (with draft model: " + params.speculative.model.path + ")";
|
||||
}
|
||||
|
||||
// Add captured error details if available
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(error_capture_data.error_mutex);
|
||||
if (!error_capture_data.captured_error.empty()) {
|
||||
error_msg += ". Error: " + error_capture_data.captured_error;
|
||||
} else {
|
||||
error_msg += ". Model file may not exist or be invalid.";
|
||||
}
|
||||
}
|
||||
|
||||
result->set_message(error_msg);
|
||||
result->set_success(false);
|
||||
return Status::CANCELLED;
|
||||
return grpc::Status(grpc::StatusCode::INTERNAL, error_msg);
|
||||
}
|
||||
|
||||
// Process grammar triggers now that vocab is available
|
||||
@@ -1492,7 +1573,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) {
|
||||
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2163,7 +2244,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) {
|
||||
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2258,7 +2339,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) {
|
||||
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) override {
|
||||
if (!params_base.embedding || params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) {
|
||||
return grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "This server does not support reranking. Start it with `--reranking` and without `--embedding`");
|
||||
}
|
||||
@@ -2344,7 +2425,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) {
|
||||
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2367,7 +2448,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) {
|
||||
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) override {
|
||||
|
||||
// request slots data using task queue
|
||||
auto rd = ctx_server.get_response_reader();
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -37,6 +38,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# stablediffusion.cpp (ggml)
|
||||
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
|
||||
STABLEDIFFUSION_GGML_VERSION?=4ff2c8c74bd17c2cfffe3a01be77743fb3efba2f
|
||||
STABLEDIFFUSION_GGML_VERSION?=0e52afc6513cc2dea9a1a017afc4a008d5acf2b0
|
||||
|
||||
CMAKE_ARGS+=-DGGML_MAX_NAME=128
|
||||
|
||||
@@ -28,7 +28,12 @@ else ifeq ($(BUILD_TYPE),clblas)
|
||||
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
||||
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
|
||||
else ifeq ($(BUILD_TYPE),hipblas)
|
||||
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
|
||||
ROCM_HOME ?= /opt/rocm
|
||||
ROCM_PATH ?= /opt/rocm
|
||||
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
||||
export CC=$(ROCM_HOME)/llvm/bin/clang
|
||||
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
|
||||
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
|
||||
else ifeq ($(BUILD_TYPE),vulkan)
|
||||
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
|
||||
else ifeq ($(OS),Darwin)
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -50,6 +51,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=e9898ddfb908ffaa7026c66852a023889a5a7202
|
||||
WHISPER_CPP_VERSION?=679bdb53dbcbfb3e42685f50c7ff367949fd4d48
|
||||
SO_TARGET?=libgowhisper.so
|
||||
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -50,6 +51,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
|
||||
@@ -275,6 +275,24 @@
|
||||
amd: "rocm-faster-whisper"
|
||||
nvidia-cuda-13: "cuda13-faster-whisper"
|
||||
nvidia-cuda-12: "cuda12-faster-whisper"
|
||||
- &moonshine
|
||||
description: |
|
||||
Moonshine is a fast, accurate, and efficient speech-to-text transcription model using ONNX Runtime.
|
||||
It provides real-time transcription capabilities with support for multiple model sizes and GPU acceleration.
|
||||
urls:
|
||||
- https://github.com/moonshine-ai/moonshine
|
||||
tags:
|
||||
- speech-to-text
|
||||
- transcription
|
||||
- ONNX
|
||||
license: MIT
|
||||
name: "moonshine"
|
||||
alias: "moonshine"
|
||||
capabilities:
|
||||
nvidia: "cuda12-moonshine"
|
||||
default: "cpu-moonshine"
|
||||
nvidia-cuda-13: "cuda13-moonshine"
|
||||
nvidia-cuda-12: "cuda12-moonshine"
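Assuming the gallery entry above is published as-is, installing the backend by name should resolve to the variant matching the detected hardware (cpu-moonshine, cuda12-moonshine, or cuda13-moonshine via the capabilities map); a hedged usage sketch:

```bash
# Install the moonshine backend from the backend gallery by name;
# the capabilities map selects the CPU or CUDA image automatically.
local-ai backends install moonshine
```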
|
||||
- &kokoro
|
||||
icon: https://avatars.githubusercontent.com/u/166769057?v=4
|
||||
description: |
|
||||
@@ -634,11 +652,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda11-llama-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda12-llama-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp"
|
||||
@@ -679,11 +692,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda11-llama-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda12-llama-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-llama-cpp"
|
||||
@@ -755,11 +763,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda11-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda12-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisper"
|
||||
@@ -800,11 +803,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda11-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda12-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisper"
|
||||
@@ -879,11 +877,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda11-stablediffusion-ggml"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda12-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-stablediffusion-ggml"
|
||||
@@ -899,11 +892,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda11-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "nvidia-l4t-arm64-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-stablediffusion-ggml"
|
||||
@@ -1054,11 +1042,6 @@
|
||||
intel: "intel-rerankers-development"
|
||||
amd: "rocm-rerankers-development"
|
||||
nvidia-cuda-13: "cuda13-rerankers-development"
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda11-rerankers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda12-rerankers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rerankers"
|
||||
@@ -1074,11 +1057,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda11-rerankers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda12-rerankers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-rerankers"
|
||||
@@ -1127,16 +1105,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda11-transformers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda11-transformers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda12-transformers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-transformers"
|
||||
@@ -1213,21 +1181,11 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda11-diffusers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "intel-diffusers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda11-diffusers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda12-diffusers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-diffusers"
|
||||
@@ -1269,21 +1227,11 @@
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2-development"
|
||||
intel: "intel-exllama2-development"
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda11-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda11-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
|
||||
@@ -1297,11 +1245,6 @@
|
||||
intel: "intel-kokoro-development"
|
||||
amd: "rocm-kokoro-development"
|
||||
nvidia-l4t: "nvidia-l4t-kokoro-development"
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda12-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
|
||||
@@ -1332,11 +1275,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda12-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-kokoro"
|
||||
@@ -1365,11 +1303,6 @@
|
||||
intel: "intel-faster-whisper-development"
|
||||
amd: "rocm-faster-whisper-development"
|
||||
nvidia-cuda-13: "cuda13-faster-whisper-development"
|
||||
- !!merge <<: *faster-whisper
|
||||
name: "cuda11-faster-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-faster-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-faster-whisper
|
||||
- !!merge <<: *faster-whisper
|
||||
name: "cuda12-faster-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"
|
||||
@@ -1400,6 +1333,44 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper
|
||||
## moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "moonshine-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-moonshine-development"
|
||||
default: "cpu-moonshine-development"
|
||||
nvidia-cuda-13: "cuda13-moonshine-development"
|
||||
nvidia-cuda-12: "cuda12-moonshine-development"
|
||||
- !!merge <<: *moonshine
|
||||
name: "cpu-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cpu-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda12-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda12-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda13-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda13-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
||||
## coqui
|
||||
|
||||
- !!merge <<: *coqui
|
||||
@@ -1408,21 +1379,11 @@
|
||||
nvidia: "cuda12-coqui-development"
|
||||
intel: "intel-coqui-development"
|
||||
amd: "rocm-coqui-development"
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda11-coqui"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda12-coqui"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda11-coqui-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda12-coqui-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-coqui"
|
||||
@@ -1455,16 +1416,6 @@
|
||||
nvidia: "cuda12-bark-development"
|
||||
intel: "intel-bark-development"
|
||||
amd: "rocm-bark-development"
|
||||
- !!merge <<: *bark
|
||||
name: "cuda11-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda11-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-bark
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
|
||||
@@ -1546,16 +1497,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda11-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda11-chatterbox-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda12-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-chatterbox"
|
||||
|
||||
@@ -85,7 +85,7 @@ runUnittests

The build system automatically detects and configures for different hardware:

- **CPU** - Standard CPU-only builds
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 12/13)
- **Intel** - Intel XPU/GPU optimization
- **MLX** - Apple Silicon (M1/M2/M3) optimization
- **HIP** - AMD GPU acceleration

@@ -95,8 +95,8 @@ The build system automatically detects and configures for different hardware:

Backends can specify hardware-specific dependencies:
- `requirements.txt` - Base requirements
- `requirements-cpu.txt` - CPU-specific packages
- `requirements-cublas11.txt` - CUDA 11 packages
- `requirements-cublas12.txt` - CUDA 12 packages
- `requirements-cublas13.txt` - CUDA 13 packages
- `requirements-intel.txt` - Intel-optimized packages
- `requirements-mps.txt` - Apple Silicon packages
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
torchaudio==2.4.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.4.1+rocm6.0
|
||||
torchaudio==2.4.1+rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
@@ -17,4 +17,9 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
fi
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
|
||||
installRequirements
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.6.0+cu118
|
||||
torchaudio==2.6.0+cu118
|
||||
transformers==4.46.3
|
||||
numpy>=1.24.0,<1.26.0
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
|
||||
accelerate
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.6.0+rocm6.1
|
||||
torchaudio==2.6.0+rocm6.1
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.9.1+rocm6.4
|
||||
torchaudio==2.9.1+rocm6.4
|
||||
transformers
|
||||
numpy>=1.24.0,<1.26.0
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
|
||||
backend/python/chatterbox/requirements-install.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
# Build dependencies needed for packages installed from source (e.g., git dependencies)
|
||||
# When using --no-build-isolation, these must be installed in the venv first
|
||||
wheel
|
||||
setuptools
|
||||
packaging
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
#
|
||||
#
|
||||
# use the library by adding the following line to a script:
|
||||
# source $(dirname $0)/../common/libbackend.sh
|
||||
#
|
||||
@@ -206,8 +206,8 @@ function init() {

# getBuildProfile will inspect the system to determine which build profile is appropriate:
# returns one of the following:
# - cublas11
# - cublas12
# - cublas13
# - hipblas
# - intel
function getBuildProfile() {

@@ -392,7 +392,7 @@ function runProtogen() {

# - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt
#
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-12 or cuda-13
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
#
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
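To make the resolution order described above concrete, here is a small Python sketch. It is not the shell implementation in libbackend.sh; the function name and the existence checks are illustrative only, and the file-name pattern follows the requirements-* convention documented earlier.

    # Simplified model of how per-profile requirements files could be collected.
    import os

    def candidate_requirements(backend_dir, build_type=None, build_profile=None):
        names = ["requirements.txt"]
        if build_type:
            names.append(f"requirements-{build_type}.txt")
        # BUILD_PROFILE is more specific than BUILD_TYPE (e.g. cublas12, cublas13, intel).
        if build_profile and build_profile != build_type:
            names.append(f"requirements-{build_profile}.txt")
        paths = [os.path.join(backend_dir, n) for n in names]
        return [p for p in paths if os.path.exists(p)]

    # Example: a CUDA 13 build would pick up requirements.txt plus requirements-cublas13.txt if present.
    print(candidate_requirements("backend/python/moonshine", build_profile="cublas13"))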
@@ -465,6 +465,14 @@ function startBackend() {
    if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -x "$(_portable_python)" ]; then
        _makeVenvPortable --update-pyvenv-cfg
    fi

    # Set up GPU library paths if a lib directory exists
    # This allows backends to include their own GPU libraries (CUDA, ROCm, etc.)
    if [ -d "${EDIR}/lib" ]; then
        export LD_LIBRARY_PATH="${EDIR}/lib:${LD_LIBRARY_PATH:-}"
        echo "Added ${EDIR}/lib to LD_LIBRARY_PATH for GPU libraries"
    fi

    if [ ! -z "${BACKEND_FILE:-}" ]; then
        exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
    elif [ -e "${MY_DIR}/server.py" ]; then
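For illustration only, a rough Python equivalent of the library-path handling above; the real logic is the shell snippet, and launch_backend, the venv path, and the entrypoint argument are hypothetical.

    # Prepend a bundled lib/ directory to LD_LIBRARY_PATH before exec'ing the backend.
    import os
    import sys

    def launch_backend(backend_dir, entrypoint, *args):
        lib_dir = os.path.join(backend_dir, "lib")
        env = dict(os.environ)
        if os.path.isdir(lib_dir):
            env["LD_LIBRARY_PATH"] = lib_dir + ":" + env.get("LD_LIBRARY_PATH", "")
            print(f"Added {lib_dir} to LD_LIBRARY_PATH for GPU libraries", file=sys.stderr)
        python = os.path.join(backend_dir, "venv", "bin", "python")
        # execve replaces the current process, mirroring the shell's exec.
        os.execve(python, [python, entrypoint, *args], env)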
@@ -1,2 +1,2 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch
|
||||
@@ -1,6 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
torchaudio==2.4.1+cu118
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
coqui-tts
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.4.1+rocm6.0
|
||||
torchaudio==2.4.1+rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
coqui-tts
|
||||
@@ -16,8 +16,12 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
# Use python 3.12 for l4t
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
ftfy
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchvision==0.23.0+rocm6.4
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,9 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
faster-whisper
|
||||
opencv-python
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
optimum-quanto
|
||||
@@ -1,3 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch
|
||||
faster-whisper
|
||||
@@ -16,4 +16,8 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
torchaudio==2.7.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchaudio==2.7.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
|
||||
backend/python/moonshine/Makefile (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
.DEFAULT_GOAL := install
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
|
||||
test: install
|
||||
bash test.sh
|
||||
backend/python/moonshine/backend.py (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Moonshine transcription
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import moonshine_onnx
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
# Store the model name for use in transcription
|
||||
# Model name format: e.g., "moonshine/tiny"
|
||||
self.model_name = request.Model
|
||||
print(f"Model name set to: {self.model_name}", file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def AudioTranscription(self, request, context):
|
||||
resultSegments = []
|
||||
text = ""
|
||||
try:
|
||||
# moonshine_onnx.transcribe returns a list of strings
|
||||
transcriptions = moonshine_onnx.transcribe(request.dst, self.model_name)
|
||||
|
||||
# Combine all transcriptions into a single text
|
||||
if isinstance(transcriptions, list):
|
||||
text = " ".join(transcriptions)
|
||||
# Create segments for each transcription in the list
|
||||
for id, trans in enumerate(transcriptions):
|
||||
# Since moonshine doesn't provide timing info, we'll create a single segment
|
||||
# with id and text, using approximate timing
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(
|
||||
id=id,
|
||||
start=0,
|
||||
end=0,
|
||||
text=trans
|
||||
))
|
||||
else:
|
||||
# Handle case where it's not a list (shouldn't happen, but be safe)
|
||||
text = str(transcriptions)
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(
|
||||
id=0,
|
||||
start=0,
|
||||
end=0,
|
||||
text=text
|
||||
))
|
||||
except Exception as err:
|
||||
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
|
||||
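For reference, a client-side sketch of talking to this backend over gRPC, mirroring what test.py does; the audio path and address are placeholders, and it assumes the generated backend_pb2 / backend_pb2_grpc stubs are importable and the server was started with `python3 backend.py --addr localhost:50051`.

    import grpc
    import backend_pb2
    import backend_pb2_grpc

    with grpc.insecure_channel("localhost:50051") as channel:
        stub = backend_pb2_grpc.BackendStub(channel)
        # Health check returns the raw bytes "OK".
        assert stub.Health(backend_pb2.HealthMessage()).message == b"OK"
        # Load a Moonshine model by name, e.g. "moonshine/tiny".
        load = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
        assert load.success, load.message
        # Transcribe an audio file; dst carries the path to the file on disk.
        result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="/path/to/audio.wav"))
        print(result.text)
        for segment in result.segments:
            print(segment.id, segment.text)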
backend/python/moonshine/install.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
backend/python/moonshine/protogen.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
backend/python/moonshine/requirements.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
grpcio-tools
|
||||
useful-moonshine-onnx@git+https://git@github.com/moonshine-ai/moonshine.git#subdirectory=moonshine-onnx
|
||||
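The backend calls the pinned moonshine-onnx package directly; a minimal usage sketch follows, with the audio path and model name as placeholders.

    import moonshine_onnx

    # transcribe() returns a list of strings, one per transcribed chunk.
    texts = moonshine_onnx.transcribe("/path/to/audio.wav", "moonshine/tiny")
    print(" ".join(texts) if isinstance(texts, list) else texts)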
backend/python/moonshine/run.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
|
||||
backend/python/moonshine/test.py (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
"""
|
||||
A test script to test the gRPC service for Moonshine transcription
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(10)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_audio_transcription(self):
|
||||
"""
|
||||
This method tests if audio transcription works successfully
|
||||
"""
|
||||
# Create a temporary directory for the audio file
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
audio_file = os.path.join(temp_dir, 'audio.wav')
|
||||
|
||||
try:
|
||||
# Download the audio file to the temporary directory
|
||||
print(f"Downloading audio file to {audio_file}...")
|
||||
url = "https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav"
|
||||
result = subprocess.run(
|
||||
["wget", "-q", url, "-O", audio_file],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
if result.returncode != 0:
|
||||
self.fail(f"Failed to download audio file: {result.stderr}")
|
||||
|
||||
# Verify the file was downloaded
|
||||
if not os.path.exists(audio_file):
|
||||
self.fail(f"Audio file was not downloaded to {audio_file}")
|
||||
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
# Load the model first
|
||||
load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
|
||||
self.assertTrue(load_response.success)
|
||||
|
||||
# Perform transcription
|
||||
transcript_request = backend_pb2.TranscriptRequest(dst=audio_file)
|
||||
transcript_response = stub.AudioTranscription(transcript_request)
|
||||
|
||||
# Print the transcribed text for debugging
|
||||
print(f"Transcribed text: {transcript_response.text}")
|
||||
print(f"Number of segments: {len(transcript_response.segments)}")
|
||||
|
||||
# Verify response structure
|
||||
self.assertIsNotNone(transcript_response)
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
# Protobuf repeated fields return a sequence, not a list
|
||||
self.assertIsNotNone(transcript_response.segments)
|
||||
# Check if segments is iterable (has length)
|
||||
self.assertGreaterEqual(len(transcript_response.segments), 0)
|
||||
|
||||
# Verify the transcription contains the expected text
|
||||
expected_text = "This is the micro machine man presenting the most midget miniature"
|
||||
self.assertIn(
|
||||
expected_text.lower(),
|
||||
transcript_response.text.lower(),
|
||||
f"Expected text '{expected_text}' not found in transcription: '{transcript_response.text}'"
|
||||
)
|
||||
|
||||
# If we got segments, verify they have the expected structure
|
||||
if len(transcript_response.segments) > 0:
|
||||
segment = transcript_response.segments[0]
|
||||
self.assertIsNotNone(segment.text)
|
||||
self.assertIsInstance(segment.id, int)
|
||||
else:
|
||||
# Even if no segments, we should have text
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
self.assertGreater(len(transcript_response.text), 0)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("AudioTranscription service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
# Clean up the temporary directory
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
backend/python/moonshine/test.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
|
||||
@@ -26,6 +26,12 @@ fi
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
|
||||
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
|
||||
git clone https://github.com/neuphonic/neutts-air neutts-air
|
||||
|
||||
cp -rfv neutts-air/neuttsair ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.8.0+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
transformers==4.56.1
|
||||
accelerate
|
||||
librosa==0.11.0
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1+cu118
|
||||
rerankers[transformers]
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1+rocm6.0
|
||||
torch==2.8.0+rocm6.4
|
||||
rerankers[transformers]
|
||||
@@ -1,8 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
inference
|
||||
peft
|
||||
optimum-quanto
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchvision==0.23.0+rocm6.4
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
llvmlite==0.43.0
|
||||
numba==0.60.0
|
||||
accelerate
|
||||
transformers
|
||||
bitsandbytes
|
||||
outetts
|
||||
sentence-transformers==5.2.0
|
||||
protobuf==6.33.2
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
accelerate
|
||||
transformers
|
||||
llvmlite==0.43.0
|
||||
|
||||
@@ -17,12 +17,16 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
fi
|
||||
|
||||
# Use python 3.12 for l4t
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
git clone https://github.com/microsoft/VibeVoice.git
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers==4.51.3
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
ftfy
|
||||
llvmlite>=0.40.0
|
||||
numba>=0.57.0
|
||||
tqdm
|
||||
numpy
|
||||
scipy
|
||||
librosa
|
||||
ml-collections
|
||||
absl-py
|
||||
gradio
|
||||
av
|
||||
@@ -28,7 +28,7 @@ fi
|
||||
|
||||
# We don't embed this into the images as it is a large dependency and not always needed.
|
||||
# Besides, the inference speed is not actually usable in the current state for production use-cases.
|
||||
if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
|
||||
if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE:-}" == "xtrue" ]; then
|
||||
ensureVenv
|
||||
# https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html
|
||||
if [ ! -d vllm ]; then
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
flash-attn
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
accelerate
|
||||
torch==2.7.0+cu118
|
||||
transformers
|
||||
bitsandbytes
|
||||
@@ -1,4 +1,4 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly/rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly/rocm6.4
|
||||
accelerate
|
||||
torch
|
||||
transformers
|
||||
|
||||
@@ -63,6 +63,25 @@ func (m *GalleryBackend) IsMeta() bool {
	return len(m.CapabilitiesMap) > 0 && m.URI == ""
}

// IsCompatibleWith checks if the backend is compatible with the current system capability.
// For meta backends, it checks if any of the capabilities in the map match the system capability.
// For concrete backends, it delegates to SystemState.IsBackendCompatible.
func (m *GalleryBackend) IsCompatibleWith(systemState *system.SystemState) bool {
	if systemState == nil {
		return true
	}

	// Meta backends are compatible if the system capability matches one of the keys
	if m.IsMeta() {
		capability := systemState.Capability(m.CapabilitiesMap)
		_, exists := m.CapabilitiesMap[capability]
		return exists
	}

	// For concrete backends, delegate to the system package
	return systemState.IsBackendCompatible(m.Name, m.URI)
}

func (m *GalleryBackend) SetInstalled(installed bool) {
	m.Installed = installed
}
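A simplified Python model of the capability-map lookup above, for illustration only; the helper names detect_capability and resolve_meta_backend are hypothetical and this is not the project's Go API.

    # A meta backend maps detected capabilities (e.g. "nvidia", "amd", "default")
    # to concrete backend names; it is compatible only if the detected capability
    # is one of its keys.

    def detect_capability(gpu_vendor, capabilities_map):
        if gpu_vendor and gpu_vendor in capabilities_map:
            return gpu_vendor
        return "default"

    def resolve_meta_backend(capabilities_map, gpu_vendor=None):
        capability = detect_capability(gpu_vendor, capabilities_map)
        return capabilities_map.get(capability)

    # Example mirroring the moonshine meta entry added earlier in this diff.
    caps = {"nvidia": "cuda12-moonshine", "default": "cpu-moonshine"}
    assert resolve_meta_backend(caps, "nvidia") == "cuda12-moonshine"
    assert resolve_meta_backend(caps) == "cpu-moonshine"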
@@ -172,6 +172,252 @@ var _ = Describe("Gallery Backends", func() {
|
||||
Expect(nilMetaBackend.IsMeta()).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should check IsCompatibleWith correctly for meta backends", func() {
|
||||
metaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "meta-backend",
|
||||
},
|
||||
CapabilitiesMap: map[string]string{
|
||||
"nvidia": "nvidia-backend",
|
||||
"amd": "amd-backend",
|
||||
"default": "default-backend",
|
||||
},
|
||||
}
|
||||
|
||||
// Test with nil state - should be compatible
|
||||
Expect(metaBackend.IsCompatibleWith(nil)).To(BeTrue())
|
||||
|
||||
// Test with NVIDIA system - should be compatible (has nvidia key)
|
||||
nvidiaState := &system.SystemState{GPUVendor: "nvidia", VRAM: 8 * 1024 * 1024 * 1024}
|
||||
Expect(metaBackend.IsCompatibleWith(nvidiaState)).To(BeTrue())
|
||||
|
||||
// Test with default (no GPU) - should be compatible (has default key)
|
||||
defaultState := &system.SystemState{}
|
||||
Expect(metaBackend.IsCompatibleWith(defaultState)).To(BeTrue())
|
||||
})
|
||||
|
||||
Describe("IsCompatibleWith for concrete backends", func() {
|
||||
Context("CPU backends", func() {
|
||||
It("should be compatible on all systems", func() {
|
||||
cpuBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cpu-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-cpu-llama-cpp",
|
||||
}
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Darwin/Metal backends", func() {
|
||||
When("running on darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS != "darwin" {
|
||||
Skip("Skipping darwin-specific tests on non-darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should be compatible for MLX backend", func() {
|
||||
mlxBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "mlx",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx",
|
||||
}
|
||||
Expect(mlxBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible for metal-llama-cpp backend", func() {
|
||||
metalBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "metal-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-llama-cpp",
|
||||
}
|
||||
Expect(metalBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping non-darwin-specific tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible for MLX backend", func() {
|
||||
mlxBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "mlx",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx",
|
||||
}
|
||||
Expect(mlxBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should NOT be compatible for metal-llama-cpp backend", func() {
|
||||
metalBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "metal-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-llama-cpp",
|
||||
}
|
||||
Expect(metalBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("NVIDIA/CUDA backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping CUDA tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without nvidia GPU", func() {
|
||||
cudaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda12-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp",
|
||||
}
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with nvidia GPU", func() {
|
||||
cudaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda12-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp",
|
||||
}
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with cuda13 backend on nvidia GPU", func() {
|
||||
cuda13Backend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda13-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-llama-cpp",
|
||||
}
|
||||
Expect(cuda13Backend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("AMD/ROCm backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping AMD/ROCm tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without AMD GPU", func() {
|
||||
rocmBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "rocm-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-llama-cpp",
|
||||
}
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with AMD GPU", func() {
|
||||
rocmBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "rocm-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-llama-cpp",
|
||||
}
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with hipblas backend on AMD GPU", func() {
|
||||
hipBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "hip-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-hip-llama-cpp",
|
||||
}
|
||||
Expect(hipBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Intel/SYCL backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping Intel/SYCL tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without Intel GPU", func() {
|
||||
intelBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f16-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-llama-cpp",
|
||||
}
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with Intel GPU", func() {
|
||||
intelBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f16-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-llama-cpp",
|
||||
}
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with intel-sycl-f32 backend on Intel GPU", func() {
|
||||
intelF32Backend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f32-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-llama-cpp",
|
||||
}
|
||||
Expect(intelF32Backend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with intel-transformers backend on Intel GPU", func() {
|
||||
intelTransformersBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-transformers",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-intel-transformers",
|
||||
}
|
||||
Expect(intelTransformersBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Vulkan backends", func() {
|
||||
It("should be compatible on CPU-only systems", func() {
|
||||
// Vulkan backends don't have a specific GPU vendor requirement in the current logic
|
||||
// They are compatible if no other GPU-specific pattern matches
|
||||
vulkanBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "vulkan-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-vulkan-llama-cpp",
|
||||
}
|
||||
// Vulkan doesn't have vendor-specific filtering in current implementation
|
||||
Expect(vulkanBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
It("should find best backend from meta based on system capabilities", func() {
|
||||
|
||||
metaBackend := &GalleryBackend{
|
||||
|
||||
@@ -226,6 +226,16 @@ func AvailableGalleryModels(galleries []config.Gallery, systemState *system.Syst

// List available backends
func AvailableBackends(galleries []config.Gallery, systemState *system.SystemState) (GalleryElements[*GalleryBackend], error) {
	return availableBackendsWithFilter(galleries, systemState, true)
}

// AvailableBackendsUnfiltered returns all available backends without filtering by system capability.
func AvailableBackendsUnfiltered(galleries []config.Gallery, systemState *system.SystemState) (GalleryElements[*GalleryBackend], error) {
	return availableBackendsWithFilter(galleries, systemState, false)
}

// availableBackendsWithFilter is a helper function that lists available backends with optional filtering.
func availableBackendsWithFilter(galleries []config.Gallery, systemState *system.SystemState, filterByCapability bool) (GalleryElements[*GalleryBackend], error) {
	var backends []*GalleryBackend

	systemBackends, err := ListSystemBackends(systemState)
@@ -241,7 +251,17 @@
	if err != nil {
		return nil, err
	}
	backends = append(backends, galleryBackends...)

	// Filter backends by system capability if requested
	if filterByCapability {
		for _, backend := range galleryBackends {
			if backend.IsCompatibleWith(systemState) {
				backends = append(backends, backend)
			}
		}
	} else {
		backends = append(backends, galleryBackends...)
	}
}

return backends, nil
@@ -205,6 +205,7 @@ func API(application *application.Application) (*echo.Echo, error) {

	routes.RegisterLocalAIRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application.TemplatesEvaluator(), application)
	routes.RegisterOpenAIRoutes(e, requestExtractor, application)
	routes.RegisterAnthropicRoutes(e, requestExtractor, application)
	if !application.ApplicationConfig().DisableWebUI {
		routes.RegisterUIAPIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application)
		routes.RegisterUIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService())
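A quick way to exercise the newly registered route, as a hedged Python sketch: host, port and model name are placeholders, and the required fields follow the handler's validation in the new messages.go below (model plus a positive max_tokens), with response field names assumed to follow the Anthropic message schema that the handler mirrors.

    import json
    import urllib.request

    payload = {
        "model": "my-local-model",   # required by the handler
        "max_tokens": 256,           # must be > 0 or an invalid_request_error is returned
        "messages": [
            {"role": "user", "content": "Write a haiku about local inference."}
        ],
    }
    req = urllib.request.Request(
        "http://localhost:8080/v1/messages",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        body = json.load(resp)
        # Non-streaming responses carry a list of content blocks; text blocks hold the reply.
        print([b.get("text") for b in body.get("content", []) if b.get("type") == "text"])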
core/http/endpoints/anthropic/messages.go (new file, 537 lines)
@@ -0,0 +1,537 @@
|
||||
package anthropic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/templates"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
// MessagesEndpoint is the Anthropic Messages API endpoint
|
||||
// https://docs.anthropic.com/claude/reference/messages_post
|
||||
// @Summary Generate a message response for the given messages and model.
|
||||
// @Param request body schema.AnthropicRequest true "query params"
|
||||
// @Success 200 {object} schema.AnthropicResponse "Response"
|
||||
// @Router /v1/messages [post]
|
||||
func MessagesEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
id := uuid.New().String()
|
||||
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.AnthropicRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model is required")
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model configuration not found")
|
||||
}
|
||||
|
||||
if input.MaxTokens <= 0 {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "max_tokens is required and must be greater than 0")
|
||||
}
|
||||
|
||||
xlog.Debug("Anthropic Messages endpoint configuration read", "config", cfg)
|
||||
|
||||
// Convert Anthropic messages to OpenAI format for internal processing
|
||||
openAIMessages := convertAnthropicToOpenAIMessages(input)
|
||||
|
||||
// Convert Anthropic tools to internal Functions format
|
||||
funcs, shouldUseFn := convertAnthropicTools(input, cfg)
|
||||
|
||||
// Create an OpenAI-compatible request for internal processing
|
||||
openAIReq := &schema.OpenAIRequest{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
BasicModelRequest: schema.BasicModelRequest{Model: input.Model},
|
||||
Temperature: input.Temperature,
|
||||
TopK: input.TopK,
|
||||
TopP: input.TopP,
|
||||
Maxtokens: &input.MaxTokens,
|
||||
},
|
||||
Messages: openAIMessages,
|
||||
Stream: input.Stream,
|
||||
Context: input.Context,
|
||||
Cancel: input.Cancel,
|
||||
}
|
||||
|
||||
// Set stop sequences
|
||||
if len(input.StopSequences) > 0 {
|
||||
openAIReq.Stop = input.StopSequences
|
||||
}
|
||||
|
||||
// Merge config settings
|
||||
if input.Temperature != nil {
|
||||
cfg.Temperature = input.Temperature
|
||||
}
|
||||
if input.TopK != nil {
|
||||
cfg.TopK = input.TopK
|
||||
}
|
||||
if input.TopP != nil {
|
||||
cfg.TopP = input.TopP
|
||||
}
|
||||
cfg.Maxtokens = &input.MaxTokens
|
||||
if len(input.StopSequences) > 0 {
|
||||
cfg.StopWords = append(cfg.StopWords, input.StopSequences...)
|
||||
}
|
||||
|
||||
// Template the prompt with tools if available
|
||||
predInput := evaluator.TemplateMessages(*openAIReq, openAIReq.Messages, cfg, funcs, shouldUseFn)
|
||||
xlog.Debug("Anthropic Messages - Prompt (after templating)", "prompt", predInput)
|
||||
|
||||
if input.Stream {
|
||||
return handleAnthropicStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
|
||||
return handleAnthropicNonStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
}
|
||||
|
||||
func handleAnthropicNonStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
|
||||
images := []string{}
|
||||
for _, m := range openAIReq.Messages {
|
||||
images = append(images, m.StringImages...)
|
||||
}
|
||||
|
||||
predFunc, err := backend.ModelInference(
|
||||
input.Context, predInput, openAIReq.Messages, images, nil, nil, ml, cfg, nil, nil, nil, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic model inference failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
|
||||
}
|
||||
|
||||
prediction, err := predFunc()
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic prediction failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
|
||||
}
|
||||
|
||||
result := backend.Finetune(*cfg, predInput, prediction.Response)
|
||||
|
||||
// Check if the result contains tool calls
|
||||
toolCalls := functions.ParseFunctionCall(result, cfg.FunctionsConfig)
|
||||
|
||||
var contentBlocks []schema.AnthropicContentBlock
|
||||
var stopReason string
|
||||
|
||||
if shouldUseFn && len(toolCalls) > 0 {
|
||||
// Model wants to use tools
|
||||
stopReason = "tool_use"
|
||||
for _, tc := range toolCalls {
|
||||
// Parse arguments as JSON
|
||||
var inputArgs map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(tc.Arguments), &inputArgs); err != nil {
|
||||
xlog.Warn("Failed to parse tool call arguments as JSON", "error", err, "args", tc.Arguments)
|
||||
inputArgs = map[string]interface{}{"raw": tc.Arguments}
|
||||
}
|
||||
|
||||
contentBlocks = append(contentBlocks, schema.AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: fmt.Sprintf("toolu_%s_%d", id, len(contentBlocks)),
|
||||
Name: tc.Name,
|
||||
Input: inputArgs,
|
||||
})
|
||||
}
|
||||
|
||||
// Add any text content before the tool calls
|
||||
textContent := functions.ParseTextContent(result, cfg.FunctionsConfig)
|
||||
if textContent != "" {
|
||||
// Prepend text block
|
||||
contentBlocks = append([]schema.AnthropicContentBlock{{Type: "text", Text: textContent}}, contentBlocks...)
|
||||
}
|
||||
} else {
|
||||
// Normal text response
|
||||
stopReason = "end_turn"
|
||||
contentBlocks = []schema.AnthropicContentBlock{
|
||||
{Type: "text", Text: result},
|
||||
}
|
||||
}
|
||||
|
||||
resp := &schema.AnthropicResponse{
|
||||
ID: fmt.Sprintf("msg_%s", id),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: input.Model,
|
||||
StopReason: &stopReason,
|
||||
Content: contentBlocks,
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: prediction.Usage.Prompt,
|
||||
OutputTokens: prediction.Usage.Completion,
|
||||
},
|
||||
}
|
||||
|
||||
if respData, err := json.Marshal(resp); err == nil {
|
||||
xlog.Debug("Anthropic Response", "response", string(respData))
|
||||
}
|
||||
|
||||
return c.JSON(200, resp)
|
||||
}
|
||||
|
||||
func handleAnthropicStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
|
||||
c.Response().Header().Set("Content-Type", "text/event-stream")
|
||||
c.Response().Header().Set("Cache-Control", "no-cache")
|
||||
c.Response().Header().Set("Connection", "keep-alive")
|
||||
|
||||
// Create OpenAI messages for inference
|
||||
openAIMessages := openAIReq.Messages
|
||||
|
||||
images := []string{}
|
||||
for _, m := range openAIMessages {
|
||||
images = append(images, m.StringImages...)
|
||||
}
|
||||
|
||||
// Send message_start event
|
||||
messageStart := schema.AnthropicStreamEvent{
|
||||
Type: "message_start",
|
||||
Message: &schema.AnthropicStreamMessage{
|
||||
ID: fmt.Sprintf("msg_%s", id),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Content: []schema.AnthropicContentBlock{},
|
||||
Model: input.Model,
|
||||
Usage: schema.AnthropicUsage{InputTokens: 0, OutputTokens: 0},
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, messageStart)
|
||||
|
||||
// Track accumulated content for tool call detection
|
||||
accumulatedContent := ""
|
||||
currentBlockIndex := 0
|
||||
inToolCall := false
|
||||
toolCallsEmitted := 0
|
||||
|
||||
// Send initial content_block_start event
|
||||
contentBlockStart := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_start",
|
||||
Index: currentBlockIndex,
|
||||
ContentBlock: &schema.AnthropicContentBlock{Type: "text", Text: ""},
|
||||
}
|
||||
sendAnthropicSSE(c, contentBlockStart)
|
||||
|
||||
// Stream content deltas
|
||||
tokenCallback := func(token string, usage backend.TokenUsage) bool {
|
||||
accumulatedContent += token
|
||||
|
||||
// If we're using functions, try to detect tool calls incrementally
|
||||
if shouldUseFn {
|
||||
cleanedResult := functions.CleanupLLMResult(accumulatedContent, cfg.FunctionsConfig)
|
||||
|
||||
// Try parsing for tool calls
|
||||
toolCalls := functions.ParseFunctionCall(cleanedResult, cfg.FunctionsConfig)
|
||||
|
||||
// If we detected new tool calls and haven't emitted them yet
|
||||
if len(toolCalls) > toolCallsEmitted {
|
||||
// Stop the current text block if we were in one
|
||||
if !inToolCall && currentBlockIndex == 0 {
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: currentBlockIndex,
|
||||
})
|
||||
currentBlockIndex++
|
||||
inToolCall = true
|
||||
}
|
||||
|
||||
// Emit new tool calls
|
||||
for i := toolCallsEmitted; i < len(toolCalls); i++ {
|
||||
tc := toolCalls[i]
|
||||
|
||||
// Send content_block_start for tool_use
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_start",
|
||||
Index: currentBlockIndex,
|
||||
ContentBlock: &schema.AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: fmt.Sprintf("toolu_%s_%d", id, i),
|
||||
Name: tc.Name,
|
||||
},
|
||||
})
|
||||
|
||||
// Send input_json_delta with the arguments
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_delta",
|
||||
Index: currentBlockIndex,
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
Type: "input_json_delta",
|
||||
PartialJSON: tc.Arguments,
|
||||
},
|
||||
})
|
||||
|
||||
// Send content_block_stop
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: currentBlockIndex,
|
||||
})
|
||||
|
||||
currentBlockIndex++
|
||||
}
|
||||
toolCallsEmitted = len(toolCalls)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Send regular text delta if not in tool call mode
|
||||
if !inToolCall {
|
||||
delta := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_delta",
|
||||
Index: 0,
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
Type: "text_delta",
|
||||
Text: token,
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, delta)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
predFunc, err := backend.ModelInference(
|
||||
input.Context, predInput, openAIMessages, images, nil, nil, ml, cfg, nil, nil, tokenCallback, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic stream model inference failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
|
||||
}
|
||||
|
||||
prediction, err := predFunc()
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic stream prediction failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
|
||||
}
|
||||
|
||||
// Send content_block_stop event for last block if we didn't close it yet
|
||||
if !inToolCall {
|
||||
contentBlockStop := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: 0,
|
||||
}
|
||||
sendAnthropicSSE(c, contentBlockStop)
|
||||
}
|
||||
|
||||
// Determine stop reason
|
||||
stopReason := "end_turn"
|
||||
if toolCallsEmitted > 0 {
|
||||
stopReason = "tool_use"
|
||||
}
|
||||
|
||||
// Send message_delta event with stop_reason
|
||||
messageDelta := schema.AnthropicStreamEvent{
|
||||
Type: "message_delta",
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
StopReason: &stopReason,
|
||||
},
|
||||
Usage: &schema.AnthropicUsage{
|
||||
OutputTokens: prediction.Usage.Completion,
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, messageDelta)
|
||||
|
||||
// Send message_stop event
|
||||
messageStop := schema.AnthropicStreamEvent{
|
||||
Type: "message_stop",
|
||||
}
|
||||
sendAnthropicSSE(c, messageStop)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sendAnthropicSSE(c echo.Context, event schema.AnthropicStreamEvent) {
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
xlog.Error("Failed to marshal SSE event", "error", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(c.Response().Writer, "event: %s\ndata: %s\n\n", event.Type, string(data))
|
||||
c.Response().Flush()
|
||||
}
|
||||
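A client-side sketch of consuming the stream written by sendAnthropicSSE above: each event goes out as "event: <type>" followed by "data: <json>", and this Python snippet simply accumulates text_delta fragments until message_stop. Host, port and model name are placeholders, and the JSON field names are assumed to follow the Anthropic event schema that these structs mirror.

    import json
    import urllib.request

    payload = json.dumps({
        "model": "my-local-model",
        "max_tokens": 128,
        "stream": True,
        "messages": [{"role": "user", "content": "Hello!"}],
    }).encode("utf-8")

    req = urllib.request.Request(
        "http://localhost:8080/v1/messages",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    text = ""
    with urllib.request.urlopen(req) as resp:
        for raw in resp:
            line = raw.decode("utf-8").strip()
            if not line.startswith("data: "):
                continue
            event = json.loads(line[len("data: "):])
            if event.get("type") == "content_block_delta" and event.get("delta", {}).get("type") == "text_delta":
                text += event["delta"].get("text", "")
            elif event.get("type") == "message_stop":
                break
    print(text)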
|
||||
func sendAnthropicError(c echo.Context, statusCode int, errorType, message string) error {
|
||||
resp := schema.AnthropicErrorResponse{
|
||||
Type: "error",
|
||||
Error: schema.AnthropicError{
|
||||
Type: errorType,
|
||||
Message: message,
|
||||
},
|
||||
}
|
||||
return c.JSON(statusCode, resp)
|
||||
}
|
||||
|
||||
func convertAnthropicToOpenAIMessages(input *schema.AnthropicRequest) []schema.Message {
|
||||
var messages []schema.Message
|
||||
|
||||
// Add system message if present
|
||||
if input.System != "" {
|
||||
messages = append(messages, schema.Message{
|
||||
Role: "system",
|
||||
StringContent: input.System,
|
||||
Content: input.System,
|
||||
})
|
||||
}
|
||||
|
||||
// Convert Anthropic messages to OpenAI format
|
||||
for _, msg := range input.Messages {
|
||||
openAIMsg := schema.Message{
|
||||
Role: msg.Role,
|
||||
}
|
||||
|
||||
// Handle content (can be string or array of content blocks)
|
||||
switch content := msg.Content.(type) {
|
||||
case string:
|
||||
openAIMsg.StringContent = content
|
||||
openAIMsg.Content = content
|
||||
case []interface{}:
|
||||
// Handle array of content blocks
|
||||
var textContent string
|
||||
var stringImages []string
|
||||
var toolCalls []schema.ToolCall
|
||||
toolCallIndex := 0
|
||||
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
blockType, _ := blockMap["type"].(string)
|
||||
switch blockType {
|
||||
case "text":
|
||||
if text, ok := blockMap["text"].(string); ok {
|
||||
textContent += text
|
||||
}
|
||||
case "image":
|
||||
// Handle image content
|
||||
if source, ok := blockMap["source"].(map[string]interface{}); ok {
|
||||
if sourceType, ok := source["type"].(string); ok && sourceType == "base64" {
|
||||
if data, ok := source["data"].(string); ok {
|
||||
mediaType, _ := source["media_type"].(string)
|
||||
// Format as data URI
|
||||
dataURI := fmt.Sprintf("data:%s;base64,%s", mediaType, data)
|
||||
stringImages = append(stringImages, dataURI)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "tool_use":
|
||||
// Convert tool_use to ToolCall format
|
||||
toolID, _ := blockMap["id"].(string)
|
||||
toolName, _ := blockMap["name"].(string)
|
||||
toolInput := blockMap["input"]
|
||||
|
||||
// Serialize input to JSON string
|
||||
inputJSON, err := json.Marshal(toolInput)
|
||||
if err != nil {
|
||||
xlog.Warn("Failed to marshal tool input", "error", err)
|
||||
inputJSON = []byte("{}")
|
||||
}
|
||||
|
||||
toolCalls = append(toolCalls, schema.ToolCall{
|
||||
Index: toolCallIndex,
|
||||
ID: toolID,
|
||||
Type: "function",
|
||||
FunctionCall: schema.FunctionCall{
|
||||
Name: toolName,
|
||||
Arguments: string(inputJSON),
|
||||
},
|
||||
})
|
||||
toolCallIndex++
|
||||
case "tool_result":
|
||||
// Convert tool_result to a message with role "tool"
|
||||
// This is handled by creating a separate message after this block
|
||||
// For now, we'll add it as text content
|
||||
toolUseID, _ := blockMap["tool_use_id"].(string)
|
||||
isError := false
|
||||
if isErrorVal, ok := blockMap["is_error"].(bool); ok {
|
||||
isError = isErrorVal
|
||||
}
|
||||
|
||||
var resultText string
|
||||
if resultContent, ok := blockMap["content"]; ok {
|
||||
switch rc := resultContent.(type) {
|
||||
case string:
|
||||
resultText = rc
|
||||
case []interface{}:
|
||||
// Array of content blocks
|
||||
for _, cb := range rc {
|
||||
if cbMap, ok := cb.(map[string]interface{}); ok {
|
||||
if cbMap["type"] == "text" {
|
||||
if text, ok := cbMap["text"].(string); ok {
|
||||
resultText += text
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tool results are folded into the surrounding user message text below
|
||||
// rather than emitted as a separate tool-role message
|
||||
if msg.Role == "user" {
|
||||
// Store tool result info for creating separate message
|
||||
prefix := ""
|
||||
if isError {
|
||||
prefix = "Error: "
|
||||
}
|
||||
textContent += fmt.Sprintf("\n[Tool Result for %s]: %s%s", toolUseID, prefix, resultText)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
openAIMsg.StringContent = textContent
|
||||
openAIMsg.Content = textContent
|
||||
openAIMsg.StringImages = stringImages
|
||||
|
||||
// Add tool calls if present
|
||||
if len(toolCalls) > 0 {
|
||||
openAIMsg.ToolCalls = toolCalls
|
||||
}
|
||||
}
|
||||
|
||||
messages = append(messages, openAIMsg)
|
||||
}
|
||||
|
||||
return messages
|
||||
}
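Illustrative sketch only (media type and base64 payload are placeholders): how a base64 image source block is expected to be flattened into a data URI by the conversion above.

package main

import "fmt"

func main() {
	// An Anthropic "image" block carries {"source": {"type": "base64", "media_type": ..., "data": ...}};
	// the converter turns it into a data URI on the OpenAI-style message.
	mediaType := "image/png"
	data := "iVBORw0KGgoAAAANSUhEUg" // truncated placeholder payload
	dataURI := fmt.Sprintf("data:%s;base64,%s", mediaType, data)
	fmt.Println(dataURI) // data:image/png;base64,iVBORw0KGgoAAAANSUhEUg
}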
|
||||
|
||||
// convertAnthropicTools converts Anthropic tools to internal Functions format
|
||||
func convertAnthropicTools(input *schema.AnthropicRequest, cfg *config.ModelConfig) (functions.Functions, bool) {
|
||||
if len(input.Tools) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var funcs functions.Functions
|
||||
for _, tool := range input.Tools {
|
||||
f := functions.Function{
|
||||
Name: tool.Name,
|
||||
Description: tool.Description,
|
||||
Parameters: tool.InputSchema,
|
||||
}
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
|
||||
// Handle tool_choice
|
||||
if input.ToolChoice != nil {
|
||||
switch tc := input.ToolChoice.(type) {
|
||||
case string:
|
||||
// "auto", "any", or "none"
|
||||
if tc == "any" {
|
||||
// Force the model to use one of the tools
|
||||
cfg.SetFunctionCallString("required")
|
||||
} else if tc == "none" {
|
||||
// Don't use tools
|
||||
return nil, false
|
||||
}
|
||||
// "auto" is the default - let model decide
|
||||
case map[string]interface{}:
|
||||
// Specific tool selection: {"type": "tool", "name": "tool_name"}
|
||||
if tcType, ok := tc["type"].(string); ok && tcType == "tool" {
|
||||
if name, ok := tc["name"].(string); ok {
|
||||
// Force specific tool
|
||||
cfg.SetFunctionCallString(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return funcs, len(funcs) > 0 && cfg.ShouldUseFunctions()
|
||||
}
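A standalone sketch (not part of the commit) of the tool_choice mapping implemented above; the helper name is hypothetical, but the string and object forms mirror the cases handled by the switch.

package main

import "fmt"

func mapToolChoice(tc interface{}) string {
	switch v := tc.(type) {
	case string:
		switch v {
		case "any":
			return "required" // force the model to call some tool
		case "none":
			return "" // tools disabled
		default:
			return "auto" // let the model decide
		}
	case map[string]interface{}:
		if v["type"] == "tool" {
			if name, ok := v["name"].(string); ok {
				return name // force a specific tool
			}
		}
	}
	return "auto"
}

func main() {
	fmt.Println(mapToolChoice("any"))                                                    // required
	fmt.Println(mapToolChoice(map[string]interface{}{"type": "tool", "name": "get_weather"})) // get_weather
}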
|
||||
@@ -65,13 +65,9 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig,
|
||||
// The client expects a JSON response
|
||||
return c.JSON(200, summary)
|
||||
} else {
|
||||
// Check if this is the manage route
|
||||
templateName := "views/index"
|
||||
if strings.HasSuffix(c.Request().URL.Path, "/manage") || c.Request().URL.Path == "/manage" {
|
||||
templateName = "views/manage"
|
||||
}
|
||||
// Render appropriate template
|
||||
return c.Render(200, templateName, summary)
|
||||
// Serve the SPA for both index and manage routes
|
||||
// The SPA handles routing client-side via Alpine.js
|
||||
return c.Render(200, "views/spa", summary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package openai
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -34,11 +35,54 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
}
|
||||
responses <- initialMessage
|
||||
|
||||
// Track accumulated content for reasoning extraction
|
||||
accumulatedContent := ""
|
||||
lastEmittedReasoning := ""
|
||||
lastEmittedCleanedContent := ""
|
||||
|
||||
_, _, err := ComputeChoices(req, s, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, tokenUsage backend.TokenUsage) bool {
|
||||
accumulatedContent += s
|
||||
// Extract reasoning from accumulated content
|
||||
currentReasoning, cleanedContent := functions.ExtractReasoning(accumulatedContent)
|
||||
|
||||
// Calculate new reasoning delta (what we haven't emitted yet)
|
||||
var reasoningDelta *string
|
||||
if currentReasoning != lastEmittedReasoning {
|
||||
// Extract only the new part
|
||||
if len(currentReasoning) > len(lastEmittedReasoning) && strings.HasPrefix(currentReasoning, lastEmittedReasoning) {
|
||||
newReasoning := currentReasoning[len(lastEmittedReasoning):]
|
||||
reasoningDelta = &newReasoning
|
||||
lastEmittedReasoning = currentReasoning
|
||||
} else if currentReasoning != "" {
|
||||
// If reasoning changed in a non-append way, emit the full current reasoning
|
||||
reasoningDelta = &currentReasoning
|
||||
lastEmittedReasoning = currentReasoning
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate content delta from cleaned content
|
||||
var deltaContent string
|
||||
if len(cleanedContent) > len(lastEmittedCleanedContent) && strings.HasPrefix(cleanedContent, lastEmittedCleanedContent) {
|
||||
deltaContent = cleanedContent[len(lastEmittedCleanedContent):]
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
} else if cleanedContent != lastEmittedCleanedContent {
|
||||
// If cleaned content changed but not in a simple append, extract delta from cleaned content
|
||||
// This handles cases where thinking tags are removed mid-stream
|
||||
if lastEmittedCleanedContent == "" {
|
||||
deltaContent = cleanedContent
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
} else {
|
||||
// Content changed in non-append way, use the new cleaned content
|
||||
deltaContent = cleanedContent
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
}
|
||||
}
|
||||
// Only emit content if there's actual content (not just thinking tags)
|
||||
// If deltaContent is empty, we still emit the response but with empty content
|
||||
|
||||
usage := schema.OpenAIUsage{
|
||||
PromptTokens: tokenUsage.Prompt,
|
||||
CompletionTokens: tokenUsage.Completion,
|
||||
@@ -49,11 +93,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing
|
||||
}
|
||||
|
||||
delta := &schema.Message{}
|
||||
// Only include content if there's actual content (not just thinking tags)
|
||||
if deltaContent != "" {
|
||||
delta.Content = &deltaContent
|
||||
}
|
||||
if reasoningDelta != nil && *reasoningDelta != "" {
|
||||
delta.Reasoning = reasoningDelta
|
||||
}
|
||||
|
||||
resp := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0, FinishReason: nil}},
|
||||
Choices: []schema.Choice{{Delta: delta, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
Usage: usage,
|
||||
}
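A small standalone sketch of the append-only delta rule used above for reasoning and cleaned content; the helper name and the sample chunks are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// delta returns the not-yet-emitted suffix when current extends last,
// re-emits current in full when it changed in a non-append way,
// and emits nothing when it is unchanged.
func delta(last, current string) (emit, newLast string) {
	if len(current) > len(last) && strings.HasPrefix(current, last) {
		return current[len(last):], current
	}
	if current != last {
		return current, current
	}
	return "", last
}

func main() {
	last := ""
	for _, chunk := range []string{"Let me", "Let me think", "Let me think this through"} {
		d, newLast := delta(last, chunk)
		last = newLast
		fmt.Printf("emit %q\n", d)
	}
	// emit "Let me"
	// emit " think"
	// emit " this through"
}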
|
||||
@@ -66,15 +119,120 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
}
|
||||
processTools := func(noAction string, prompt string, req *schema.OpenAIRequest, config *config.ModelConfig, loader *model.ModelLoader, responses chan schema.OpenAIResponse, extraUsage bool) error {
|
||||
result := ""
|
||||
lastEmittedCount := 0
|
||||
_, tokenUsage, err := ComputeChoices(req, prompt, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, usage backend.TokenUsage) bool {
|
||||
result += s
|
||||
// TODO: Change generated BNF grammar to be compliant with the schema so we can
|
||||
// stream the result token by token here.
|
||||
// Try incremental XML parsing for streaming support using iterative parser
|
||||
// This allows emitting partial tool calls as they're being generated
|
||||
cleanedResult := functions.CleanupLLMResult(result, config.FunctionsConfig)
|
||||
|
||||
// Determine XML format from config
|
||||
var xmlFormat *functions.XMLToolCallFormat
|
||||
if config.FunctionsConfig.XMLFormat != nil {
|
||||
xmlFormat = config.FunctionsConfig.XMLFormat
|
||||
} else if config.FunctionsConfig.XMLFormatPreset != "" {
|
||||
xmlFormat = functions.GetXMLFormatPreset(config.FunctionsConfig.XMLFormatPreset)
|
||||
}
|
||||
|
||||
// Use iterative parser for streaming (partial parsing enabled)
|
||||
// Try XML parsing first
|
||||
partialResults, parseErr := functions.ParseXMLIterative(cleanedResult, xmlFormat, true)
|
||||
if parseErr == nil && len(partialResults) > 0 {
|
||||
// Emit new XML tool calls that weren't emitted before
|
||||
if len(partialResults) > lastEmittedCount {
|
||||
for i := lastEmittedCount; i < len(partialResults); i++ {
|
||||
toolCall := partialResults[i]
|
||||
initialMessage := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model,
|
||||
Choices: []schema.Choice{{
|
||||
Delta: &schema.Message{
|
||||
Role: "assistant",
|
||||
ToolCalls: []schema.ToolCall{
|
||||
{
|
||||
Index: i,
|
||||
ID: id,
|
||||
Type: "function",
|
||||
FunctionCall: schema.FunctionCall{
|
||||
Name: toolCall.Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Index: 0,
|
||||
FinishReason: nil,
|
||||
}},
|
||||
Object: "chat.completion.chunk",
|
||||
}
|
||||
select {
|
||||
case responses <- initialMessage:
|
||||
default:
|
||||
}
|
||||
}
|
||||
lastEmittedCount = len(partialResults)
|
||||
}
|
||||
} else {
|
||||
// Try JSON tool call parsing for streaming
|
||||
// Check if the result looks like JSON tool calls
|
||||
jsonResults, jsonErr := functions.ParseJSONIterative(cleanedResult, true)
|
||||
if jsonErr == nil && len(jsonResults) > 0 {
|
||||
// Check if these are tool calls (have "name" and optionally "arguments")
|
||||
for _, jsonObj := range jsonResults {
|
||||
if name, ok := jsonObj["name"].(string); ok && name != "" {
|
||||
// This looks like a tool call
|
||||
args := "{}"
|
||||
if argsVal, ok := jsonObj["arguments"]; ok {
|
||||
if argsStr, ok := argsVal.(string); ok {
|
||||
args = argsStr
|
||||
} else {
|
||||
argsBytes, _ := json.Marshal(argsVal)
|
||||
args = string(argsBytes)
|
||||
}
|
||||
}
|
||||
// Emit tool call
|
||||
initialMessage := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model,
|
||||
Choices: []schema.Choice{{
|
||||
Delta: &schema.Message{
|
||||
Role: "assistant",
|
||||
ToolCalls: []schema.ToolCall{
|
||||
{
|
||||
Index: lastEmittedCount,
|
||||
ID: id,
|
||||
Type: "function",
|
||||
FunctionCall: schema.FunctionCall{
|
||||
Name: name,
|
||||
Arguments: args,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Index: 0,
|
||||
FinishReason: nil,
|
||||
}},
|
||||
Object: "chat.completion.chunk",
|
||||
}
|
||||
select {
|
||||
case responses <- initialMessage:
|
||||
default:
|
||||
}
|
||||
lastEmittedCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Extract reasoning before processing tool calls
|
||||
reasoning, cleanedResult := functions.ExtractReasoning(result)
|
||||
result = cleanedResult
|
||||
|
||||
textContentToReturn = functions.ParseTextContent(result, config.FunctionsConfig)
|
||||
result = functions.CleanupLLMResult(result, config.FunctionsConfig)
|
||||
functionResults := functions.ParseFunctionCall(result, config.FunctionsConfig)
|
||||
@@ -107,11 +265,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing
|
||||
}
|
||||
|
||||
var deltaReasoning *string
|
||||
if reasoning != "" {
|
||||
deltaReasoning = &reasoning
|
||||
}
|
||||
delta := &schema.Message{Content: &result}
|
||||
if deltaReasoning != nil {
|
||||
delta.Reasoning = deltaReasoning
|
||||
}
|
||||
|
||||
resp := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Content: &result}, Index: 0, FinishReason: nil}},
|
||||
Choices: []schema.Choice{{Delta: delta, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
Usage: usage,
|
||||
}
|
||||
@@ -452,10 +619,18 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
default:
|
||||
|
||||
tokenCallback := func(s string, c *[]schema.Choice) {
|
||||
// Extract reasoning from the response
|
||||
reasoning, cleanedS := functions.ExtractReasoning(s)
|
||||
s = cleanedS
|
||||
|
||||
if !shouldUseFn {
|
||||
// no function is called, just reply and use stop as finish reason
|
||||
stopReason := FinishReasonStop
|
||||
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
|
||||
message := &schema.Message{Role: "assistant", Content: &s}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: message})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -474,9 +649,13 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
}
|
||||
|
||||
stopReason := FinishReasonStop
|
||||
message := &schema.Message{Role: "assistant", Content: &result}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{
|
||||
FinishReason: &stopReason,
|
||||
Message: &schema.Message{Role: "assistant", Content: &result}})
|
||||
Message: message})
|
||||
default:
|
||||
toolCallsReason := FinishReasonToolCalls
|
||||
toolChoice := schema.Choice{
|
||||
@@ -485,6 +664,9 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
Role: "assistant",
|
||||
},
|
||||
}
|
||||
if reasoning != "" {
|
||||
toolChoice.Message.Reasoning = &reasoning
|
||||
}
|
||||
|
||||
for _, ss := range results {
|
||||
name, args := ss.Name, ss.Arguments
|
||||
@@ -505,16 +687,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
} else {
|
||||
// otherwise we return more choices directly (deprecated)
|
||||
functionCallReason := FinishReasonFunctionCall
|
||||
message := &schema.Message{
|
||||
Role: "assistant",
|
||||
Content: &textContentToReturn,
|
||||
FunctionCall: map[string]interface{}{
|
||||
"name": name,
|
||||
"arguments": args,
|
||||
},
|
||||
}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{
|
||||
FinishReason: &functionCallReason,
|
||||
Message: &schema.Message{
|
||||
Role: "assistant",
|
||||
Content: &textContentToReturn,
|
||||
FunctionCall: map[string]interface{}{
|
||||
"name": name,
|
||||
"arguments": args,
|
||||
},
|
||||
},
|
||||
Message: message,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
core/http/routes/anthropic.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
package routes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/anthropic"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
func RegisterAnthropicRoutes(app *echo.Echo,
|
||||
re *middleware.RequestExtractor,
|
||||
application *application.Application) {
|
||||
|
||||
// Anthropic Messages API endpoint
|
||||
messagesHandler := anthropic.MessagesEndpoint(
|
||||
application.ModelConfigLoader(),
|
||||
application.ModelLoader(),
|
||||
application.TemplatesEvaluator(),
|
||||
application.ApplicationConfig(),
|
||||
)
|
||||
|
||||
messagesMiddleware := []echo.MiddlewareFunc{
|
||||
middleware.TraceMiddleware(application),
|
||||
re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_CHAT)),
|
||||
re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.AnthropicRequest) }),
|
||||
setAnthropicRequestContext(application.ApplicationConfig()),
|
||||
}
|
||||
|
||||
// Main Anthropic endpoint
|
||||
app.POST("/v1/messages", messagesHandler, messagesMiddleware...)
|
||||
|
||||
// Also support without version prefix for compatibility
|
||||
app.POST("/messages", messagesHandler, messagesMiddleware...)
|
||||
}
|
||||
|
||||
// setAnthropicRequestContext sets up the context and cancel function for Anthropic requests
|
||||
func setAnthropicRequestContext(appConfig *config.ApplicationConfig) echo.MiddlewareFunc {
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.AnthropicRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "model is required")
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "model configuration not found")
|
||||
}
|
||||
|
||||
// Extract or generate the correlation ID
|
||||
// Anthropic uses x-request-id header
|
||||
correlationID := c.Request().Header.Get("x-request-id")
|
||||
if correlationID == "" {
|
||||
correlationID = uuid.New().String()
|
||||
}
|
||||
c.Response().Header().Set("x-request-id", correlationID)
|
||||
|
||||
// Set up context with cancellation
|
||||
reqCtx := c.Request().Context()
|
||||
c1, cancel := context.WithCancel(appConfig.Context)
|
||||
|
||||
// Cancel when request context is cancelled (client disconnects)
|
||||
go func() {
|
||||
select {
|
||||
case <-reqCtx.Done():
|
||||
cancel()
|
||||
case <-c1.Done():
|
||||
// Already cancelled
|
||||
}
|
||||
}()
|
||||
|
||||
// Add the correlation ID to the new context
|
||||
ctxWithCorrelationID := context.WithValue(c1, middleware.CorrelationIDKey, correlationID)
|
||||
|
||||
input.Context = ctxWithCorrelationID
|
||||
input.Cancel = cancel
|
||||
|
||||
if cfg.Model == "" {
|
||||
xlog.Debug("replacing empty cfg.Model with input value", "input.Model", input.Model)
|
||||
cfg.Model = input.Model
|
||||
}
|
||||
|
||||
c.Set(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, input)
|
||||
c.Set(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG, cfg)
|
||||
|
||||
// Log the Anthropic API version if provided
|
||||
anthropicVersion := c.Request().Header.Get("anthropic-version")
|
||||
if anthropicVersion != "" {
|
||||
xlog.Debug("Anthropic API version", "version", anthropicVersion)
|
||||
}
|
||||
|
||||
// Validate max_tokens is provided
|
||||
if input.MaxTokens <= 0 {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("max_tokens is required and must be greater than 0"))
|
||||
}
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
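A minimal client sketch against the /v1/messages route registered above, assuming a local server on port 8080; the model name, address and anthropic-version value are placeholders, and max_tokens is included because the middleware rejects requests without it.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := []byte(`{
	  "model": "my-local-model",
	  "max_tokens": 256,
	  "messages": [{"role": "user", "content": "Hello"}]
	}`)
	req, err := http.NewRequest("POST", "http://localhost:8080/v1/messages", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("anthropic-version", "2023-06-01") // optional; only logged by the middleware
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}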
|
||||
@@ -3,7 +3,6 @@ package routes
|
||||
import (
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
@@ -115,208 +114,24 @@ func RegisterUIRoutes(app *echo.Echo,
|
||||
registerBackendGalleryRoutes(app, appConfig, galleryService, processingOps)
|
||||
}
|
||||
|
||||
app.GET("/talk", func(c echo.Context) error {
|
||||
modelConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED)
|
||||
// Talk route - now served by SPA
|
||||
app.GET("/talk", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
if len(modelConfigs) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect(302, middleware.BaseURL(c))
|
||||
}
|
||||
// Chat routes - now served by SPA
|
||||
app.GET("/chat", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": "LocalAI - Talk",
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"Model": modelConfigs[0],
|
||||
// Show the Chat page with specific model
|
||||
app.GET("/chat/:model", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
// Text2Image routes - now served by SPA
|
||||
app.GET("/text2image/:model", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/talk", summary)
|
||||
})
|
||||
app.GET("/text2image", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
app.GET("/chat", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
// TTS routes - now served by SPA
|
||||
app.GET("/tts/:model", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect(302, middleware.BaseURL(c))
|
||||
}
|
||||
modelThatCanBeUsed := ""
|
||||
galleryConfigs := map[string]*gallery.ModelConfig{}
|
||||
|
||||
for _, m := range modelConfigs {
|
||||
cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
galleryConfigs[m.Name] = cfg
|
||||
}
|
||||
|
||||
title := "LocalAI - Chat"
|
||||
var modelContextSize *int
|
||||
|
||||
for _, b := range modelConfigs {
|
||||
if b.HasUsecases(config.FLAG_CHAT) {
|
||||
modelThatCanBeUsed = b.Name
|
||||
title = "LocalAI - Chat with " + modelThatCanBeUsed
|
||||
if b.LLMConfig.ContextSize != nil {
|
||||
modelContextSize = b.LLMConfig.ContextSize
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": title,
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"GalleryConfig": galleryConfigs,
|
||||
"ModelsConfig": modelConfigs,
|
||||
"Model": modelThatCanBeUsed,
|
||||
"ContextSize": modelContextSize,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/chat", summary)
|
||||
})
|
||||
|
||||
// Show the Chat page
|
||||
app.GET("/chat/:model", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
|
||||
galleryConfigs := map[string]*gallery.ModelConfig{}
|
||||
modelName := c.Param("model")
|
||||
var modelContextSize *int
|
||||
|
||||
for _, m := range modelConfigs {
|
||||
cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
galleryConfigs[m.Name] = cfg
|
||||
if m.Name == modelName && m.LLMConfig.ContextSize != nil {
|
||||
modelContextSize = m.LLMConfig.ContextSize
|
||||
}
|
||||
}
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": "LocalAI - Chat with " + modelName,
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"GalleryConfig": galleryConfigs,
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"Model": modelName,
|
||||
"ContextSize": modelContextSize,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/chat", summary)
|
||||
})
|
||||
|
||||
app.GET("/text2image/:model", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": "LocalAI - Generate images with " + c.Param("model"),
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"Model": c.Param("model"),
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/text2image", summary)
|
||||
})
|
||||
|
||||
app.GET("/text2image", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
|
||||
if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect(302, middleware.BaseURL(c))
|
||||
}
|
||||
|
||||
modelThatCanBeUsed := ""
|
||||
title := "LocalAI - Generate images"
|
||||
|
||||
for _, b := range modelConfigs {
|
||||
if b.HasUsecases(config.FLAG_IMAGE) {
|
||||
modelThatCanBeUsed = b.Name
|
||||
title = "LocalAI - Generate images with " + modelThatCanBeUsed
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": title,
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"Model": modelThatCanBeUsed,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/text2image", summary)
|
||||
})
|
||||
|
||||
app.GET("/tts/:model", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
|
||||
summary := map[string]interface{}{
|
||||
"Title": "LocalAI - Generate images with " + c.Param("model"),
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"Model": c.Param("model"),
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/tts", summary)
|
||||
})
|
||||
|
||||
app.GET("/tts", func(c echo.Context) error {
|
||||
modelConfigs := cl.GetAllModelsConfigs()
|
||||
modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
|
||||
|
||||
if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect(302, middleware.BaseURL(c))
|
||||
}
|
||||
|
||||
modelThatCanBeUsed := ""
|
||||
title := "LocalAI - Generate audio"
|
||||
|
||||
for _, b := range modelConfigs {
|
||||
if b.HasUsecases(config.FLAG_TTS) {
|
||||
modelThatCanBeUsed = b.Name
|
||||
title = "LocalAI - Generate audio with " + modelThatCanBeUsed
|
||||
break
|
||||
}
|
||||
}
|
||||
summary := map[string]interface{}{
|
||||
"Title": title,
|
||||
"BaseURL": middleware.BaseURL(c),
|
||||
"ModelsConfig": modelConfigs,
|
||||
"ModelsWithoutConfig": modelsWithoutConfig,
|
||||
"Model": modelThatCanBeUsed,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render(200, "views/tts", summary)
|
||||
})
|
||||
app.GET("/tts", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
|
||||
|
||||
// Traces UI
|
||||
app.GET("/traces", func(c echo.Context) error {
|
||||
|
||||
@@ -617,6 +617,12 @@ func RegisterUIAPIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model
|
||||
installedBackendsCount = len(installedBackends)
|
||||
}
|
||||
|
||||
// Get the detected system capability
|
||||
detectedCapability := ""
|
||||
if appConfig.SystemState != nil {
|
||||
detectedCapability = appConfig.SystemState.DetectedCapability()
|
||||
}
|
||||
|
||||
return c.JSON(200, map[string]interface{}{
|
||||
"backends": backendsJSON,
|
||||
"repositories": appConfig.BackendGalleries,
|
||||
@@ -629,6 +635,7 @@ func RegisterUIAPIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model
|
||||
"totalPages": totalPages,
|
||||
"prevPage": prevPage,
|
||||
"nextPage": nextPage,
|
||||
"systemCapability": detectedCapability,
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -750,6 +750,7 @@ function stopRequest() {
|
||||
if (!activeChat) return;
|
||||
|
||||
const request = activeRequests.get(activeChat.id);
|
||||
const requestModel = request?.model || null; // Get model before deleting request
|
||||
if (request) {
|
||||
if (request.controller) {
|
||||
request.controller.abort();
|
||||
@@ -779,7 +780,8 @@ function stopRequest() {
|
||||
`<span class='error'>Request cancelled by user</span>`,
|
||||
null,
|
||||
null,
|
||||
activeChat.id
|
||||
activeChat.id,
|
||||
requestModel
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1231,7 +1233,8 @@ async function promptGPT(systemPrompt, input) {
|
||||
startTime: requestStartTime,
|
||||
tokensReceived: 0,
|
||||
interval: null,
|
||||
maxTokensPerSecond: 0
|
||||
maxTokensPerSecond: 0,
|
||||
model: model // Store the model used for this request
|
||||
});
|
||||
|
||||
// Update reactive tracking for UI indicators
|
||||
@@ -1271,21 +1274,27 @@ async function promptGPT(systemPrompt, input) {
|
||||
return;
|
||||
} else {
|
||||
// Timeout error (controller was aborted by timeout, not user)
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Request timeout: MCP processing is taking longer than expected. Please try again.</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
}
|
||||
} else {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Network Error: ${error.message}</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
}
|
||||
toggleLoader(false, chatId);
|
||||
@@ -1299,12 +1308,15 @@ async function promptGPT(systemPrompt, input) {
|
||||
}
|
||||
|
||||
if (!response.ok) {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: POST ${endpoint} ${response.status}</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
toggleLoader(false, chatId);
|
||||
activeRequests.delete(chatId);
|
||||
@@ -1324,12 +1336,15 @@ async function promptGPT(systemPrompt, input) {
|
||||
.getReader();
|
||||
|
||||
if (!reader) {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: Failed to decode MCP API response</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
toggleLoader(false, chatId);
|
||||
activeRequests.delete(chatId);
|
||||
@@ -1353,6 +1368,7 @@ async function promptGPT(systemPrompt, input) {
|
||||
let lastAssistantMessageIndex = -1;
|
||||
let lastThinkingMessageIndex = -1;
|
||||
let lastThinkingScrollTime = 0;
|
||||
let hasReasoningFromAPI = false; // Track if we're receiving reasoning from API (skip tag-based detection)
|
||||
const THINKING_SCROLL_THROTTLE = 200; // Throttle scrolling to every 200ms
|
||||
|
||||
try {
|
||||
@@ -1386,19 +1402,24 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Handle different event types
|
||||
switch (eventData.type) {
|
||||
case "reasoning":
|
||||
hasReasoningFromAPI = true; // Mark that we're receiving reasoning from API
|
||||
if (eventData.content) {
|
||||
// Insert reasoning before assistant message if it exists
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) break; // Chat was deleted
|
||||
const isMCPMode = currentChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
|
||||
// Insert thinking before assistant message if it exists (always use "thinking" role)
|
||||
if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
|
||||
targetHistory.splice(lastAssistantMessageIndex, 0, {
|
||||
role: "reasoning",
|
||||
role: "thinking",
|
||||
content: eventData.content,
|
||||
html: DOMPurify.sanitize(marked.parse(eventData.content)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: false // Reasoning is always collapsed
|
||||
expanded: shouldExpand
|
||||
});
|
||||
lastAssistantMessageIndex++; // Adjust index since we inserted
|
||||
// Scroll smoothly after adding reasoning
|
||||
// Scroll smoothly after adding thinking
|
||||
setTimeout(() => {
|
||||
const chatContainer = document.getElementById('chat');
|
||||
if (chatContainer) {
|
||||
@@ -1410,7 +1431,7 @@ async function promptGPT(systemPrompt, input) {
|
||||
}, 100);
|
||||
} else {
|
||||
// No assistant message yet, just add normally
|
||||
chatStore.add("reasoning", eventData.content, null, null, chatId);
|
||||
chatStore.add("thinking", eventData.content, null, null, chatId);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -1476,14 +1497,17 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Only update display if this is the active chat (interval will handle it)
|
||||
// Don't call updateTokensPerSecond here to avoid unnecessary updates
|
||||
|
||||
// Check for thinking tags in the chunk (incremental detection)
|
||||
if (contentChunk.includes("<thinking>") || contentChunk.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
}
|
||||
|
||||
if (contentChunk.includes("</thinking>") || contentChunk.includes("</think>")) {
|
||||
// Only check for thinking tags if we're NOT receiving reasoning from API
|
||||
// This prevents duplicate thinking/reasoning messages
|
||||
if (!hasReasoningFromAPI) {
|
||||
// Check for thinking tags in the chunk (incremental detection)
|
||||
if (contentChunk.includes("<thinking>") || contentChunk.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
}
|
||||
|
||||
if (contentChunk.includes("</thinking>") || contentChunk.includes("</think>")) {
|
||||
isThinking = false;
|
||||
// When closing tag is detected, process the accumulated thinking content
|
||||
if (thinkingContent.trim()) {
|
||||
@@ -1537,10 +1561,11 @@ async function promptGPT(systemPrompt, input) {
|
||||
}
|
||||
thinkingContent = "";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
// Handle content based on thinking state (only if not receiving reasoning from API)
|
||||
if (!hasReasoningFromAPI && isThinking) {
|
||||
thinkingContent += contentChunk;
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) break; // Chat was deleted
|
||||
@@ -1598,12 +1623,15 @@ async function promptGPT(systemPrompt, input) {
|
||||
break;
|
||||
|
||||
case "error":
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>MCP Error: ${eventData.message}</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
break;
|
||||
}
|
||||
@@ -1619,16 +1647,21 @@ async function promptGPT(systemPrompt, input) {
|
||||
|
||||
// Process any thinking tags that might be in the accumulated content
|
||||
// This handles cases where tags are split across chunks
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = hasReasoningFromAPI
|
||||
? { regularContent: regularContent, thinkingContent: "" }
|
||||
: processThinkingTags(regularContent);
|
||||
|
||||
// Update or create assistant message with processed regular content
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) break; // Chat was deleted
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
if (processedRegular && processedRegular.trim()) {
|
||||
chatStore.add("assistant", processedRegular, null, null, chatId);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
}
|
||||
// Create assistant message if we have any content (even if empty string after processing)
|
||||
// This ensures the message is created and can be updated with more content later
|
||||
chatStore.add("assistant", processedRegular || "", null, null, chatId, requestModel);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
} else {
|
||||
const lastMessage = targetHistory[lastAssistantMessageIndex];
|
||||
if (lastMessage && lastMessage.role === "assistant") {
|
||||
@@ -1666,7 +1699,10 @@ async function promptGPT(systemPrompt, input) {
|
||||
if (assistantContentBuffer.length > 0) {
|
||||
const regularContent = assistantContentBuffer.join("");
|
||||
// Process any remaining thinking tags that might be in the buffer
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = hasReasoningFromAPI
|
||||
? { regularContent: regularContent, thinkingContent: "" }
|
||||
: processThinkingTags(regularContent);
|
||||
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) {
|
||||
@@ -1699,21 +1735,26 @@ async function promptGPT(systemPrompt, input) {
|
||||
}
|
||||
|
||||
// Then update or create assistant message
|
||||
// Always create/update assistant message if we have any content
|
||||
if (lastAssistantMessageIndex !== -1) {
|
||||
const lastMessage = targetHistory[lastAssistantMessageIndex];
|
||||
if (lastMessage && lastMessage.role === "assistant") {
|
||||
lastMessage.content = (lastMessage.content || "") + (processedRegular || "");
|
||||
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
|
||||
}
|
||||
} else if (processedRegular && processedRegular.trim()) {
|
||||
chatStore.add("assistant", processedRegular, null, null, chatId);
|
||||
} else {
|
||||
// Create assistant message (even if empty, so it can be updated with more content)
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add("assistant", processedRegular || "", null, null, chatId, requestModel);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Final thinking content flush if any data remains (from incremental detection)
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const finalChat = chatStore.getChat(chatId);
|
||||
if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
if (finalChat && !hasReasoningFromAPI && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
const finalHistory = finalChat.history;
|
||||
// Extract thinking content if tags are present
|
||||
const thinkingMatch = thinkingContent.match(/<(?:thinking|redacted_reasoning)>(.*?)<\/(?:thinking|redacted_reasoning)>/s);
|
||||
@@ -1754,7 +1795,9 @@ async function promptGPT(systemPrompt, input) {
|
||||
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
|
||||
}
|
||||
} else {
|
||||
chatStore.add("assistant", finalRegular, null, null, chatId);
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add("assistant", finalRegular, null, null, chatId, requestModel);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1812,12 +1855,15 @@ async function promptGPT(systemPrompt, input) {
|
||||
.getReader();
|
||||
|
||||
if (!reader) {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: Failed to decode API response</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
toggleLoader(false, chatId);
|
||||
activeRequests.delete(chatId);
|
||||
@@ -1848,9 +1894,11 @@ async function promptGPT(systemPrompt, input) {
|
||||
const addToChat = (token) => {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) return; // Chat was deleted
|
||||
chatStore.add("assistant", token, null, null, chatId);
|
||||
// Count tokens for rate calculation (per chat)
|
||||
// Get model from request for this chat
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add("assistant", token, null, null, chatId, requestModel);
|
||||
// Count tokens for rate calculation (per chat)
|
||||
if (request) {
|
||||
const tokenCount = Math.ceil(token.length / 4);
|
||||
request.tokensReceived += tokenCount;
|
||||
@@ -1862,9 +1910,13 @@ async function promptGPT(systemPrompt, input) {
|
||||
let buffer = "";
|
||||
let contentBuffer = [];
|
||||
let thinkingContent = "";
|
||||
let reasoningContent = ""; // Track reasoning from API reasoning field
|
||||
let isThinking = false;
|
||||
let lastThinkingMessageIndex = -1;
|
||||
let lastReasoningMessageIndex = -1; // Track reasoning message separately
|
||||
let lastAssistantMessageIndex = -1; // Track assistant message for reasoning placement
|
||||
let lastThinkingScrollTime = 0;
|
||||
let hasReasoningFromAPI = false; // Track if we're receiving reasoning from API (skip tag-based detection)
|
||||
const THINKING_SCROLL_THROTTLE = 200; // Throttle scrolling to every 200ms
|
||||
|
||||
try {
|
||||
@@ -1900,30 +1952,100 @@ async function promptGPT(systemPrompt, input) {
|
||||
chatStore.updateTokenUsage(jsonData.usage, chatId);
|
||||
}
|
||||
|
||||
const token = jsonData.choices[0].delta.content;
|
||||
const token = jsonData.choices?.[0]?.delta?.content;
|
||||
const reasoningDelta = jsonData.choices?.[0]?.delta?.reasoning;
|
||||
|
||||
if (token) {
|
||||
// Check for thinking tags
|
||||
if (token.includes("<thinking>") || token.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
// Handle reasoning from API reasoning field - always use "thinking" role
|
||||
if (reasoningDelta && reasoningDelta.trim() !== "") {
|
||||
hasReasoningFromAPI = true; // Mark that we're receiving reasoning from API
|
||||
reasoningContent += reasoningDelta;
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) {
|
||||
// Chat was deleted, skip this line
|
||||
return;
|
||||
}
|
||||
if (token.includes("</thinking>") || token.includes("</think>")) {
|
||||
isThinking = false;
|
||||
if (thinkingContent.trim()) {
|
||||
// Only add the final thinking message if we don't already have one
|
||||
if (lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
const isMCPMode = currentChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
|
||||
|
||||
// Only create/update thinking message if we have actual content
|
||||
if (reasoningContent.trim() !== "") {
|
||||
// Update or create thinking message (always use "thinking" role, not "reasoning")
|
||||
if (lastReasoningMessageIndex === -1) {
|
||||
// Find the last assistant message index to insert thinking before it
|
||||
const targetHistory = currentChat.history;
|
||||
const assistantIndex = targetHistory.length - 1;
|
||||
if (assistantIndex >= 0 && targetHistory[assistantIndex]?.role === "assistant") {
|
||||
// Insert thinking before assistant message
|
||||
targetHistory.splice(assistantIndex, 0, {
|
||||
role: "thinking",
|
||||
content: reasoningContent,
|
||||
html: DOMPurify.sanitize(marked.parse(reasoningContent)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: shouldExpand
|
||||
});
|
||||
lastReasoningMessageIndex = assistantIndex;
|
||||
lastAssistantMessageIndex = assistantIndex + 1; // Adjust for inserted thinking
|
||||
} else {
|
||||
// No assistant message yet, just add normally
|
||||
chatStore.add("thinking", reasoningContent, null, null, chatId);
|
||||
lastReasoningMessageIndex = currentChat.history.length - 1;
|
||||
}
|
||||
} else {
|
||||
// Update existing thinking message
|
||||
const targetHistory = currentChat.history;
|
||||
if (lastReasoningMessageIndex >= 0 && lastReasoningMessageIndex < targetHistory.length) {
|
||||
const thinkingMessage = targetHistory[lastReasoningMessageIndex];
|
||||
if (thinkingMessage && thinkingMessage.role === "thinking") {
|
||||
thinkingMessage.content = reasoningContent;
|
||||
thinkingMessage.html = DOMPurify.sanitize(marked.parse(reasoningContent));
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Scroll when reasoning is updated (throttled)
|
||||
const now = Date.now();
|
||||
if (now - lastThinkingScrollTime > THINKING_SCROLL_THROTTLE) {
|
||||
lastThinkingScrollTime = now;
|
||||
setTimeout(() => {
|
||||
const chatContainer = document.getElementById('chat');
|
||||
if (chatContainer) {
|
||||
chatContainer.scrollTo({
|
||||
top: chatContainer.scrollHeight,
|
||||
behavior: 'smooth'
|
||||
});
|
||||
}
|
||||
scrollThinkingBoxToBottom();
|
||||
}, 100);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
thinkingContent += token;
|
||||
if (token && token.trim() !== "") {
|
||||
// Only check for thinking tags if we're NOT receiving reasoning from API
|
||||
// This prevents duplicate thinking/reasoning messages
|
||||
if (!hasReasoningFromAPI) {
|
||||
// Check for thinking tags (legacy support - models that output tags directly)
|
||||
if (token.includes("<thinking>") || token.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
return;
|
||||
}
|
||||
if (token.includes("</thinking>") || token.includes("</think>")) {
|
||||
isThinking = false;
|
||||
if (thinkingContent.trim()) {
|
||||
// Only add the final thinking message if we don't already have one
|
||||
if (lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
thinkingContent += token;
|
||||
// Count tokens for rate calculation (per chat)
|
||||
const request = activeRequests.get(chatId);
|
||||
if (request) {
|
||||
@@ -1966,7 +2088,42 @@ async function promptGPT(systemPrompt, input) {
|
||||
}, 100);
|
||||
}
|
||||
} else {
|
||||
// Not in thinking state, add to content buffer
|
||||
contentBuffer.push(token);
|
||||
// Track assistant message index for reasoning placement
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
// Find or create assistant message index
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// If no assistant message yet, it will be created when we flush contentBuffer
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Receiving reasoning from API, just add token to content buffer
|
||||
contentBuffer.push(token);
|
||||
// Track assistant message index for reasoning placement
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
// Find or create assistant message index
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// If no assistant message yet, it will be created when we flush contentBuffer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
@@ -1978,6 +2135,17 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Efficiently update the chat in batch
|
||||
if (contentBuffer.length > 0) {
|
||||
addToChat(contentBuffer.join(""));
|
||||
// Update assistant message index after adding content
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
contentBuffer = [];
|
||||
// Scroll when assistant content is updated (this will also show thinking messages above)
|
||||
setTimeout(() => {
|
||||
@@ -1996,7 +2164,30 @@ async function promptGPT(systemPrompt, input) {
|
||||
if (contentBuffer.length > 0) {
|
||||
addToChat(contentBuffer.join(""));
|
||||
}
|
||||
|
||||
// Final reasoning flush if any data remains - always use "thinking" role
|
||||
const finalChat = chatStore.getChat(chatId);
|
||||
if (finalChat && reasoningContent.trim() && lastReasoningMessageIndex === -1) {
|
||||
const isMCPMode = finalChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode;
|
||||
const targetHistory = finalChat.history;
|
||||
// Find assistant message to insert before
|
||||
const assistantIndex = targetHistory.length - 1;
|
||||
if (assistantIndex >= 0 && targetHistory[assistantIndex]?.role === "assistant") {
|
||||
targetHistory.splice(assistantIndex, 0, {
|
||||
role: "thinking",
|
||||
content: reasoningContent,
|
||||
html: DOMPurify.sanitize(marked.parse(reasoningContent)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: shouldExpand
|
||||
});
|
||||
} else {
|
||||
chatStore.add("thinking", reasoningContent, null, null, chatId);
|
||||
}
|
||||
}
|
||||
|
||||
// Final thinking content flush (legacy tag-based thinking)
|
||||
if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
}
|
||||
@@ -2008,12 +2199,15 @@ async function promptGPT(systemPrompt, input) {
|
||||
if (error.name !== 'AbortError' || !currentAbortController) {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: Failed to process stream</span>`,
|
||||
null,
|
||||
null,
|
||||
chatId
|
||||
chatId,
|
||||
requestModel
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ async function promptDallE() {
|
||||
if (json.data && json.data.length > 0) {
|
||||
json.data.forEach((item, index) => {
|
||||
const imageContainer = document.createElement("div");
|
||||
imageContainer.className = "mb-4 bg-[var(--color-bg-primary)]/50 border border-[#1E293B] rounded-lg p-2";
|
||||
imageContainer.className = "flex flex-col";
|
||||
|
||||
// Create image element
|
||||
const img = document.createElement("img");
|
||||
@@ -166,23 +166,23 @@ async function promptDallE() {
|
||||
return; // Skip invalid items
|
||||
}
|
||||
img.alt = prompt;
|
||||
img.className = "w-full h-auto rounded-lg mb-2";
|
||||
img.className = "w-full h-auto rounded-lg";
|
||||
imageContainer.appendChild(img);
|
||||
|
||||
// Create caption container
|
||||
// Create caption container (optional, can be collapsed or shown on hover)
|
||||
const captionDiv = document.createElement("div");
|
||||
captionDiv.className = "mt-2 p-2 bg-[var(--color-bg-secondary)] rounded-lg";
|
||||
captionDiv.className = "mt-2 p-2 bg-[var(--color-bg-secondary)] rounded-lg text-xs";
|
||||
|
||||
// Prompt caption
|
||||
const promptCaption = document.createElement("p");
|
||||
promptCaption.className = "text-xs text-[var(--color-text-primary)] mb-1.5";
|
||||
promptCaption.className = "text-[var(--color-text-primary)] mb-1.5 break-words";
|
||||
promptCaption.innerHTML = '<strong>Prompt:</strong> ' + escapeHtml(prompt);
|
||||
captionDiv.appendChild(promptCaption);
|
||||
|
||||
// Negative prompt if provided
|
||||
if (negativePrompt) {
|
||||
const negativeCaption = document.createElement("p");
|
||||
negativeCaption.className = "text-xs text-[var(--color-text-secondary)] mb-1.5";
|
||||
negativeCaption.className = "text-[var(--color-text-secondary)] mb-1.5 break-words";
|
||||
negativeCaption.innerHTML = '<strong>Negative Prompt:</strong> ' + escapeHtml(negativePrompt);
|
||||
captionDiv.appendChild(negativeCaption);
|
||||
}
|
||||
|
||||
core/http/static/spa-home.js (new file, 411 lines)
@@ -0,0 +1,411 @@
|
||||
/**
|
||||
* SPA Home View JavaScript
|
||||
* Contains Alpine.js components and functions for the home view
|
||||
*/
|
||||
|
||||
// Home input form component
|
||||
function homeInputForm() {
|
||||
return {
|
||||
selectedModel: '',
|
||||
inputValue: '',
|
||||
shiftPressed: false,
|
||||
fileName: '',
|
||||
imageFiles: [],
|
||||
audioFiles: [],
|
||||
textFiles: [],
|
||||
attachedFiles: [],
|
||||
mcpMode: false,
|
||||
mcpAvailable: false,
|
||||
mcpModels: {},
|
||||
currentPlaceholder: 'Send a message...',
|
||||
placeholderIndex: 0,
|
||||
charIndex: 0,
|
||||
isTyping: false,
|
||||
typingTimeout: null,
|
||||
displayTimeout: null,
|
||||
placeholderMessages: [
|
||||
'What is Nuclear fusion?',
|
||||
'How does a combustion engine work?',
|
||||
'Explain quantum computing',
|
||||
'What causes climate change?',
|
||||
'How do neural networks learn?',
|
||||
'What is the theory of relativity?',
|
||||
'How does photosynthesis work?',
|
||||
'Explain the water cycle',
|
||||
'What is machine learning?',
|
||||
'How do black holes form?',
|
||||
'What is DNA and how does it work?',
|
||||
'Explain the greenhouse effect',
|
||||
'How does the immune system work?',
|
||||
'What is artificial intelligence?',
|
||||
'How do solar panels generate electricity?',
|
||||
'Explain the process of evolution',
|
||||
'What is the difference between weather and climate?',
|
||||
'How does the human brain process information?',
|
||||
'What is the structure of an atom?',
|
||||
'How do vaccines work?',
|
||||
'Explain the concept of entropy',
|
||||
'What is the speed of light?',
|
||||
'How does gravity work?',
|
||||
'What is the difference between mass and weight?'
|
||||
],
|
||||
|
||||
init() {
|
||||
window.currentPlaceholderText = this.currentPlaceholder;
|
||||
this.startTypingAnimation();
|
||||
// Build MCP models map from data attributes
|
||||
this.buildMCPModelsMap();
|
||||
// Select first model by default
|
||||
this.$nextTick(() => {
|
||||
const select = this.$el.querySelector('select');
|
||||
if (select && select.options.length > 1) {
|
||||
const firstModelOption = select.options[1];
|
||||
if (firstModelOption && firstModelOption.value) {
|
||||
this.selectedModel = firstModelOption.value;
|
||||
this.checkMCPAvailability();
|
||||
}
|
||||
}
|
||||
});
|
||||
// Watch for changes to selectedModel to update MCP availability
|
||||
this.$watch('selectedModel', () => {
|
||||
this.checkMCPAvailability();
|
||||
});
|
||||
},
|
||||
|
||||
buildMCPModelsMap() {
|
||||
const select = this.$el.querySelector('select');
|
||||
if (!select) return;
|
||||
this.mcpModels = {};
|
||||
for (let i = 0; i < select.options.length; i++) {
|
||||
const option = select.options[i];
|
||||
if (option.value) {
|
||||
const hasMcpAttr = option.getAttribute('data-has-mcp');
|
||||
this.mcpModels[option.value] = hasMcpAttr === 'true';
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
checkMCPAvailability() {
|
||||
if (!this.selectedModel) {
|
||||
this.mcpAvailable = false;
|
||||
this.mcpMode = false;
|
||||
return;
|
||||
}
|
||||
const hasMCP = this.mcpModels[this.selectedModel] === true;
|
||||
this.mcpAvailable = hasMCP;
|
||||
if (!hasMCP) {
|
||||
this.mcpMode = false;
|
||||
}
|
||||
},
|
||||
|
||||
startTypingAnimation() {
|
||||
if (this.isTyping) return;
|
||||
this.typeNextPlaceholder();
|
||||
},
|
||||
|
||||
typeNextPlaceholder() {
|
||||
if (this.isTyping) return;
|
||||
this.isTyping = true;
|
||||
this.charIndex = 0;
|
||||
const message = this.placeholderMessages[this.placeholderIndex];
|
||||
this.currentPlaceholder = '';
|
||||
window.currentPlaceholderText = '';
|
||||
|
||||
const typeChar = () => {
|
||||
if (this.charIndex < message.length) {
|
||||
this.currentPlaceholder = message.substring(0, this.charIndex + 1);
|
||||
window.currentPlaceholderText = this.currentPlaceholder;
|
||||
this.charIndex++;
|
||||
this.typingTimeout = setTimeout(typeChar, 30);
|
||||
} else {
|
||||
this.isTyping = false;
|
||||
window.currentPlaceholderText = this.currentPlaceholder;
|
||||
this.displayTimeout = setTimeout(() => {
|
||||
this.placeholderIndex = (this.placeholderIndex + 1) % this.placeholderMessages.length;
|
||||
this.typeNextPlaceholder();
|
||||
}, 2000);
|
||||
}
|
||||
};
|
||||
|
||||
typeChar();
|
||||
},
|
||||
|
||||
pauseTyping() {
|
||||
if (this.typingTimeout) {
|
||||
clearTimeout(this.typingTimeout);
|
||||
this.typingTimeout = null;
|
||||
}
|
||||
if (this.displayTimeout) {
|
||||
clearTimeout(this.displayTimeout);
|
||||
this.displayTimeout = null;
|
||||
}
|
||||
this.isTyping = false;
|
||||
},
|
||||
|
||||
resumeTyping() {
|
||||
if (!this.inputValue.trim() && !this.isTyping) {
|
||||
this.startTypingAnimation();
|
||||
}
|
||||
},
|
||||
|
||||
handleFocus() {
|
||||
if (this.isTyping && this.placeholderIndex < this.placeholderMessages.length) {
|
||||
const fullMessage = this.placeholderMessages[this.placeholderIndex];
|
||||
this.currentPlaceholder = fullMessage;
|
||||
window.currentPlaceholderText = fullMessage;
|
||||
}
|
||||
this.pauseTyping();
|
||||
},
|
||||
|
||||
handleBlur() {
|
||||
if (!this.inputValue.trim()) {
|
||||
this.resumeTyping();
|
||||
}
|
||||
},
|
||||
|
||||
handleInput() {
|
||||
if (this.inputValue.trim()) {
|
||||
this.pauseTyping();
|
||||
} else {
|
||||
this.resumeTyping();
|
||||
}
|
||||
},
|
||||
|
||||
handleFileSelection(files, fileType) {
|
||||
Array.from(files).forEach(file => {
|
||||
const exists = this.attachedFiles.some(f => f.name === file.name && f.type === fileType);
|
||||
if (!exists) {
|
||||
this.attachedFiles.push({ name: file.name, type: fileType });
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
removeAttachedFile(fileType, fileName) {
|
||||
const index = this.attachedFiles.findIndex(f => f.name === fileName && f.type === fileType);
|
||||
if (index !== -1) {
|
||||
this.attachedFiles.splice(index, 1);
|
||||
}
|
||||
if (fileType === 'image') {
|
||||
this.imageFiles = this.imageFiles.filter(f => f.name !== fileName);
|
||||
} else if (fileType === 'audio') {
|
||||
this.audioFiles = this.audioFiles.filter(f => f.name !== fileName);
|
||||
} else if (fileType === 'file') {
|
||||
this.textFiles = this.textFiles.filter(f => f.name !== fileName);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
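The MCP toggle above is driven entirely by the data-has-mcp attributes on the model option elements. A minimal sketch of the resulting behaviour, using hypothetical model names that are not part of the diff:

// Illustrative only: exercising the component object outside Alpine.
const form = homeInputForm();
form.mcpModels = { 'agent-model': true, 'plain-model': false }; // as buildMCPModelsMap() would produce

form.selectedModel = 'agent-model';
form.checkMCPAvailability();   // form.mcpAvailable === true, mcpMode left as the user set it

form.selectedModel = 'plain-model';
form.checkMCPAvailability();   // form.mcpAvailable === false, mcpMode forced back to false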
// Start chat function for SPA - navigates to chat view instead of full page redirect
|
||||
function startChatSPA(event) {
|
||||
if (event) {
|
||||
event.preventDefault();
|
||||
}
|
||||
|
||||
const form = event ? event.target.closest('form') : document.querySelector('form');
|
||||
if (!form) return;
|
||||
|
||||
const alpineComponent = form.closest('[x-data]');
|
||||
const select = alpineComponent ? alpineComponent.querySelector('select') : null;
|
||||
const textarea = form.querySelector('textarea');
|
||||
|
||||
const selectedModel = select ? select.value : '';
|
||||
let message = textarea ? textarea.value : '';
|
||||
|
||||
if (!message.trim() && window.currentPlaceholderText) {
|
||||
message = window.currentPlaceholderText;
|
||||
}
|
||||
|
||||
if (!selectedModel || !message.trim()) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get MCP mode from checkbox
|
||||
let mcpMode = false;
|
||||
const mcpToggle = document.getElementById('spa_home_mcp_toggle');
|
||||
if (mcpToggle && mcpToggle.checked) {
|
||||
mcpMode = true;
|
||||
}
|
||||
|
||||
// Store message and files in localStorage for chat view to pick up
|
||||
const chatData = {
|
||||
message: message,
|
||||
imageFiles: [],
|
||||
audioFiles: [],
|
||||
textFiles: [],
|
||||
mcpMode: mcpMode
|
||||
};
|
||||
|
||||
// Convert files to base64 for storage
|
||||
const imageInput = document.getElementById('spa_home_input_image');
|
||||
const audioInput = document.getElementById('spa_home_input_audio');
|
||||
const fileInput = document.getElementById('spa_home_input_file');
|
||||
|
||||
const filePromises = [
|
||||
...Array.from(imageInput?.files || []).map(file =>
|
||||
new Promise(resolve => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
|
||||
reader.readAsDataURL(file);
|
||||
})
|
||||
),
|
||||
...Array.from(audioInput?.files || []).map(file =>
|
||||
new Promise(resolve => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
|
||||
reader.readAsDataURL(file);
|
||||
})
|
||||
),
|
||||
...Array.from(fileInput?.files || []).map(file =>
|
||||
new Promise(resolve => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
|
||||
reader.readAsText(file);
|
||||
})
|
||||
)
|
||||
];
|
||||
|
||||
const navigateToChat = () => {
|
||||
// Store in localStorage
|
||||
localStorage.setItem('localai_index_chat_data', JSON.stringify(chatData));
|
||||
|
||||
// Use SPA router to navigate to chat
|
||||
if (window.Alpine && Alpine.store('router')) {
|
||||
Alpine.store('router').navigate('chat', { model: selectedModel });
|
||||
} else {
|
||||
// Fallback to full page redirect if router not available
|
||||
window.location.href = `/chat/${selectedModel}`;
|
||||
}
|
||||
};
|
||||
|
||||
if (filePromises.length > 0) {
|
||||
Promise.all(filePromises).then(files => {
|
||||
files.forEach(file => {
|
||||
if (file.type.startsWith('image/')) {
|
||||
chatData.imageFiles.push(file);
|
||||
} else if (file.type.startsWith('audio/')) {
|
||||
chatData.audioFiles.push(file);
|
||||
} else {
|
||||
chatData.textFiles.push(file);
|
||||
}
|
||||
});
|
||||
navigateToChat();
|
||||
}).catch(err => {
|
||||
console.error('Error processing files:', err);
|
||||
navigateToChat();
|
||||
});
|
||||
} else {
|
||||
navigateToChat();
|
||||
}
|
||||
}
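For reference, the handoff payload written to localStorage has this shape; the values below are hypothetical and only the key and field names come from the function above:

// Illustrative only: example of the 'localai_index_chat_data' payload.
localStorage.setItem('localai_index_chat_data', JSON.stringify({
  message: 'What is Nuclear fusion?',
  imageFiles: [{ name: 'photo.png', data: 'data:image/png;base64,...', type: 'image/png' }],
  audioFiles: [],
  textFiles: [],
  mcpMode: false
}));
// The chat view is expected to read (and presumably clear) this key when it initialises.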
// Resource Monitor component (GPU if available, otherwise RAM)
function resourceMonitor() {
  return {
    resourceData: null,
    pollInterval: null,

    async fetchResourceData() {
      try {
        const response = await fetch('/api/resources');
        if (response.ok) {
          this.resourceData = await response.json();
        }
      } catch (error) {
        console.error('Error fetching resource data:', error);
      }
    },

    startPolling() {
      this.fetchResourceData();
      this.pollInterval = setInterval(() => this.fetchResourceData(), 5000);
    },

    stopPolling() {
      if (this.pollInterval) {
        clearInterval(this.pollInterval);
      }
    }
  };
}
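A minimal sketch of driving the monitor imperatively; in the SPA it is presumably attached via Alpine (x-data/x-init), so this is illustrative only:

const monitor = resourceMonitor();
monitor.startPolling();   // fetches /api/resources immediately, then every 5 seconds
// ...when the view is torn down:
monitor.stopPolling();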
// Stop individual model
|
||||
async function stopModel(modelName) {
|
||||
if (!confirm(`Are you sure you want to stop "${modelName}"?`)) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch('/backend/shutdown', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ model: modelName })
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
setTimeout(() => {
|
||||
window.location.reload();
|
||||
}, 500);
|
||||
} else {
|
||||
alert('Failed to stop model');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error stopping model:', error);
|
||||
alert('Failed to stop model');
|
||||
}
|
||||
}
|
||||
|
||||
// Stop all loaded models
|
||||
async function stopAllModels(component) {
|
||||
// Get loaded models from DOM
|
||||
const loadedModelElements = document.querySelectorAll('[data-loaded-model]');
|
||||
const loadedModelNames = Array.from(loadedModelElements).map(el => {
|
||||
const span = el.querySelector('span.truncate');
|
||||
return span ? span.textContent.trim() : '';
|
||||
}).filter(name => name.length > 0);
|
||||
|
||||
if (loadedModelNames.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!confirm(`Are you sure you want to stop all ${loadedModelNames.length} loaded model(s)?`)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (component) {
|
||||
component.stoppingAll = true;
|
||||
}
|
||||
|
||||
try {
|
||||
const stopPromises = loadedModelNames.map(modelName =>
|
||||
fetch('/backend/shutdown', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ model: modelName })
|
||||
})
|
||||
);
|
||||
|
||||
await Promise.all(stopPromises);
|
||||
|
||||
setTimeout(() => {
|
||||
window.location.reload();
|
||||
}, 1000);
|
||||
} catch (error) {
|
||||
console.error('Error stopping models:', error);
|
||||
alert('Failed to stop some models');
|
||||
if (component) {
|
||||
component.stoppingAll = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make functions available globally
|
||||
window.homeInputForm = homeInputForm;
|
||||
window.startChatSPA = startChatSPA;
|
||||
window.resourceMonitor = resourceMonitor;
|
||||
window.stopModel = stopModel;
|
||||
window.stopAllModels = stopAllModels;
|
||||
core/http/static/spa-router.js (new file, 148 lines)
@@ -0,0 +1,148 @@
/**
 * LocalAI SPA Router
 * Client-side routing for the single-page application
 */

// Define routes and their corresponding view IDs
const SPA_ROUTES = {
  'home': { title: 'LocalAI', viewId: 'view-home', paths: ['/', ''] },
  'chat': { title: 'LocalAI - Chat', viewId: 'view-chat', paths: ['/chat'] },
  'text2image': { title: 'LocalAI - Images', viewId: 'view-text2image', paths: ['/text2image'] },
  'tts': { title: 'LocalAI - TTS', viewId: 'view-tts', paths: ['/tts'] },
  'talk': { title: 'LocalAI - Talk', viewId: 'view-talk', paths: ['/talk'] },
  'manage': { title: 'LocalAI - System', viewId: 'view-manage', paths: ['/manage'] },
  'browse': { title: 'LocalAI - Model Gallery', viewId: 'view-browse', paths: ['/browse'] }
};

// Parse URL path to determine route
function parseUrlPath(pathname) {
  // Remove trailing slash
  pathname = pathname.replace(/\/$/, '') || '/';

  // Check for hash-based routes first (for SPA navigation)
  const hash = window.location.hash.slice(1);
  if (hash) {
    const hashParts = hash.split('/');
    const route = hashParts[0];
    const model = hashParts[1] || null;
    if (SPA_ROUTES[route]) {
      return { route, params: model ? { model } : {} };
    }
  }

  // Check path-based routes
  for (const [route, config] of Object.entries(SPA_ROUTES)) {
    for (const path of config.paths) {
      if (pathname === path) {
        return { route, params: {} };
      }
      // Check for parameterized routes like /chat/:model
      if (pathname.startsWith(path + '/')) {
        const param = pathname.slice(path.length + 1);
        if (param) {
          return { route, params: { model: param } };
        }
      }
    }
  }

  // Default to home
  return { route: 'home', params: {} };
}
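A couple of illustrative calls (hypothetical inputs, not part of the diff) showing the returned shape:

// Illustrative only:
// parseUrlPath('/')  -> { route: 'home', params: {} }
// with window.location.hash === '#chat/my-model' (hypothetical model name):
// parseUrlPath('/anything') -> { route: 'chat', params: { model: 'my-model' } }
// Note that 'home' also lists '' among its paths, so the prefix check in the path loop
// matches most plain paths first; in practice navigate() below keeps URLs in hash form,
// so the hash branch is the one normally exercised after SPA navigation.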
// Initialize the router store for Alpine.js
document.addEventListener('alpine:init', () => {
  // Parse initial route from URL
  const initialRoute = parseUrlPath(window.location.pathname);

  Alpine.store('router', {
    currentRoute: initialRoute.route,
    routeParams: initialRoute.params,
    previousRoute: null,

    /**
     * Navigate to a route
     * @param {string} route - The route name to navigate to
     * @param {Object} params - Optional parameters for the route
     */
    navigate(route, params = {}) {
      if (!SPA_ROUTES[route]) {
        console.warn(`Unknown route: ${route}`);
        return;
      }

      this.previousRoute = this.currentRoute;
      this.currentRoute = route;
      this.routeParams = params;

      // Update document title
      document.title = SPA_ROUTES[route].title;

      // Update URL without page reload using history API
      const url = route === 'home' ? '/' : `/#${route}`;
      if (params.model) {
        window.history.pushState({ route, params }, '', `/#${route}/${params.model}`);
      } else {
        window.history.pushState({ route, params }, '', url);
      }

      // Scroll to top on navigation
      window.scrollTo(0, 0);

      // Emit custom event for route change listeners
      window.dispatchEvent(new CustomEvent('spa:navigate', {
        detail: { route, params, previousRoute: this.previousRoute }
      }));
    },

    /**
     * Check if the current route matches
     * @param {string} route - The route to check
     * @returns {boolean}
     */
    isRoute(route) {
      return this.currentRoute === route;
    },

    /**
     * Navigate to chat with a specific model
     * @param {string} model - The model name
     */
    navigateToChat(model) {
      this.navigate('chat', { model });
    },

    /**
     * Navigate to text2image with a specific model
     * @param {string} model - The model name
     */
    navigateToText2Image(model) {
      this.navigate('text2image', { model });
    },

    /**
     * Navigate to TTS with a specific model
     * @param {string} model - The model name
     */
    navigateToTTS(model) {
      this.navigate('tts', { model });
    }
  });
});

// Handle browser back/forward buttons
window.addEventListener('popstate', (event) => {
  if (event.state && event.state.route) {
    Alpine.store('router').currentRoute = event.state.route;
    Alpine.store('router').routeParams = event.state.params || {};
  } else {
    // Parse URL for route
    const parsed = parseUrlPath(window.location.pathname);
    Alpine.store('router').currentRoute = parsed.route;
    Alpine.store('router').routeParams = parsed.params;
  }
});

// Export for use in other scripts
window.SPA_ROUTES = SPA_ROUTES;
window.parseUrlPath = parseUrlPath;
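A short usage sketch, assuming spa-router.js has already been loaded so the 'router' store exists; the model name and handler body are illustrative, not part of the diff:

// Navigate programmatically and observe route changes.
window.addEventListener('spa:navigate', (e) => {
  console.log('route ->', e.detail.route, e.detail.params, 'from', e.detail.previousRoute);
});
Alpine.store('router').navigateToChat('my-model');   // pushes /#chat/my-model and emits spa:navigate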
@@ -54,6 +54,11 @@
|
||||
<span class="font-semibold text-cyan-300" x-text="installedBackends"></span>
|
||||
<span class="text-[#94A3B8] ml-1">installed</span>
|
||||
</a>
|
||||
<div class="flex items-center bg-[#101827] rounded-lg px-4 py-2 border border-[#38BDF8]/30">
|
||||
<i class="fas fa-microchip text-[#38BDF8] mr-2"></i>
|
||||
<span class="text-[#94A3B8] mr-1">Capability:</span>
|
||||
<span class="font-semibold text-[#38BDF8]" x-text="systemCapability"></span>
|
||||
</div>
|
||||
<a href="https://localai.io/backends/" target="_blank" class="btn-primary">
|
||||
<i class="fas fa-info-circle mr-2"></i>
|
||||
<span>Documentation</span>
|
||||
@@ -588,6 +593,7 @@ function backendsGallery() {
|
||||
totalPages: 1,
|
||||
availableBackends: 0,
|
||||
installedBackends: 0,
|
||||
systemCapability: '',
|
||||
selectedBackend: null,
|
||||
jobProgress: {},
|
||||
notifications: [],
|
||||
@@ -683,6 +689,7 @@ function backendsGallery() {
|
||||
this.totalPages = data.totalPages || 1;
|
||||
this.availableBackends = data.availableBackends || 0;
|
||||
this.installedBackends = data.installedBackends || 0;
|
||||
this.systemCapability = data.systemCapability || 'default';
|
||||
} catch (error) {
|
||||
console.error('Error fetching backends:', error);
|
||||
} finally {
|
||||
|
||||
@@ -41,7 +41,7 @@ SOFTWARE.
|
||||
__chatContextSize = {{ .ContextSize }};
|
||||
{{ end }}
|
||||
|
||||
// Store gallery configs for header icon display
|
||||
// Store gallery configs for header icon display and model info modal
|
||||
window.__galleryConfigs = {};
|
||||
{{ $allGalleryConfigs:=.GalleryConfig }}
|
||||
{{ range $modelName, $galleryConfig := $allGalleryConfigs }}
|
||||
@@ -49,6 +49,16 @@ SOFTWARE.
|
||||
{{ if $galleryConfig.Icon }}
|
||||
window.__galleryConfigs["{{$modelName}}"].Icon = "{{$galleryConfig.Icon}}";
|
||||
{{ end }}
|
||||
{{ if $galleryConfig.Description }}
|
||||
window.__galleryConfigs["{{$modelName}}"].Description = {{ printf "%q" $galleryConfig.Description }};
|
||||
{{ end }}
|
||||
{{ if $galleryConfig.URLs }}
|
||||
window.__galleryConfigs["{{$modelName}}"].URLs = [
|
||||
{{ range $idx, $url := $galleryConfig.URLs }}
|
||||
{{ if $idx }},{{ end }}{{ printf "%q" $url }}
|
||||
{{ end }}
|
||||
];
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
// Function to initialize store
|
||||
@@ -276,12 +286,31 @@ SOFTWARE.
|
||||
}
|
||||
},
|
||||
|
||||
add(role, content, image, audio, targetChatId = null) {
|
||||
add(role, content, image, audio, targetChatId = null, model = null) {
|
||||
// If targetChatId is provided, add to that chat, otherwise use active chat
|
||||
// This allows streaming to continue to the correct chat even if user switches
|
||||
const chat = targetChatId ? this.getChat(targetChatId) : this.activeChat();
|
||||
if (!chat) return;
|
||||
|
||||
// Determine model for this message:
|
||||
// - If model is explicitly provided, use it (for assistant messages with specific model)
|
||||
// - For user messages, use the current chat's model
|
||||
// - For other messages (thinking, tool_call, etc.), inherit from previous message or use chat model
|
||||
let messageModel = model;
|
||||
if (!messageModel) {
|
||||
if (role === "user") {
|
||||
// User messages always use the current chat's model
|
||||
messageModel = chat.model || "";
|
||||
} else if (role === "assistant") {
|
||||
// Assistant messages use the chat's model (should be set when request is made)
|
||||
messageModel = chat.model || "";
|
||||
} else {
|
||||
// For thinking, tool_call, etc., try to inherit from last assistant message, or use chat model
|
||||
const lastAssistant = chat.history.slice().reverse().find(m => m.role === "assistant");
|
||||
messageModel = lastAssistant?.model || chat.model || "";
|
||||
}
|
||||
}
|
||||
|
||||
const N = chat.history.length - 1;
|
||||
// For thinking, reasoning, tool_call, and tool_result messages, always create a new message
|
||||
if (role === "thinking" || role === "reasoning" || role === "tool_call" || role === "tool_result") {
|
||||
@@ -307,11 +336,11 @@ SOFTWARE.
|
||||
c += DOMPurify.sanitize(marked.parse(line));
|
||||
});
|
||||
}
|
||||
// Set expanded state: thinking is expanded by default in non-MCP mode, collapsed in MCP mode
|
||||
// Reasoning, tool_call, and tool_result are always collapsed by default
|
||||
// Set expanded state: thinking and reasoning are expanded by default in non-MCP mode, collapsed in MCP mode
|
||||
// tool_call and tool_result are always collapsed by default
|
||||
const isMCPMode = chat.mcpMode || false;
|
||||
const shouldExpand = (role === "thinking" && !isMCPMode) || false;
|
||||
chat.history.push({ role, content, html: c, image, audio, expanded: shouldExpand });
|
||||
const shouldExpand = ((role === "thinking" || role === "reasoning") && !isMCPMode) || false;
|
||||
chat.history.push({ role, content, html: c, image, audio, expanded: shouldExpand, model: messageModel });
|
||||
|
||||
// Auto-name chat from first user message
|
||||
if (role === "user" && chat.name === "New Chat" && content.trim()) {
|
||||
@@ -332,6 +361,10 @@ SOFTWARE.
|
||||
if (audio && audio.length > 0) {
|
||||
chat.history[N].audio = [...(chat.history[N].audio || []), ...audio];
|
||||
}
|
||||
// Preserve model if merging (don't overwrite)
|
||||
if (!chat.history[N].model && messageModel) {
|
||||
chat.history[N].model = messageModel;
|
||||
}
|
||||
} else {
|
||||
let c = "";
|
||||
const lines = content.split("\n");
|
||||
@@ -343,7 +376,8 @@ SOFTWARE.
|
||||
content,
|
||||
html: c,
|
||||
image: image || [],
|
||||
audio: audio || []
|
||||
audio: audio || [],
|
||||
model: messageModel
|
||||
});
|
||||
|
||||
// Auto-name chat from first user message
|
||||
@@ -473,6 +507,11 @@ SOFTWARE.
|
||||
activeChat.model = modelName;
|
||||
activeChat.updatedAt = Date.now();
|
||||
|
||||
// Update model info modal with new model
|
||||
if (window.updateModelInfoModal) {
|
||||
window.updateModelInfoModal(modelName);
|
||||
}
|
||||
|
||||
// Get context size from data attribute
|
||||
let contextSize = null;
|
||||
if (selectedOption.dataset.contextSize) {
|
||||
@@ -512,18 +551,23 @@ SOFTWARE.
|
||||
}
|
||||
|
||||
// Update model selector to reflect the change (ensure it stays in sync)
|
||||
// Note: We don't dispatch a change event here to avoid infinite loop
|
||||
// The selector is already updated via user interaction or programmatic change
|
||||
const modelSelector = document.getElementById('modelSelector');
|
||||
if (modelSelector) {
|
||||
// Find and select the option matching the model
|
||||
const optionValue = 'chat/' + modelName;
|
||||
for (let i = 0; i < modelSelector.options.length; i++) {
|
||||
if (modelSelector.options[i].value === optionValue) {
|
||||
modelSelector.selectedIndex = i;
|
||||
// Only update if it's different to avoid unnecessary updates
|
||||
if (modelSelector.selectedIndex !== i) {
|
||||
modelSelector.selectedIndex = i;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Trigger Alpine reactivity by dispatching change event
|
||||
modelSelector.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
// Don't dispatch change event here - it would cause infinite recursion
|
||||
// The selector is already in sync with the model
|
||||
}
|
||||
|
||||
// Trigger MCP availability check in Alpine component
|
||||
@@ -579,27 +623,52 @@ SOFTWARE.
|
||||
<div class="flex items-center justify-between gap-2">
|
||||
<label class="text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide flex-shrink-0">Model</label>
|
||||
<div class="flex items-center gap-1 flex-shrink-0">
|
||||
{{ if $model }}
|
||||
{{ $galleryConfig:= index $allGalleryConfigs $model}}
|
||||
{{ if $galleryConfig }}
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ if $model }}
|
||||
<a href="/models/edit/{{$model}}"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
{{ end }}
|
||||
<!-- Info button - reactive to active chat model -->
|
||||
<template x-if="$store.chat.activeChat() && $store.chat.activeChat().model && window.__galleryConfigs && window.__galleryConfigs[$store.chat.activeChat().model]">
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
:data-model-name="$store.chat.activeChat().model"
|
||||
@click="if (window.updateModelInfoModal) { window.updateModelInfoModal($store.chat.activeChat().model, true); }"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
</template>
|
||||
<!-- Fallback info button for initial model from server -->
|
||||
<template x-if="(!$store.chat.activeChat() || !$store.chat.activeChat().model) && window.__galleryConfigs && window.__galleryConfigs['{{$model}}']">
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
data-model-name="{{$model}}"
|
||||
@click="if (window.updateModelInfoModal) { window.updateModelInfoModal('{{$model}}', true); }"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
</template>
|
||||
<!-- Edit button - reactive to active chat model -->
|
||||
<template x-if="$store.chat.activeChat() && $store.chat.activeChat().model">
|
||||
<a :href="'/models/edit/' + $store.chat.activeChat().model"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
</template>
|
||||
<!-- Fallback edit button for initial model from server -->
|
||||
<template x-if="!$store.chat.activeChat() || !$store.chat.activeChat().model">
|
||||
{{ if $model }}
|
||||
<a href="/models/edit/{{$model}}"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
{{ end }}
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
<select
|
||||
@@ -1248,11 +1317,20 @@ SOFTWARE.
|
||||
</template>
|
||||
<template x-if="message.role != 'user' && message.role != 'thinking' && message.role != 'reasoning' && message.role != 'tool_call' && message.role != 'tool_result'">
|
||||
<div class="flex items-center space-x-2">
|
||||
{{ if $galleryConfig }}
|
||||
{{ if $galleryConfig.Icon }}<img src="{{$galleryConfig.Icon}}" class="rounded-lg mt-2 max-w-8 max-h-8 border border-[var(--color-primary-border)]/20">{{end}}
|
||||
{{ end }}
|
||||
<!-- Model icon - from message history, fallback to active chat -->
|
||||
<template x-if="message.model && window.__galleryConfigs && window.__galleryConfigs[message.model] && window.__galleryConfigs[message.model].Icon">
|
||||
<img :src="window.__galleryConfigs[message.model].Icon" class="rounded-lg mt-2 max-w-8 max-h-8 border border-[var(--color-primary-border)]/20">
|
||||
</template>
|
||||
<!-- Fallback: use active chat model if message doesn't have one -->
|
||||
<template x-if="!message.model && $store.chat.activeChat() && $store.chat.activeChat().model && window.__galleryConfigs && window.__galleryConfigs[$store.chat.activeChat().model] && window.__galleryConfigs[$store.chat.activeChat().model].Icon">
|
||||
<img :src="window.__galleryConfigs[$store.chat.activeChat().model].Icon" class="rounded-lg mt-2 max-w-8 max-h-8 border border-[var(--color-primary-border)]/20">
|
||||
</template>
|
||||
<!-- Final fallback: initial model from server -->
|
||||
<template x-if="!message.model && (!$store.chat.activeChat() || !$store.chat.activeChat().model) && window.__galleryConfigs && window.__galleryConfigs['{{$model}}'] && window.__galleryConfigs['{{$model}}'].Icon">
|
||||
<img :src="window.__galleryConfigs['{{$model}}'].Icon" class="rounded-lg mt-2 max-w-8 max-h-8 border border-[var(--color-primary-border)]/20">
|
||||
</template>
|
||||
<div class="flex flex-col flex-1">
|
||||
<span class="text-xs font-semibold text-[var(--color-text-secondary)] mb-1">{{if .Model}}{{.Model}}{{else}}Assistant{{end}}</span>
|
||||
<span class="text-xs font-semibold text-[var(--color-text-secondary)] mb-1" x-text="message.model || $store.chat.activeChat()?.model || '{{if .Model}}{{.Model}}{{else}}Assistant{{end}}'"></span>
|
||||
<div class="flex-1 text-[var(--color-text-primary)] flex items-center space-x-2 min-w-0">
|
||||
<div class="p-3 rounded-lg bg-[var(--color-bg-secondary)] border border-[var(--color-accent-border)]/20 shadow-lg max-w-full overflow-x-auto overflow-wrap-anywhere" x-html="message.html"></div>
|
||||
<button @click="copyToClipboard(message.html)" title="Copy to clipboard" class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors p-1 flex-shrink-0">
|
||||
@@ -1455,17 +1533,14 @@ SOFTWARE.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Modal moved outside of sidebar to appear in center of page -->
|
||||
{{ if $model }}
|
||||
{{ $galleryConfig:= index $allGalleryConfigs $model}}
|
||||
{{ if $galleryConfig }}
|
||||
<div id="model-info-modal" tabindex="-1" aria-hidden="true" class="hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 flex justify-center items-center w-full md:inset-0 h-[calc(100%-1rem)] max-h-full">
|
||||
<!-- Modal moved outside of sidebar to appear in center of page - Always available, content updated dynamically -->
|
||||
<div id="model-info-modal" tabindex="-1" aria-hidden="true" class="hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 flex justify-center items-center w-full h-full md:inset-0 max-h-full" style="padding: 1rem;">
|
||||
<div class="relative p-4 w-full max-w-2xl max-h-full">
|
||||
<div class="relative p-4 w-full max-w-2xl max-h-full bg-white rounded-lg shadow dark:bg-gray-700">
|
||||
<!-- Header -->
|
||||
<div class="flex items-center justify-between p-4 md:p-5 border-b rounded-t dark:border-gray-600">
|
||||
<h3 class="text-xl font-semibold text-gray-900 dark:text-white">{{ $model }}</h3>
|
||||
<button class="text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white" data-modal-hide="model-info-modal">
|
||||
<h3 id="model-info-modal-title" class="text-xl font-semibold text-gray-900 dark:text-white">{{ if $model }}{{ $model }}{{ end }}</h3>
|
||||
<button class="text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white" data-modal-hide="model-info-modal" @click="if (window.closeModelInfoModal) { window.closeModelInfoModal(); }">
|
||||
<svg class="w-3 h-3" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 14 14">
|
||||
<path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m1 1 6 6m0 0 6 6M7 7l6-6M7 7l-6 6"/>
|
||||
</svg>
|
||||
@@ -1476,29 +1551,24 @@ SOFTWARE.
|
||||
<!-- Body -->
|
||||
<div class="p-4 md:p-5 space-y-4">
|
||||
<div class="flex justify-center items-center">
|
||||
{{ if $galleryConfig.Icon }}<img class="lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3 entered loaded" src="{{$galleryConfig.Icon}}" loading="lazy"/>{{end}}
|
||||
<img id="model-info-modal-icon" class="lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3 entered loaded" style="display: none;" loading="lazy"/>
|
||||
</div>
|
||||
<div id="model-info-description" class="text-base leading-relaxed text-gray-500 dark:text-gray-400 break-words max-w-full">{{ $galleryConfig.Description }}</div>
|
||||
<div id="model-info-description" class="text-base leading-relaxed text-gray-500 dark:text-gray-400 break-words max-w-full"></div>
|
||||
<hr>
|
||||
<p class="text-sm font-semibold text-gray-900 dark:text-white">Links</p>
|
||||
<ul>
|
||||
{{range $galleryConfig.URLs}}
|
||||
<li><a href="{{ . }}" target="_blank">{{ . }}</a></li>
|
||||
{{end}}
|
||||
<ul id="model-info-links">
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<!-- Footer -->
|
||||
<div class="flex items-center p-4 md:p-5 border-t border-gray-200 rounded-b dark:border-gray-600">
|
||||
<button data-modal-hide="model-info-modal" class="py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700">
|
||||
<button data-modal-hide="model-info-modal" class="py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700" @click="if (window.closeModelInfoModal) { window.closeModelInfoModal(); }">
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
<!-- Alpine store initialization and utilities -->
|
||||
<script>
|
||||
@@ -1709,10 +1779,20 @@ SOFTWARE.
|
||||
});
|
||||
|
||||
// Also listen for click events on modal toggle buttons
|
||||
document.querySelectorAll('[data-modal-toggle="model-info-modal"]').forEach(button => {
|
||||
button.addEventListener('click', () => {
|
||||
// Use event delegation to handle dynamically created buttons
|
||||
document.addEventListener('click', (e) => {
|
||||
const button = e.target.closest('[data-modal-toggle="model-info-modal"]');
|
||||
if (button) {
|
||||
// Update modal with current model before showing
|
||||
if (window.Alpine && window.Alpine.store("chat")) {
|
||||
const activeChat = window.Alpine.store("chat").activeChat();
|
||||
const modelName = activeChat ? activeChat.model : (button.dataset.modelName || (document.getElementById("chat-model") ? document.getElementById("chat-model").value : null));
|
||||
if (modelName && window.updateModelInfoModal) {
|
||||
window.updateModelInfoModal(modelName, true);
|
||||
}
|
||||
}
|
||||
setTimeout(processMarkdown, 300);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Process on initial load if libraries are ready
|
||||
@@ -1753,12 +1833,176 @@ SOFTWARE.
|
||||
syncModelSelectorOnLoad();
|
||||
}
|
||||
|
||||
// Function to update model info modal with current model
|
||||
// Set openModal to true to actually open the modal, false to just update content
|
||||
window.updateModelInfoModal = function(modelName, openModal = false) {
|
||||
if (!modelName) {
|
||||
return;
|
||||
}
|
||||
if (!window.__galleryConfigs) {
|
||||
return;
|
||||
}
|
||||
|
||||
const galleryConfig = window.__galleryConfigs[modelName];
|
||||
// Check if galleryConfig exists and has at least one property
|
||||
if (!galleryConfig || Object.keys(galleryConfig).length === 0) {
|
||||
// Still update the modal title even if no config, so user can see which model they clicked
|
||||
const titleEl = document.getElementById('model-info-modal-title');
|
||||
if (titleEl) {
|
||||
titleEl.textContent = modelName;
|
||||
}
|
||||
// Show message that no info is available
|
||||
const descEl = document.getElementById('model-info-description');
|
||||
if (descEl) {
|
||||
descEl.textContent = 'No additional information available for this model.';
|
||||
}
|
||||
const linksEl = document.getElementById('model-info-links');
|
||||
if (linksEl) {
|
||||
linksEl.innerHTML = '';
|
||||
}
|
||||
const iconEl = document.getElementById('model-info-modal-icon');
|
||||
if (iconEl) {
|
||||
iconEl.style.display = 'none';
|
||||
}
|
||||
// Only open the modal if explicitly requested
|
||||
if (openModal) {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
modalElement.classList.remove('hidden');
|
||||
modalElement.setAttribute('aria-hidden', 'false');
|
||||
// Add backdrop
|
||||
let backdrop = document.querySelector('.modal-backdrop');
|
||||
if (!backdrop) {
|
||||
backdrop = document.createElement('div');
|
||||
backdrop.className = 'modal-backdrop fixed inset-0 bg-gray-900 bg-opacity-50 dark:bg-opacity-80 z-40';
|
||||
document.body.appendChild(backdrop);
|
||||
backdrop.addEventListener('click', () => {
|
||||
closeModelInfoModal();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Update modal title
|
||||
const titleEl = document.getElementById('model-info-modal-title');
|
||||
if (titleEl) {
|
||||
titleEl.textContent = modelName;
|
||||
}
|
||||
|
||||
// Update icon
|
||||
const iconEl = document.getElementById('model-info-modal-icon');
|
||||
if (iconEl) {
|
||||
if (galleryConfig.Icon) {
|
||||
iconEl.src = galleryConfig.Icon;
|
||||
iconEl.style.display = 'block';
|
||||
} else {
|
||||
iconEl.style.display = 'none';
|
||||
}
|
||||
}
|
||||
|
||||
// Update description
|
||||
const descEl = document.getElementById('model-info-description');
|
||||
if (descEl) {
|
||||
descEl.textContent = galleryConfig.Description || 'No description available.';
|
||||
}
|
||||
|
||||
// Update links
|
||||
const linksEl = document.getElementById('model-info-links');
|
||||
if (linksEl && galleryConfig.URLs && Array.isArray(galleryConfig.URLs) && galleryConfig.URLs.length > 0) {
|
||||
linksEl.innerHTML = '';
|
||||
galleryConfig.URLs.forEach(url => {
|
||||
const li = document.createElement('li');
|
||||
const a = document.createElement('a');
|
||||
a.href = url;
|
||||
a.target = '_blank';
|
||||
a.textContent = url;
|
||||
li.appendChild(a);
|
||||
linksEl.appendChild(li);
|
||||
});
|
||||
} else if (linksEl) {
|
||||
linksEl.innerHTML = '<li>No links available</li>';
|
||||
}
|
||||
|
||||
// Only open the modal if explicitly requested
|
||||
if (openModal) {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
// Ensure positioning classes are present (they might have been removed)
|
||||
if (!modalElement.classList.contains('flex')) {
|
||||
modalElement.classList.add('flex');
|
||||
}
|
||||
if (!modalElement.classList.contains('justify-center')) {
|
||||
modalElement.classList.add('justify-center');
|
||||
}
|
||||
if (!modalElement.classList.contains('items-center')) {
|
||||
modalElement.classList.add('items-center');
|
||||
}
|
||||
// Ensure fixed positioning
|
||||
if (!modalElement.classList.contains('fixed')) {
|
||||
modalElement.classList.add('fixed');
|
||||
}
|
||||
// Ensure full width and height
|
||||
if (!modalElement.classList.contains('w-full')) {
|
||||
modalElement.classList.add('w-full');
|
||||
}
|
||||
if (!modalElement.classList.contains('h-full')) {
|
||||
modalElement.classList.add('h-full');
|
||||
}
|
||||
// Ensure padding is set
|
||||
if (!modalElement.style.padding) {
|
||||
modalElement.style.padding = '1rem';
|
||||
}
|
||||
// Remove hidden class if present
|
||||
modalElement.classList.remove('hidden');
|
||||
// Set aria-hidden to false
|
||||
modalElement.setAttribute('aria-hidden', 'false');
|
||||
// Add backdrop if needed
|
||||
let backdrop = document.querySelector('.modal-backdrop');
|
||||
if (!backdrop) {
|
||||
backdrop = document.createElement('div');
|
||||
backdrop.className = 'modal-backdrop fixed inset-0 bg-gray-900 bg-opacity-50 dark:bg-opacity-80 z-40';
|
||||
document.body.appendChild(backdrop);
|
||||
backdrop.addEventListener('click', () => {
|
||||
window.closeModelInfoModal();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Function to close the model info modal
|
||||
window.closeModelInfoModal = function() {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
modalElement.classList.add('hidden');
|
||||
modalElement.setAttribute('aria-hidden', 'true');
|
||||
}
|
||||
const backdrop = document.querySelector('.modal-backdrop');
|
||||
if (backdrop) {
|
||||
backdrop.remove();
|
||||
}
|
||||
};
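How the two helpers are meant to combine, with a hypothetical model name:

// Illustrative only:
window.updateModelInfoModal('my-model', false);  // refresh title/icon/description/links, keep modal closed
window.updateModelInfoModal('my-model', true);   // same, but also show the modal with a backdrop
window.closeModelInfoModal();                    // hide the modal and remove the backdrop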
// Also sync after Alpine initializes (in case it runs after DOMContentLoaded)
|
||||
function initializeModelInfo() {
|
||||
syncModelSelectorOnLoad();
|
||||
// Initialize model info modal content with current model (but don't open it)
|
||||
if (window.updateModelInfoModal && window.Alpine && window.Alpine.store("chat")) {
|
||||
const activeChat = window.Alpine.store("chat").activeChat();
|
||||
const modelName = activeChat ? activeChat.model : (document.getElementById("chat-model") ? document.getElementById("chat-model").value : null);
|
||||
if (modelName) {
|
||||
window.updateModelInfoModal(modelName, false); // false = don't open, just update content
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (window.Alpine) {
|
||||
Alpine.nextTick(syncModelSelectorOnLoad);
|
||||
Alpine.nextTick(initializeModelInfo);
|
||||
} else {
|
||||
document.addEventListener('alpine:init', () => {
|
||||
Alpine.nextTick(syncModelSelectorOnLoad);
|
||||
Alpine.nextTick(initializeModelInfo);
|
||||
});
|
||||
}
|
||||
</script>
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
<a href="traces/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-chart-line text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Traces
|
||||
</a>
|
||||
<a href="swagger/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<a href="swagger/index.html" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-code text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>API
|
||||
</a>
|
||||
|
||||
@@ -100,7 +100,7 @@
|
||||
<a href="traces/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-chart-line text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Traces
|
||||
</a>
|
||||
<a href="swagger/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<a href="swagger/index.html" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-code text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>API
|
||||
</a>
|
||||
|
||||
|
||||
core/http/views/partials/spa_navbar.html (new file, 154 lines)
@@ -0,0 +1,154 @@
<nav class="bg-[var(--color-bg-primary)] shadow-2xl border-b border-[var(--color-bg-secondary)]">
|
||||
<div class="container mx-auto px-4 py-2">
|
||||
<div class="flex items-center justify-between">
|
||||
<div class="flex items-center">
|
||||
<!-- Logo Image -->
|
||||
<a href="#" @click.prevent="$store.router.navigate('home')" class="flex items-center group">
|
||||
<img src="static/logo_horizontal.png"
|
||||
alt="LocalAI Logo"
|
||||
class="h-10 mr-3 brightness-110 transition-all duration-300 group-hover:brightness-125 group-hover:drop-shadow-[0_0_8px_var(--color-primary-border)]">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<!-- Menu button for small screens -->
|
||||
<div class="lg:hidden">
|
||||
<button @click="mobileMenuOpen = !mobileMenuOpen" class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] focus:outline-none p-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)]">
|
||||
<i class="fas fa-bars fa-lg"></i>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<!-- Navigation links -->
|
||||
<div class="hidden lg:flex lg:items-center lg:justify-end lg:space-x-1">
|
||||
<a href="#" @click.prevent="$store.router.navigate('home')"
|
||||
:class="$store.router.currentRoute === 'home' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-home text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Home
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('chat')"
|
||||
:class="$store.router.currentRoute === 'chat' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fa-solid fa-comments text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Chat
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('text2image')"
|
||||
:class="$store.router.currentRoute === 'text2image' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-image text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Images
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('tts')"
|
||||
:class="$store.router.currentRoute === 'tts' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fa-solid fa-music text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>TTS
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('talk')"
|
||||
:class="$store.router.currentRoute === 'talk' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fa-solid fa-phone text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Talk
|
||||
</a>
|
||||
<a href="agent-jobs" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-tasks text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Agent Jobs
|
||||
</a>
|
||||
<a href="traces/" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-chart-line text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Traces
|
||||
</a>
|
||||
<a href="swagger/index.html" class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-code text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>API
|
||||
</a>
|
||||
|
||||
<!-- System Dropdown -->
|
||||
<div class="relative" @click.away="settingsOpen = false">
|
||||
<button @click="settingsOpen = !settingsOpen"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] px-2 py-2 rounded-lg transition duration-300 ease-in-out hover:bg-[var(--color-bg-secondary)] flex items-center group text-sm">
|
||||
<i class="fas fa-cog text-[var(--color-primary)] mr-1.5 text-sm group-hover:scale-110 transition-transform"></i>Settings
|
||||
<i class="fas fa-chevron-down ml-1 text-xs transition-transform" :class="settingsOpen ? 'rotate-180' : ''"></i>
|
||||
</button>
|
||||
<div x-show="settingsOpen"
|
||||
x-transition:enter="transition ease-out duration-200"
|
||||
x-transition:enter-start="opacity-0 scale-95"
|
||||
x-transition:enter-end="opacity-100 scale-100"
|
||||
x-transition:leave="transition ease-in duration-150"
|
||||
x-transition:leave-start="opacity-100 scale-100"
|
||||
x-transition:leave-end="opacity-0 scale-95"
|
||||
class="absolute top-full right-0 mt-1 w-48 bg-[var(--color-bg-secondary)] border border-[var(--color-primary-border)]/20 rounded-lg shadow-lg z-50 py-1">
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse'); settingsOpen = false" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-primary)] px-3 py-2 text-sm transition-colors flex items-center">
|
||||
<i class="fas fa-brain text-[var(--color-primary)] mr-2 text-xs"></i>Models
|
||||
</a>
|
||||
<a href="browse/backends" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-primary)] px-3 py-2 text-sm transition-colors flex items-center">
|
||||
<i class="fas fa-server text-[var(--color-primary)] mr-2 text-xs"></i>Backends
|
||||
</a>
|
||||
<a href="p2p/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-primary)] px-3 py-2 text-sm transition-colors flex items-center">
|
||||
<i class="fa-solid fa-circle-nodes text-[var(--color-primary)] mr-2 text-xs"></i>Swarm
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('manage'); settingsOpen = false" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-primary)] px-3 py-2 text-sm transition-colors flex items-center">
|
||||
<i class="fas fa-cog text-[var(--color-primary)] mr-2 text-xs"></i>System
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Collapsible menu for small screens -->
|
||||
<div class="lg:hidden" x-show="mobileMenuOpen" x-transition>
|
||||
<div class="pt-3 pb-2 space-y-1 border-t border-[var(--color-bg-secondary)] mt-2">
|
||||
<a href="#" @click.prevent="$store.router.navigate('home'); mobileMenuOpen = false"
|
||||
:class="$store.router.currentRoute === 'home' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="block hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-home text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Home
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('chat'); mobileMenuOpen = false"
|
||||
:class="$store.router.currentRoute === 'chat' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="block hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fa-solid fa-comments text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Chat
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('text2image'); mobileMenuOpen = false"
|
||||
:class="$store.router.currentRoute === 'text2image' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="block hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-image text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Images
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('tts'); mobileMenuOpen = false"
|
||||
:class="$store.router.currentRoute === 'tts' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="block hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fa-solid fa-music text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>TTS
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('talk'); mobileMenuOpen = false"
|
||||
:class="$store.router.currentRoute === 'talk' ? 'text-[var(--color-primary)] bg-[var(--color-bg-secondary)]' : 'text-[var(--color-text-secondary)]'"
|
||||
class="block hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fa-solid fa-phone text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Talk
|
||||
</a>
|
||||
<a href="agent-jobs" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-tasks text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Agent Jobs
|
||||
</a>
|
||||
<a href="traces/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-chart-line text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Traces
|
||||
</a>
|
||||
<a href="swagger/index.html" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-code text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>API
|
||||
</a>
|
||||
|
||||
<!-- System with submenu -->
|
||||
<div>
|
||||
<button @click="mobileSettingsOpen = !mobileSettingsOpen"
|
||||
class="w-full text-left text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] px-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center justify-between text-sm">
|
||||
<div class="flex items-center">
|
||||
<i class="fas fa-cog text-[var(--color-primary)] mr-3 w-5 text-center text-sm"></i>Settings
|
||||
</div>
|
||||
<i class="fas fa-chevron-down text-xs transition-transform" :class="mobileSettingsOpen ? 'rotate-180' : ''"></i>
|
||||
</button>
|
||||
<div x-show="mobileSettingsOpen" x-transition class="overflow-hidden">
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse'); mobileMenuOpen = false" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] pl-8 pr-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-brain text-[var(--color-primary)] mr-3 w-5 text-center text-xs"></i>Models
|
||||
</a>
|
||||
<a href="browse/backends" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] pl-8 pr-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-server text-[var(--color-primary)] mr-3 w-5 text-center text-xs"></i>Backends
|
||||
</a>
|
||||
<a href="p2p/" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] pl-8 pr-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fa-solid fa-circle-nodes text-[var(--color-primary)] mr-3 w-5 text-center text-xs"></i>Swarm
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('manage'); mobileMenuOpen = false" class="block text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] hover:bg-[var(--color-bg-secondary)] pl-8 pr-3 py-2 rounded-lg transition duration-300 ease-in-out flex items-center text-sm">
|
||||
<i class="fas fa-cog text-[var(--color-primary)] mr-3 w-5 text-center text-xs"></i>System
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
565 core/http/views/spa.html Normal file
@@ -0,0 +1,565 @@
<!DOCTYPE html>
<html lang="en">
{{template "views/partials/head" .}}

<!-- Critical Alpine.js component functions must be defined before Alpine loads -->
<script>
  // Resource Monitor component (GPU if available, otherwise RAM)
  function resourceMonitor() {
    return {
      resourceData: null,
      pollInterval: null,

      async fetchResourceData() {
        try {
          const response = await fetch('/api/resources');
          if (response.ok) {
            this.resourceData = await response.json();
          }
        } catch (error) {
          console.error('Error fetching resource data:', error);
        }
      },

      startPolling() {
        this.fetchResourceData();
        this.pollInterval = setInterval(() => this.fetchResourceData(), 5000);
      },

      stopPolling() {
        if (this.pollInterval) {
          clearInterval(this.pollInterval);
        }
      }
    };
  }

  // Format bytes helper
  function formatBytes(bytes) {
    if (bytes === 0) return '0 B';
    const k = 1024;
    const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
  }

  // Home input form component
  function homeInputForm() {
    return {
      selectedModel: '',
      inputValue: '',
      shiftPressed: false,
      fileName: '',
      imageFiles: [],
      audioFiles: [],
      textFiles: [],
      attachedFiles: [],
      mcpMode: false,
      mcpAvailable: false,
      mcpModels: {},
      currentPlaceholder: 'Send a message...',
      placeholderIndex: 0,
      charIndex: 0,
      isTyping: false,
      typingTimeout: null,
      displayTimeout: null,
      placeholderMessages: [
        'What is Nuclear fusion?',
        'How does a combustion engine work?',
        'Explain quantum computing',
        'What causes climate change?',
        'How do neural networks learn?',
        'What is the theory of relativity?',
        'How does photosynthesis work?',
        'Explain the water cycle',
        'What is machine learning?',
        'How do black holes form?'
      ],

      init() {
        window.currentPlaceholderText = this.currentPlaceholder;
        this.startTypingAnimation();
        this.buildMCPModelsMap();
        this.$nextTick(() => {
          const select = this.$el.querySelector('select');
          if (select && select.options.length > 1) {
            const firstModelOption = select.options[1];
            if (firstModelOption && firstModelOption.value) {
              this.selectedModel = firstModelOption.value;
              this.checkMCPAvailability();
            }
          }
        });
        this.$watch('selectedModel', () => {
          this.checkMCPAvailability();
        });
      },

      buildMCPModelsMap() {
        const select = this.$el.querySelector('select');
        if (!select) return;
        this.mcpModels = {};
        for (let i = 0; i < select.options.length; i++) {
          const option = select.options[i];
          if (option.value) {
            const hasMcpAttr = option.getAttribute('data-has-mcp');
            this.mcpModels[option.value] = hasMcpAttr === 'true';
          }
        }
      },

      checkMCPAvailability() {
        if (!this.selectedModel) {
          this.mcpAvailable = false;
          this.mcpMode = false;
          return;
        }
        const hasMCP = this.mcpModels[this.selectedModel] === true;
        this.mcpAvailable = hasMCP;
        if (!hasMCP) {
          this.mcpMode = false;
        }
      },

      startTypingAnimation() {
        if (this.isTyping) return;
        this.typeNextPlaceholder();
      },

      typeNextPlaceholder() {
        if (this.isTyping) return;
        this.isTyping = true;
        this.charIndex = 0;
        const message = this.placeholderMessages[this.placeholderIndex];
        this.currentPlaceholder = '';
        window.currentPlaceholderText = '';

        const typeChar = () => {
          if (this.charIndex < message.length) {
            this.currentPlaceholder = message.substring(0, this.charIndex + 1);
            window.currentPlaceholderText = this.currentPlaceholder;
            this.charIndex++;
            this.typingTimeout = setTimeout(typeChar, 30);
          } else {
            this.isTyping = false;
            window.currentPlaceholderText = this.currentPlaceholder;
            this.displayTimeout = setTimeout(() => {
              this.placeholderIndex = (this.placeholderIndex + 1) % this.placeholderMessages.length;
              this.typeNextPlaceholder();
            }, 2000);
          }
        };

        typeChar();
      },

      pauseTyping() {
        if (this.typingTimeout) {
          clearTimeout(this.typingTimeout);
          this.typingTimeout = null;
        }
        if (this.displayTimeout) {
          clearTimeout(this.displayTimeout);
          this.displayTimeout = null;
        }
        this.isTyping = false;
      },

      resumeTyping() {
        if (!this.inputValue.trim() && !this.isTyping) {
          this.startTypingAnimation();
        }
      },

      handleFocus() {
        if (this.isTyping && this.placeholderIndex < this.placeholderMessages.length) {
          const fullMessage = this.placeholderMessages[this.placeholderIndex];
          this.currentPlaceholder = fullMessage;
          window.currentPlaceholderText = fullMessage;
        }
        this.pauseTyping();
      },

      handleBlur() {
        if (!this.inputValue.trim()) {
          this.resumeTyping();
        }
      },

      handleInput() {
        if (this.inputValue.trim()) {
          this.pauseTyping();
        } else {
          this.resumeTyping();
        }
      },

      handleFileSelection(files, fileType) {
        Array.from(files).forEach(file => {
          const exists = this.attachedFiles.some(f => f.name === file.name && f.type === fileType);
          if (!exists) {
            this.attachedFiles.push({ name: file.name, type: fileType });
          }
        });
      },

      removeAttachedFile(fileType, fileName) {
        const index = this.attachedFiles.findIndex(f => f.name === fileName && f.type === fileType);
        if (index !== -1) {
          this.attachedFiles.splice(index, 1);
        }
        if (fileType === 'image') {
          this.imageFiles = this.imageFiles.filter(f => f.name !== fileName);
        } else if (fileType === 'audio') {
          this.audioFiles = this.audioFiles.filter(f => f.name !== fileName);
        } else if (fileType === 'file') {
          this.textFiles = this.textFiles.filter(f => f.name !== fileName);
        }
      }
    };
  }

  // Start chat function for SPA
  function startChatSPA(event) {
    if (event) event.preventDefault();
    const form = event ? event.target.closest('form') : document.querySelector('form');
    if (!form) return;

    const alpineComponent = form.closest('[x-data]');
    const select = alpineComponent ? alpineComponent.querySelector('select') : null;
    const textarea = form.querySelector('textarea');

    const selectedModel = select ? select.value : '';
    let message = textarea ? textarea.value : '';

    if (!message.trim() && window.currentPlaceholderText) {
      message = window.currentPlaceholderText;
    }

    if (!selectedModel || !message.trim()) return;

    let mcpMode = false;
    const mcpToggle = document.getElementById('spa_home_mcp_toggle');
    if (mcpToggle && mcpToggle.checked) mcpMode = true;

    const chatData = { message, imageFiles: [], audioFiles: [], textFiles: [], mcpMode };
    const imageInput = document.getElementById('spa_home_input_image');
    const audioInput = document.getElementById('spa_home_input_audio');
    const fileInput = document.getElementById('spa_home_input_file');

    const filePromises = [
      ...Array.from(imageInput?.files || []).map(file =>
        new Promise(resolve => {
          const reader = new FileReader();
          reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
          reader.readAsDataURL(file);
        })
      ),
      ...Array.from(audioInput?.files || []).map(file =>
        new Promise(resolve => {
          const reader = new FileReader();
          reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
          reader.readAsDataURL(file);
        })
      ),
      ...Array.from(fileInput?.files || []).map(file =>
        new Promise(resolve => {
          const reader = new FileReader();
          reader.onload = e => resolve({ name: file.name, data: e.target.result, type: file.type });
          reader.readAsText(file);
        })
      )
    ];

    const navigateToChat = () => {
      localStorage.setItem('localai_index_chat_data', JSON.stringify(chatData));
      if (window.Alpine && Alpine.store('router')) {
        Alpine.store('router').navigate('chat', { model: selectedModel });
      } else {
        window.location.href = `/chat/${selectedModel}`;
      }
    };

    if (filePromises.length > 0) {
      Promise.all(filePromises).then(files => {
        files.forEach(file => {
          if (file.type.startsWith('image/')) chatData.imageFiles.push(file);
          else if (file.type.startsWith('audio/')) chatData.audioFiles.push(file);
          else chatData.textFiles.push(file);
        });
        navigateToChat();
      }).catch(() => navigateToChat());
    } else {
      navigateToChat();
    }
  }

  // Stop individual model
  async function stopModel(modelName) {
    if (!confirm(`Are you sure you want to stop "${modelName}"?`)) return;
    try {
      const response = await fetch('/backend/shutdown', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model: modelName })
      });
      if (response.ok) {
        setTimeout(() => window.location.reload(), 500);
      } else {
        alert('Failed to stop model');
      }
    } catch (error) {
      console.error('Error stopping model:', error);
      alert('Failed to stop model');
    }
  }

  // Stop all loaded models
  async function stopAllModels(component) {
    const loadedModelElements = document.querySelectorAll('[data-loaded-model]');
    const loadedModelNames = Array.from(loadedModelElements).map(el => {
      const span = el.querySelector('span.truncate');
      return span ? span.textContent.trim() : '';
    }).filter(name => name.length > 0);

    if (loadedModelNames.length === 0) return;
    if (!confirm(`Are you sure you want to stop all ${loadedModelNames.length} loaded model(s)?`)) return;

    if (component) component.stoppingAll = true;

    try {
      const stopPromises = loadedModelNames.map(modelName =>
        fetch('/backend/shutdown', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ model: modelName })
        })
      );
      await Promise.all(stopPromises);
      setTimeout(() => window.location.reload(), 1000);
    } catch (error) {
      console.error('Error stopping models:', error);
      alert('Failed to stop some models');
      if (component) component.stoppingAll = false;
    }
  }

  // Make available globally
  window.resourceMonitor = resourceMonitor;
  window.formatBytes = formatBytes;
  window.homeInputForm = homeInputForm;
  window.startChatSPA = startChatSPA;
  window.stopModel = stopModel;
  window.stopAllModels = stopAllModels;

  // ========================================
  // SPA Router - Alpine.js Store Definition
  // Must be defined before Alpine.js initializes
  // ========================================

  // Define routes and their corresponding view IDs
  const SPA_ROUTES = {
    'home': { title: 'LocalAI', viewId: 'view-home', paths: ['/', ''] },
    'chat': { title: 'LocalAI - Chat', viewId: 'view-chat', paths: ['/chat'] },
    'text2image': { title: 'LocalAI - Images', viewId: 'view-text2image', paths: ['/text2image'] },
    'tts': { title: 'LocalAI - TTS', viewId: 'view-tts', paths: ['/tts'] },
    'talk': { title: 'LocalAI - Talk', viewId: 'view-talk', paths: ['/talk'] },
    'manage': { title: 'LocalAI - System', viewId: 'view-manage', paths: ['/manage'] },
    'browse': { title: 'LocalAI - Model Gallery', viewId: 'view-browse', paths: ['/browse'] }
  };

  // Parse URL path to determine route
  function parseUrlPath(pathname) {
    pathname = pathname.replace(/\/$/, '') || '/';

    // Check for hash-based routes first
    const hash = window.location.hash.slice(1);
    if (hash) {
      const hashParts = hash.split('/');
      const route = hashParts[0];
      const model = hashParts[1] || null;
      if (SPA_ROUTES[route]) {
        return { route, params: model ? { model } : {} };
      }
    }

    // Check path-based routes
    for (const [route, config] of Object.entries(SPA_ROUTES)) {
      for (const path of config.paths) {
        if (pathname === path) {
          return { route, params: {} };
        }
        if (pathname.startsWith(path + '/')) {
          const param = pathname.slice(path.length + 1);
          if (param) {
            return { route, params: { model: param } };
          }
        }
      }
    }

    return { route: 'home', params: {} };
  }

  // Register the router store with Alpine.js on init event
  document.addEventListener('alpine:init', () => {
    const initialRoute = parseUrlPath(window.location.pathname);

    Alpine.store('router', {
      currentRoute: initialRoute.route,
      routeParams: initialRoute.params,
      previousRoute: null,

      navigate(route, params = {}) {
        if (!SPA_ROUTES[route]) {
          console.warn('Unknown route:', route);
          return;
        }

        this.previousRoute = this.currentRoute;
        this.currentRoute = route;
        this.routeParams = params;

        document.title = SPA_ROUTES[route].title;

        const url = route === 'home' ? '/' : '/#' + route;
        if (params.model) {
          window.history.pushState({ route, params }, '', '/#' + route + '/' + params.model);
        } else {
          window.history.pushState({ route, params }, '', url);
        }

        window.scrollTo(0, 0);
        window.dispatchEvent(new CustomEvent('spa:navigate', {
          detail: { route, params, previousRoute: this.previousRoute }
        }));
      },

      isRoute(route) {
        return this.currentRoute === route;
      },

      navigateToChat(model) {
        this.navigate('chat', { model });
      },

      navigateToText2Image(model) {
        this.navigate('text2image', { model });
      },

      navigateToTTS(model) {
        this.navigate('tts', { model });
      }
    });
  });

  // Handle browser back/forward buttons
  window.addEventListener('popstate', (event) => {
    if (window.Alpine && Alpine.store('router')) {
      if (event.state && event.state.route) {
        Alpine.store('router').currentRoute = event.state.route;
        Alpine.store('router').routeParams = event.state.params || {};
      } else {
        const parsed = parseUrlPath(window.location.pathname);
        Alpine.store('router').currentRoute = parsed.route;
        Alpine.store('router').routeParams = parsed.params;
      }
    }
  });

  // Export for use in other scripts
  window.SPA_ROUTES = SPA_ROUTES;
  window.parseUrlPath = parseUrlPath;
</script>

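<!-- Illustrative sketch of how templates are expected to consume the router store defined above
     (the model name here is a placeholder, not a value taken from this repository):
       <a href="#" @click.prevent="$store.router.navigate('chat', { model: 'some-model' })">Chat</a>
       <div x-show="$store.router.currentRoute === 'chat'" x-cloak>...</div>
     The spa_navbar partial and the view containers later in this file follow this pattern. -->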
<!-- SPA Scripts -->
<script defer src="static/spa-router.js"></script>
<script defer src="static/spa-home.js"></script>
<script defer src="static/chat.js"></script>
<script defer src="static/image.js"></script>
<script defer src="static/tts.js"></script>
<!-- Note: talk.js is NOT included here because it has global-scope DOM access that
     conflicts with the SPA architecture. The SPA talk view has its own inline JS. -->
<script src="static/assets/pdf.min.js"></script>
<script>
  // Initialize PDF.js worker
  if (typeof pdfjsLib !== 'undefined') {
    pdfjsLib.GlobalWorkerOptions.workerSrc = 'static/assets/pdf.worker.min.js';
  }

  // Store gallery configs for header icon display and model info modal
  window.__galleryConfigs = {};
  {{ $allGalleryConfigs:=.GalleryConfig }}
  {{ range $modelName, $galleryConfig := $allGalleryConfigs }}
  window.__galleryConfigs["{{$modelName}}"] = {};
  {{ if $galleryConfig.Icon }}
  window.__galleryConfigs["{{$modelName}}"].Icon = "{{$galleryConfig.Icon}}";
  {{ end }}
  {{ if $galleryConfig.Description }}
  window.__galleryConfigs["{{$modelName}}"].Description = {{ printf "%q" $galleryConfig.Description }};
  {{ end }}
  {{ if $galleryConfig.URLs }}
  window.__galleryConfigs["{{$modelName}}"].URLs = [
    {{ range $idx, $url := $galleryConfig.URLs }}
    {{ if $idx }},{{ end }}{{ printf "%q" $url }}
    {{ end }}
  ];
  {{ end }}
  {{ end }}
</script>

<body class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">
  <div class="flex flex-col min-h-screen" x-data="{ mobileMenuOpen: false, settingsOpen: false, mobileSettingsOpen: false }">

    {{template "views/partials/spa_navbar" .}}

    <!-- SPA View Container -->
    <div class="flex-1 flex flex-col">

      <!-- Home View -->
      <div x-show="$store.router.currentRoute === 'home'" x-cloak>
        {{template "views/spa/home" .}}
      </div>

      <!-- Chat View -->
      <div x-show="$store.router.currentRoute === 'chat'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/chat" .}}
      </div>

      <!-- Text2Image View -->
      <div x-show="$store.router.currentRoute === 'text2image'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/text2image" .}}
      </div>

      <!-- TTS View -->
      <div x-show="$store.router.currentRoute === 'tts'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/tts" .}}
      </div>

      <!-- Talk View -->
      <div x-show="$store.router.currentRoute === 'talk'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/talk" .}}
      </div>

      <!-- Manage View -->
      <div x-show="$store.router.currentRoute === 'manage'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/manage" .}}
      </div>

      <!-- Browse View (Model Gallery) -->
      <div x-show="$store.router.currentRoute === 'browse'" x-cloak class="flex-1 flex flex-col">
        {{template "views/spa/browse" .}}
      </div>

    </div>

    {{template "views/partials/footer" .}}
  </div>

  <style>
    /* Hide elements until Alpine.js initializes */
    [x-cloak] { display: none !important; }
  </style>

</body>
</html>
221 core/http/views/spa/browse.html Normal file
@@ -0,0 +1,221 @@
<!-- Browse/Gallery View Content for SPA -->
|
||||
<!-- This is a simplified gallery view - for full functionality, use the /browse/ URL -->
|
||||
<div class="container mx-auto px-4 py-8 flex-grow" x-data="browseGallery()">
|
||||
|
||||
<!-- Hero Header -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<h1 class="hero-title">
|
||||
<i class="fas fa-images mr-2"></i>Model Gallery
|
||||
</h1>
|
||||
<p class="hero-subtitle">Browse and install AI models</p>
|
||||
|
||||
<!-- Search and Filter -->
|
||||
<div class="flex flex-wrap justify-center gap-3 mt-6">
|
||||
<div class="relative">
|
||||
<input type="text"
|
||||
x-model="searchQuery"
|
||||
@input="filterModels()"
|
||||
placeholder="Search models..."
|
||||
class="input pl-10 py-2 w-64">
|
||||
<i class="fas fa-search absolute left-3 top-1/2 transform -translate-y-1/2 text-[var(--color-text-secondary)]"></i>
|
||||
</div>
|
||||
|
||||
<select x-model="categoryFilter" @change="filterModels()" class="input py-2">
|
||||
<option value="">All Categories</option>
|
||||
<option value="chat">Chat</option>
|
||||
<option value="image">Image Generation</option>
|
||||
<option value="audio">Audio</option>
|
||||
<option value="embedding">Embeddings</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Loading State -->
|
||||
<div x-show="loading" class="flex justify-center py-12">
|
||||
<div class="animate-spin rounded-full h-12 w-12 border-t-2 border-b-2 border-[var(--color-primary)]"></div>
|
||||
</div>
|
||||
|
||||
<!-- Models Grid -->
|
||||
<div x-show="!loading" class="mt-8 grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 gap-4">
|
||||
<template x-for="model in filteredModels" :key="model.name">
|
||||
<div class="card overflow-hidden hover:border-[var(--color-primary-border)] transition-colors">
|
||||
<!-- Model Header -->
|
||||
<div class="p-4 border-b border-[var(--color-border)]">
|
||||
<div class="flex items-start justify-between">
|
||||
<div class="flex items-center">
|
||||
<div class="w-10 h-10 bg-[var(--color-primary-light)] rounded-lg flex items-center justify-center mr-3">
|
||||
<template x-if="model.icon">
|
||||
<img :src="model.icon" :alt="model.name" class="w-8 h-8 rounded">
|
||||
</template>
|
||||
<template x-if="!model.icon">
|
||||
<i class="fas fa-brain text-[var(--color-primary)]"></i>
|
||||
</template>
|
||||
</div>
|
||||
<div>
|
||||
<h3 class="text-sm font-semibold text-[var(--color-text-primary)] truncate max-w-[150px]" x-text="model.name"></h3>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]" x-text="model.gallery?.name || 'Unknown'"></p>
|
||||
</div>
|
||||
</div>
|
||||
<template x-if="model.installed">
|
||||
<span class="inline-flex items-center px-2 py-0.5 rounded text-xs font-medium bg-green-500/20 text-green-300">
|
||||
<i class="fas fa-check mr-1"></i>Installed
|
||||
</span>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Model Info -->
|
||||
<div class="p-4">
|
||||
<p class="text-xs text-[var(--color-text-secondary)] line-clamp-2 mb-3" x-text="model.description || 'No description available'"></p>
|
||||
|
||||
<!-- Tags -->
|
||||
<div class="flex flex-wrap gap-1 mb-3">
|
||||
<template x-for="tag in (model.tags || []).slice(0, 3)" :key="tag">
|
||||
<span class="inline-flex items-center px-1.5 py-0.5 rounded text-[10px] bg-[var(--color-bg-secondary)] text-[var(--color-text-secondary)]" x-text="tag"></span>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Actions -->
|
||||
<div class="flex items-center gap-2">
|
||||
<template x-if="model.installed">
|
||||
<button @click="$store.router.navigate('chat', { model: model.name })"
|
||||
class="flex-1 btn-primary text-xs py-1.5">
|
||||
<i class="fas fa-comments mr-1"></i>Use
|
||||
</button>
|
||||
</template>
|
||||
<template x-if="!model.installed">
|
||||
<button @click="installModel(model)"
|
||||
:disabled="model.installing"
|
||||
:class="model.installing ? 'opacity-50 cursor-not-allowed' : ''"
|
||||
class="flex-1 btn-primary text-xs py-1.5">
|
||||
<i class="fas fa-download mr-1"></i>
|
||||
<span x-text="model.installing ? 'Installing...' : 'Install'"></span>
|
||||
</button>
|
||||
</template>
|
||||
<a :href="`/browse/${model.gallery?.name || ''}/${model.name}`"
|
||||
class="btn-secondary text-xs py-1.5 px-2" title="View details">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Empty State -->
|
||||
<div x-show="!loading && filteredModels.length === 0" class="text-center py-12 text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-search text-4xl mb-3 opacity-50"></i>
|
||||
<p>No models found</p>
|
||||
<p class="text-sm mt-2">Try adjusting your search or filters</p>
|
||||
</div>
|
||||
|
||||
<!-- Link to Full Gallery -->
|
||||
<div class="mt-8 text-center">
|
||||
<a href="/browse/" class="btn-secondary">
|
||||
<i class="fas fa-external-link-alt mr-2"></i>
|
||||
View Full Model Gallery
|
||||
</a>
|
||||
</div>
|
||||
</div>

<script>
  // Browse gallery component
  function browseGallery() {
    return {
      loading: true,
      searchQuery: '',
      categoryFilter: '',
      models: [],
      filteredModels: [],

      init() {
        this.loadModels();
      },

      async loadModels() {
        try {
          // Fetch available models from gallery
          const response = await fetch('/models/available');
          if (response.ok) {
            const data = await response.json();
            this.models = data || [];
            this.filterModels();
          }
        } catch (error) {
          console.error('Error loading models:', error);
        } finally {
          this.loading = false;
        }
      },

      filterModels() {
        let filtered = this.models;

        // Search filter
        if (this.searchQuery.trim()) {
          const query = this.searchQuery.toLowerCase();
          filtered = filtered.filter(m =>
            (m.name && m.name.toLowerCase().includes(query)) ||
            (m.description && m.description.toLowerCase().includes(query))
          );
        }

        // Category filter
        if (this.categoryFilter) {
          filtered = filtered.filter(m => {
            const tags = m.tags || [];
            const name = (m.name || '').toLowerCase();
            switch (this.categoryFilter) {
              case 'chat':
                return tags.includes('chat') || tags.includes('llm') || name.includes('chat');
              case 'image':
                return tags.includes('image') || tags.includes('diffusion') || name.includes('stable');
              case 'audio':
                return tags.includes('audio') || tags.includes('tts') || tags.includes('whisper');
              case 'embedding':
                return tags.includes('embedding') || name.includes('embed');
              default:
                return true;
            }
          });
        }

        this.filteredModels = filtered.slice(0, 20); // Limit to first 20 for performance
      },

      async installModel(model) {
        model.installing = true;

        try {
          const response = await fetch('/models/apply', {
            method: 'POST',
            headers: {
              'Content-Type': 'application/json',
            },
            body: JSON.stringify({
              id: model.gallery?.name + '@' + model.name
            })
          });

          if (response.ok) {
            // Model installation started
            alert(`Installation of ${model.name} started. This may take a while.`);
            // Refresh after a delay
            setTimeout(() => this.loadModels(), 5000);
          } else {
            alert('Failed to start installation');
          }
        } catch (error) {
          console.error('Error installing model:', error);
          alert('Error: ' + error.message);
        } finally {
          model.installing = false;
        }
      }
    };
  }

  window.browseGallery = browseGallery;
</script>
273 core/http/views/spa/chat.html Normal file
@@ -0,0 +1,273 @@
<!-- Chat View Content for SPA -->
|
||||
<!-- This embeds the chat interface inline in the SPA -->
|
||||
<div class="flex flex-col flex-1 overflow-hidden" x-data="chatSPA()">
|
||||
|
||||
<!-- Main Chat Area -->
|
||||
<div class="flex flex-1 overflow-hidden">
|
||||
<!-- Sidebar for chat list -->
|
||||
<aside class="hidden lg:flex w-64 flex-col bg-[var(--color-bg-secondary)] border-r border-[var(--color-bg-primary)]">
|
||||
<div class="p-3 border-b border-[var(--color-bg-primary)]">
|
||||
<button @click="createNewChatSPA()" class="w-full btn-primary text-sm py-2">
|
||||
<i class="fas fa-plus mr-2"></i>New Chat
|
||||
</button>
|
||||
</div>
|
||||
<div class="flex-1 overflow-y-auto p-2 space-y-1">
|
||||
<template x-for="chat in $store.chat.chats" :key="chat.id">
|
||||
<div
|
||||
@click="switchChatSPA(chat.id)"
|
||||
:class="$store.chat.activeChatId === chat.id ? 'bg-[var(--color-primary-light)] border-[var(--color-primary-border)]' : 'hover:bg-[var(--color-bg-primary)] border-transparent'"
|
||||
class="p-2 rounded-lg cursor-pointer border transition-colors group relative">
|
||||
<div class="flex items-center justify-between">
|
||||
<span class="truncate text-sm text-[var(--color-text-primary)]" x-text="chat.name"></span>
|
||||
<div class="flex items-center gap-1 opacity-0 group-hover:opacity-100 transition-opacity">
|
||||
<button
|
||||
@click.stop="deleteChatSPA(chat.id)"
|
||||
class="p-1 text-red-400 hover:text-red-300 transition-colors"
|
||||
title="Delete chat">
|
||||
<i class="fas fa-trash text-xs"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-center gap-2 mt-1 text-xs text-[var(--color-text-secondary)]">
|
||||
<span x-text="chat.model || 'No model'"></span>
|
||||
<span x-show="$store.chat.hasActiveRequest(chat.id)" class="flex items-center gap-1">
|
||||
<span class="animate-pulse w-1.5 h-1.5 rounded-full bg-green-400"></span>
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
<!-- Chat Content -->
|
||||
<div class="flex-1 flex flex-col min-w-0">
|
||||
<!-- Chat Header -->
|
||||
<header class="flex items-center justify-between px-4 py-2 border-b border-[var(--color-bg-secondary)] bg-[var(--color-bg-primary)]">
|
||||
<div class="flex items-center gap-3">
|
||||
<button @click="showMobileSidebar = !showMobileSidebar" class="lg:hidden p-2 text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)]">
|
||||
<i class="fas fa-bars"></i>
|
||||
</button>
|
||||
<div class="flex items-center gap-2">
|
||||
<select
|
||||
x-model="currentModel"
|
||||
@change="updateChatModel()"
|
||||
class="input text-sm py-1.5 px-3">
|
||||
<option value="" disabled>Select model...</option>
|
||||
{{ range .ModelsConfig }}
|
||||
{{ $cfg := . }}
|
||||
{{ range .KnownUsecaseStrings }}
|
||||
{{ if eq . "FLAG_CHAT" }}
|
||||
<option value="{{$cfg.Name}}">{{$cfg.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ range .ModelsWithoutConfig }}
|
||||
<option value="{{.}}">{{.}}</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-center gap-2">
|
||||
<span id="tokens-per-second" class="text-xs text-[var(--color-text-secondary)]">-</span>
|
||||
<span id="max-tokens-per-second-badge" class="hidden text-xs bg-green-500/20 text-green-300 px-2 py-0.5 rounded"></span>
|
||||
<div id="header-loading-indicator" class="hidden">
|
||||
<svg class="animate-spin h-4 w-4 text-[var(--color-primary)]" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
|
||||
</svg>
|
||||
</div>
|
||||
<button @click="clearChat()" class="p-2 text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] transition-colors" title="Clear chat">
|
||||
<i class="fas fa-eraser"></i>
|
||||
</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Messages Container -->
|
||||
<div id="chat" class="flex-1 overflow-y-auto p-4 space-y-4">
|
||||
<template x-for="(message, index) in $store.chat.activeHistory" :key="index">
|
||||
<div :class="message.role === 'user' ? 'justify-end' : 'justify-start'" class="flex">
|
||||
<div :class="message.role === 'user' ? 'bg-[var(--color-primary)] text-white max-w-[80%]' : 'bg-[var(--color-bg-secondary)] text-[var(--color-text-primary)] max-w-[90%]'"
|
||||
class="rounded-lg px-4 py-2">
|
||||
<!-- Thinking/Reasoning messages -->
|
||||
<template x-if="message.role === 'thinking' || message.role === 'reasoning'">
|
||||
<div class="text-xs">
|
||||
<button @click="message.expanded = !message.expanded" class="flex items-center gap-2 text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)]">
|
||||
<i :class="message.expanded ? 'fa-chevron-down' : 'fa-chevron-right'" class="fas text-xs"></i>
|
||||
<span>Thinking...</span>
|
||||
</button>
|
||||
<div x-show="message.expanded" x-html="message.html" class="mt-2 prose prose-sm prose-invert max-w-none"></div>
|
||||
</div>
|
||||
</template>
|
||||
<!-- Regular messages -->
|
||||
<template x-if="message.role !== 'thinking' && message.role !== 'reasoning'">
|
||||
<div x-html="message.html" class="prose prose-sm prose-invert max-w-none"></div>
|
||||
</template>
|
||||
<!-- Images -->
|
||||
<template x-if="message.image && message.image.length > 0">
|
||||
<div class="mt-2 flex flex-wrap gap-2">
|
||||
<template x-for="img in message.image" :key="img">
|
||||
<img :src="img" class="max-w-[200px] rounded-lg" alt="Attached image">
|
||||
</template>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<!-- Empty state -->
|
||||
<div x-show="!$store.chat.activeHistory || $store.chat.activeHistory.length === 0" class="flex flex-col items-center justify-center h-full text-center text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-comments text-4xl mb-4 opacity-50"></i>
|
||||
<p>Start a conversation</p>
|
||||
<p class="text-sm mt-2">Select a model and send a message to begin</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Input Area -->
|
||||
<div class="border-t border-[var(--color-bg-secondary)] p-4 bg-[var(--color-bg-primary)]">
|
||||
<form id="prompt" @submit.prevent="submitPrompt($event)" class="relative">
|
||||
<div class="flex items-end gap-2">
|
||||
<div class="flex-1 relative">
|
||||
<textarea
|
||||
id="input"
|
||||
name="input"
|
||||
placeholder="Type a message..."
|
||||
class="input w-full resize-none py-3 pr-12"
|
||||
rows="1"
|
||||
@keydown.enter.prevent="if (!$event.shiftKey) submitPrompt($event)"
|
||||
@input="autoResize($event.target)"
|
||||
></textarea>
|
||||
<div class="absolute right-2 bottom-2 flex items-center gap-1">
|
||||
<button type="button" @click="document.getElementById('input_image').click()" class="p-1.5 text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors" title="Attach image">
|
||||
<i class="fas fa-image"></i>
|
||||
</button>
|
||||
<button type="button" @click="document.getElementById('input_audio').click()" class="p-1.5 text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors" title="Attach audio">
|
||||
<i class="fas fa-microphone"></i>
|
||||
</button>
|
||||
<button type="button" @click="document.getElementById('input_file').click()" class="p-1.5 text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors" title="Attach file">
|
||||
<i class="fas fa-paperclip"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<button type="submit" id="send-button" class="btn-primary p-3">
|
||||
<i class="fas fa-paper-plane"></i>
|
||||
</button>
|
||||
<button type="button" id="stop-button" @click="stopRequest()" class="btn-primary p-3 bg-red-500 hover:bg-red-600" style="display: none;">
|
||||
<i class="fas fa-stop"></i>
|
||||
</button>
|
||||
</div>
|
||||
<!-- Hidden file inputs -->
|
||||
<input type="file" id="input_image" multiple accept="image/*" class="hidden" @change="readInputImage">
|
||||
<input type="file" id="input_audio" multiple accept="audio/*" class="hidden" @change="readInputAudio">
|
||||
<input type="file" id="input_file" multiple accept=".txt,.md,.pdf" class="hidden" @change="readInputFile">
|
||||
</form>
|
||||
|
||||
<!-- System prompt form (hidden) -->
|
||||
<form id="system_prompt" @submit.prevent="submitSystemPrompt($event)" style="display: none;">
|
||||
<input type="text" id="systemPrompt" name="systemPrompt">
|
||||
</form>
|
||||
<input type="hidden" id="chat-model" value="{{.Model}}">
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Mobile Sidebar Overlay -->
|
||||
<div x-show="showMobileSidebar" @click="showMobileSidebar = false" class="lg:hidden fixed inset-0 bg-black/50 z-40"></div>
|
||||
<aside x-show="showMobileSidebar" class="lg:hidden fixed left-0 top-0 bottom-0 w-64 bg-[var(--color-bg-secondary)] z-50 transform transition-transform"
|
||||
:class="showMobileSidebar ? 'translate-x-0' : '-translate-x-full'">
|
||||
<div class="p-3 border-b border-[var(--color-bg-primary)] flex items-center justify-between">
|
||||
<span class="font-medium text-[var(--color-text-primary)]">Chats</span>
|
||||
<button @click="showMobileSidebar = false" class="p-2 text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-times"></i>
|
||||
</button>
|
||||
</div>
|
||||
<div class="p-3">
|
||||
<button @click="createNewChatSPA(); showMobileSidebar = false" class="w-full btn-primary text-sm py-2">
|
||||
<i class="fas fa-plus mr-2"></i>New Chat
|
||||
</button>
|
||||
</div>
|
||||
<div class="flex-1 overflow-y-auto p-2 space-y-1">
|
||||
<template x-for="chat in $store.chat.chats" :key="chat.id">
|
||||
<div
|
||||
@click="switchChatSPA(chat.id); showMobileSidebar = false"
|
||||
:class="$store.chat.activeChatId === chat.id ? 'bg-[var(--color-primary-light)] border-[var(--color-primary-border)]' : 'hover:bg-[var(--color-bg-primary)] border-transparent'"
|
||||
class="p-2 rounded-lg cursor-pointer border transition-colors">
|
||||
<span class="truncate text-sm text-[var(--color-text-primary)]" x-text="chat.name"></span>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</aside>
|
||||
</div>

<script>
  // Chat SPA component
  function chatSPA() {
    return {
      currentModel: '{{.Model}}',
      showMobileSidebar: false,

      init() {
        // Initialize chat store if not already done
        this.$nextTick(() => {
          if (window.Alpine && Alpine.store('chat') && Alpine.store('chat').chats.length === 0) {
            Alpine.store('chat').createChat(this.currentModel, '', false);
          }
          // Update model from route params if available
          const routeParams = Alpine.store('router')?.routeParams;
          if (routeParams?.model) {
            this.currentModel = routeParams.model;
            const activeChat = Alpine.store('chat').activeChat();
            if (activeChat) {
              activeChat.model = this.currentModel;
            }
          }
        });
      },

      updateChatModel() {
        const activeChat = Alpine.store('chat').activeChat();
        if (activeChat) {
          activeChat.model = this.currentModel;
          if (typeof window.autoSaveChats === 'function') {
            window.autoSaveChats();
          }
        }
      },

      clearChat() {
        if (confirm('Clear all messages in this chat?')) {
          Alpine.store('chat').clear();
        }
      },

      autoResize(textarea) {
        textarea.style.height = 'auto';
        textarea.style.height = Math.min(textarea.scrollHeight, 200) + 'px';
      }
    };
  }

  // Helper functions for chat in SPA context
  function createNewChatSPA() {
    const currentModel = document.getElementById('chat-model')?.value || '';
    if (window.createNewChat) {
      window.createNewChat(currentModel, '', false);
    }
  }

  function switchChatSPA(chatId) {
    if (window.switchChat) {
      window.switchChat(chatId);
    }
  }

  function deleteChatSPA(chatId) {
    if (confirm('Delete this chat?')) {
      if (window.deleteChat) {
        window.deleteChat(chatId);
      }
    }
  }

  // Make component available globally
  window.chatSPA = chatSPA;
</script>
329 core/http/views/spa/home.html Normal file
@@ -0,0 +1,329 @@
<!-- Home View Content for SPA -->
|
||||
<!-- Main Content - ChatGPT-style minimal interface -->
|
||||
<div class="flex-1 flex flex-col items-center justify-center px-4 py-12">
|
||||
<div class="w-full max-w-3xl mx-auto">
|
||||
{{ if eq (len .ModelsConfig) 0 }}
|
||||
<!-- No Models - Wizard Guide -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<h2 class="hero-title">
|
||||
No Models Installed
|
||||
</h2>
|
||||
<p class="hero-subtitle">
|
||||
Get started with LocalAI by installing your first model. Choose from our gallery, import your own, or use the API to download models.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Features Preview -->
|
||||
<div class="grid grid-cols-1 md:grid-cols-3 gap-4 mb-6">
|
||||
<div class="card card-animate">
|
||||
<div class="w-10 h-10 bg-[var(--color-primary-light)] rounded-lg flex items-center justify-center mx-auto mb-3">
|
||||
<i class="fas fa-images text-[var(--color-primary)] text-xl"></i>
|
||||
</div>
|
||||
<h3 class="text-sm font-semibold text-[var(--color-text-primary)] mb-2">Model Gallery</h3>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Browse and install pre-configured models</p>
|
||||
</div>
|
||||
<div class="card card-animate">
|
||||
<div class="w-10 h-10 bg-[var(--color-accent-light)] rounded-lg flex items-center justify-center mx-auto mb-3">
|
||||
<i class="fas fa-upload text-[var(--color-accent)] text-xl"></i>
|
||||
</div>
|
||||
<h3 class="text-sm font-semibold text-[var(--color-text-primary)] mb-2">Import Models</h3>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Upload your own model files</p>
|
||||
</div>
|
||||
<div class="card card-animate">
|
||||
<div class="w-10 h-10 bg-[var(--color-success-light)] rounded-lg flex items-center justify-center mx-auto mb-3">
|
||||
<i class="fas fa-code text-[var(--color-success)] text-xl"></i>
|
||||
</div>
|
||||
<h3 class="text-sm font-semibold text-[var(--color-text-primary)] mb-2">API Download</h3>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Use the API to download models programmatically</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Setup Instructions -->
|
||||
<div class="card mb-6 text-left">
|
||||
<h3 class="text-lg font-bold text-[var(--color-text-primary)] mb-4 flex items-center">
|
||||
<i class="fas fa-rocket text-[var(--color-accent)] mr-2"></i>
|
||||
How to Get Started
|
||||
</h3>
|
||||
<div class="space-y-4">
|
||||
<div class="flex items-start">
|
||||
<div class="flex-shrink-0 w-8 h-8 rounded-full bg-[var(--color-accent-light)] flex items-center justify-center mr-3 mt-0.5">
|
||||
<span class="text-[var(--color-accent)] font-bold text-sm">1</span>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<p class="text-[var(--color-text-primary)] font-medium mb-2">Browse the Model Gallery</p>
|
||||
<p class="text-[var(--color-text-secondary)] text-sm">Explore our curated collection of pre-configured models. Find models for chat, image generation, audio processing, and more.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-start">
|
||||
<div class="flex-shrink-0 w-8 h-8 rounded-full bg-[var(--color-accent-light)] flex items-center justify-center mr-3 mt-0.5">
|
||||
<span class="text-[var(--color-accent)] font-bold text-sm">2</span>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<p class="text-[var(--color-text-primary)] font-medium mb-2">Install a Model</p>
|
||||
<p class="text-[var(--color-text-secondary)] text-sm">Click on a model from the gallery to install it, or use the import feature to upload your own model files.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-start">
|
||||
<div class="flex-shrink-0 w-8 h-8 rounded-full bg-[var(--color-accent-light)] flex items-center justify-center mr-3 mt-0.5">
|
||||
<span class="text-[var(--color-accent)] font-bold text-sm">3</span>
|
||||
</div>
|
||||
<div class="flex-1">
|
||||
<p class="text-[var(--color-text-primary)] font-medium mb-2">Start Chatting</p>
|
||||
<p class="text-[var(--color-text-secondary)] text-sm">Once installed, return to this page to start chatting with your model or use the API to interact programmatically.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-wrap justify-center gap-4 mb-8">
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse')" class="btn-primary">
|
||||
<i class="fas fa-images mr-2"></i>
|
||||
Browse Model Gallery
|
||||
</a>
|
||||
<a href="/import-model" class="btn-primary">
|
||||
<i class="fas fa-upload mr-2"></i>
|
||||
Import Model
|
||||
</a>
|
||||
<a href="https://localai.io/basics/getting_started/" target="_blank" class="btn-secondary">
|
||||
<i class="fas fa-graduation-cap mr-2"></i>
|
||||
Getting Started
|
||||
<i class="fas fa-external-link-alt ml-2 text-sm"></i>
|
||||
</a>
|
||||
</div>
|
||||
{{ else }}
|
||||
<!-- Welcome Message / Hero Section -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<div class="mb-4 flex justify-center">
|
||||
<img src="static/logo.png" alt="LocalAI Logo" class="h-16 md:h-20">
|
||||
</div>
|
||||
<h1 class="hero-title">How can I help you today?</h1>
|
||||
<p class="hero-subtitle">Ask me anything, and I'll do my best to assist you.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Chat Input Form -->
|
||||
<div class="mb-8" x-data="homeInputForm()">
|
||||
<!-- Model Selector with MCP Toggle -->
|
||||
<div class="mb-4">
|
||||
<label class="block text-sm font-medium text-[var(--color-text-secondary)] mb-2">Select Model</label>
|
||||
<div class="flex items-center gap-3">
|
||||
<select
|
||||
x-model="selectedModel"
|
||||
@change="$nextTick(() => checkMCPAvailability())"
|
||||
class="input flex-1"
|
||||
required
|
||||
>
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model to chat with...</option>
|
||||
{{ range .ModelsConfig }}
|
||||
{{ $cfg := . }}
|
||||
{{ $hasMCP := or (ne $cfg.MCP.Servers "") (ne $cfg.MCP.Stdio "") }}
|
||||
{{ range .KnownUsecaseStrings }}
|
||||
{{ if eq . "FLAG_CHAT" }}
|
||||
<option value="{{$cfg.Name}}" data-has-mcp="{{if $hasMCP}}true{{else}}false{{end}}" class="bg-[var(--color-bg-secondary)] text-[var(--color-text-primary)]">{{$cfg.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</select>
|
||||
|
||||
<!-- Compact MCP Toggle - Show only if MCP is available for selected model -->
|
||||
<div
|
||||
x-show="mcpAvailable"
|
||||
class="flex items-center gap-2 px-3 py-2 text-xs rounded text-[var(--color-text-primary)] bg-[var(--color-bg-secondary)] border border-[var(--color-primary-border)] whitespace-nowrap">
|
||||
<i class="fa-solid fa-plug text-[var(--color-primary)] text-sm"></i>
|
||||
<span class="text-[var(--color-text-secondary)]">MCP</span>
|
||||
<label class="relative inline-flex items-center cursor-pointer ml-1">
|
||||
<input type="checkbox" id="spa_home_mcp_toggle" class="sr-only peer" x-model="mcpMode">
|
||||
<div class="w-9 h-5 bg-[var(--color-bg-primary)] peer-focus:outline-none peer-focus:ring-2 peer-focus:ring-[var(--color-primary-border)] rounded-full peer peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-[var(--color-bg-secondary)] after:border after:rounded-full after:h-4 after:w-4 after:transition-all peer-checked:bg-[var(--color-primary)]"></div>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- MCP Mode Notification - Compact tooltip style -->
|
||||
<div
|
||||
x-show="mcpMode && mcpAvailable"
|
||||
class="mt-2 p-2 bg-[var(--color-primary-light)] border border-[var(--color-primary-border)] rounded text-[var(--color-text-secondary)] text-xs">
|
||||
<div class="flex items-start space-x-2">
|
||||
<i class="fa-solid fa-info-circle text-[var(--color-primary)] mt-0.5 text-xs"></i>
|
||||
<p class="text-[var(--color-text-secondary)]">Non-streaming mode active. Responses may take longer to process.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Input Bar -->
|
||||
<form @submit.prevent="startChatSPA($event)" class="relative w-full">
|
||||
<!-- Attachment Tags - Show above input when files are attached -->
|
||||
<div x-show="attachedFiles.length > 0" class="mb-3 flex flex-wrap gap-2 items-center">
|
||||
<template x-for="(file, index) in attachedFiles" :key="index">
|
||||
<div class="inline-flex items-center gap-2 px-3 py-1.5 rounded-lg text-sm bg-[var(--color-primary-light)] border border-[var(--color-primary-border)] text-[var(--color-text-primary)]">
|
||||
<i :class="file.type === 'image' ? 'fa-solid fa-image' : file.type === 'audio' ? 'fa-solid fa-microphone' : 'fa-solid fa-file'" class="text-[var(--color-primary)]"></i>
|
||||
<span x-text="file.name" class="max-w-[200px] truncate"></span>
|
||||
<button
|
||||
type="button"
|
||||
@click="attachedFiles.splice(index, 1); removeAttachedFile(file.type, file.name)"
|
||||
class="ml-1 text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] transition-colors"
|
||||
title="Remove attachment"
|
||||
>
|
||||
<i class="fa-solid fa-times text-xs"></i>
|
||||
</button>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<div class="relative w-full">
|
||||
<textarea
|
||||
x-model="inputValue"
|
||||
:placeholder="currentPlaceholder"
|
||||
class="input p-3 pr-16 w-full resize-none border-0"
|
||||
required
|
||||
@keydown.shift="shiftPressed = true"
|
||||
@keyup.shift="shiftPressed = false"
|
||||
@keydown.enter.prevent="if (!shiftPressed && selectedModel && (inputValue.trim() || currentPlaceholder.trim())) { startChatSPA($event); }"
|
||||
@focus="handleFocus()"
|
||||
@blur="handleBlur()"
|
||||
@input="handleInput()"
|
||||
rows="2"
|
||||
></textarea>
|
||||
|
||||
<!-- Attachment Buttons -->
|
||||
<button
|
||||
type="button"
|
||||
@click="document.getElementById('spa_home_input_image').click()"
|
||||
class="fa-solid fa-image text-[var(--color-text-secondary)] absolute right-12 top-3 text-base p-1.5 hover:text-[var(--color-primary)] transition-colors duration-200"
|
||||
title="Attach images"
|
||||
></button>
|
||||
<button
|
||||
type="button"
|
||||
@click="document.getElementById('spa_home_input_audio').click()"
|
||||
class="fa-solid fa-microphone text-[var(--color-text-secondary)] absolute right-20 top-3 text-base p-1.5 hover:text-[var(--color-primary)] transition-colors duration-200"
|
||||
title="Attach an audio file"
|
||||
></button>
|
||||
<button
|
||||
type="button"
|
||||
@click="document.getElementById('spa_home_input_file').click()"
|
||||
class="fa-solid fa-file text-[var(--color-text-secondary)] absolute right-28 top-3 text-base p-1.5 hover:text-[var(--color-primary)] transition-colors duration-200"
|
||||
title="Upload text, markdown or PDF file"
|
||||
></button>
|
||||
|
||||
<!-- Send Button -->
|
||||
<button
|
||||
type="submit"
|
||||
:disabled="!selectedModel || (!inputValue.trim() && !currentPlaceholder.trim())"
|
||||
:class="!selectedModel || (!inputValue.trim() && !currentPlaceholder.trim()) ? 'opacity-50 cursor-not-allowed' : ''"
|
||||
class="text-lg p-2 text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors duration-200 absolute right-3 top-3"
|
||||
title="Send message (Enter)"
|
||||
>
|
||||
<i class="fa-solid fa-paper-plane"></i>
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
<!-- Hidden File Inputs -->
|
||||
<input
|
||||
id="spa_home_input_image"
|
||||
type="file"
|
||||
multiple
|
||||
accept="image/*"
|
||||
style="display: none;"
|
||||
@change="imageFiles = Array.from($event.target.files); handleFileSelection($event.target.files, 'image')"
|
||||
/>
|
||||
<input
|
||||
id="spa_home_input_audio"
|
||||
type="file"
|
||||
multiple
|
||||
accept="audio/*"
|
||||
style="display: none;"
|
||||
@change="audioFiles = Array.from($event.target.files); handleFileSelection($event.target.files, 'audio')"
|
||||
/>
|
||||
<input
|
||||
id="spa_home_input_file"
|
||||
type="file"
|
||||
multiple
|
||||
accept=".txt,.md,.pdf"
|
||||
style="display: none;"
|
||||
@change="textFiles = Array.from($event.target.files); handleFileSelection($event.target.files, 'file')"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<!-- Quick Links -->
|
||||
<div class="flex flex-wrap justify-center gap-3 mb-8">
|
||||
<a href="#" @click.prevent="$store.router.navigate('manage')" class="btn-tertiary">
|
||||
<i class="fas fa-cog mr-2"></i>
|
||||
Installed Models and Backends
|
||||
</a>
|
||||
<a href="/import-model" class="btn-tertiary">
|
||||
<i class="fas fa-upload mr-2"></i>
|
||||
Import Model
|
||||
</a>
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse')" class="btn-tertiary">
|
||||
<i class="fas fa-images mr-2"></i>
|
||||
Browse Gallery
|
||||
</a>
|
||||
<a href="https://localai.io" target="_blank" class="btn-tertiary">
|
||||
<i class="fas fa-book mr-2"></i>
|
||||
Documentation
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<!-- Memory Status Indicator (GPU or RAM) -->
|
||||
<div class="mb-4" x-data="resourceMonitor()" x-init="startPolling()">
|
||||
<template x-if="resourceData && resourceData.available">
|
||||
<div class="flex items-center justify-center gap-3 text-xs text-[var(--color-text-secondary)]">
|
||||
<div class="flex items-center gap-2 px-3 py-1.5 rounded-full bg-[var(--color-bg-secondary)] border border-[var(--color-primary-border)]/20">
|
||||
<i :class="resourceData.type === 'gpu' ? 'fas fa-microchip' : 'fas fa-memory'"
|
||||
:class="resourceData.aggregate.usage_percent > 90 ? 'text-red-400' : resourceData.aggregate.usage_percent > 70 ? 'text-yellow-400' : 'text-green-400'"></i>
|
||||
<span class="text-[var(--color-text-secondary)]" x-text="resourceData.type === 'gpu' ? 'GPU' : 'RAM'"></span>
|
||||
<span class="font-mono"
|
||||
:class="resourceData.aggregate.usage_percent > 90 ? 'text-red-400' : resourceData.aggregate.usage_percent > 70 ? 'text-yellow-400' : 'text-green-400'"
|
||||
x-text="`${resourceData.aggregate.usage_percent.toFixed(0)}%`"></span>
|
||||
<div class="w-16 bg-[var(--color-bg-primary)] rounded-full h-1.5 overflow-hidden">
|
||||
<div class="h-full rounded-full transition-all duration-300"
|
||||
:class="resourceData.aggregate.usage_percent > 90 ? 'bg-red-500' : resourceData.aggregate.usage_percent > 70 ? 'bg-yellow-500' : 'bg-[var(--color-success)]'"
|
||||
:style="`width: ${resourceData.aggregate.usage_percent}%`"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Model Status Summary - Subtle -->
|
||||
{{ $loadedModels := .LoadedModels }}
|
||||
<div class="mb-8 flex items-center justify-center gap-2 text-xs text-[var(--color-text-secondary)]"
|
||||
x-data="{ stoppingAll: false, stopAllModels() { window.stopAllModels(this); }, stopModel(name) { window.stopModel(name); }, getLoadedCount() { return document.querySelectorAll('[data-loaded-model]').length; } }"
|
||||
x-show="getLoadedCount() > 0"
|
||||
style="display: none;">
|
||||
<span class="flex items-center gap-1.5">
|
||||
<i class="fas fa-circle text-green-500 text-[10px]"></i>
|
||||
<span x-text="`${getLoadedCount()} model(s) loaded`"></span>
|
||||
</span>
|
||||
<span class="text-[var(--color-primary)] opacity-40">•</span>
|
||||
{{ range .ModelsConfig }}
|
||||
{{ if index $loadedModels .Name }}
|
||||
<span class="inline-flex items-center gap-1 text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] transition-colors" data-loaded-model>
|
||||
<span class="truncate max-w-[100px]">{{.Name}}</span>
|
||||
<button
|
||||
@click="stopModel('{{.Name}}')"
|
||||
class="text-red-400/60 hover:text-red-400 transition-colors ml-0.5"
|
||||
title="Stop {{.Name}}"
|
||||
>
|
||||
<i class="fas fa-times text-[10px]"></i>
|
||||
</button>
|
||||
</span>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
<span class="text-[var(--color-primary)] opacity-40">•</span>
|
||||
<button
|
||||
@click="stopAllModels()"
|
||||
:disabled="stoppingAll"
|
||||
:class="stoppingAll ? 'opacity-50 cursor-not-allowed' : ''"
|
||||
class="text-red-400/60 hover:text-red-400 transition-colors text-xs"
|
||||
title="Stop all loaded models"
|
||||
>
|
||||
<span x-text="stoppingAll ? 'Stopping...' : 'Stop all'"></span>
|
||||
</button>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
</div>
|
||||
322
core/http/views/spa/manage.html
Normal file
@@ -0,0 +1,322 @@
|
||||
<!-- Manage View Content for SPA -->
|
||||
<div class="container mx-auto px-4 py-8 flex-grow" x-data="manageDashboard()">
|
||||
|
||||
<!-- Notifications -->
|
||||
<div class="fixed top-20 right-4 z-50 space-y-2" style="max-width: 400px;">
|
||||
<template x-for="notification in notifications" :key="notification.id">
|
||||
<div x-show="true"
|
||||
x-transition:enter="transition ease-out duration-200"
|
||||
x-transition:enter-start="opacity-0"
|
||||
x-transition:enter-end="opacity-100"
|
||||
x-transition:leave="transition ease-in duration-150"
|
||||
x-transition:leave-start="opacity-100"
|
||||
x-transition:leave-end="opacity-0"
|
||||
:class="notification.type === 'error' ? 'bg-red-500' : 'bg-[var(--color-success)]'"
|
||||
class="rounded-lg p-4 text-white flex items-start space-x-3">
|
||||
<div class="flex-shrink-0">
|
||||
<i :class="notification.type === 'error' ? 'fas fa-exclamation-circle' : 'fas fa-check-circle'" class="text-xl"></i>
|
||||
</div>
|
||||
<div class="flex-1 min-w-0">
|
||||
<p class="text-sm font-medium break-words" x-text="notification.message"></p>
|
||||
</div>
|
||||
<button @click="dismissNotification(notification.id)" class="flex-shrink-0 text-white hover:opacity-80 transition-opacity">
|
||||
<i class="fas fa-times"></i>
|
||||
</button>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Hero Header -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<h1 class="hero-title">
|
||||
Model & Backend Management
|
||||
</h1>
|
||||
<p class="hero-subtitle">Manage your installed models and backends</p>
|
||||
|
||||
<!-- Quick Actions -->
|
||||
<div class="flex flex-wrap justify-center gap-3">
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse')" class="btn-primary text-sm py-1.5 px-3">
|
||||
<i class="fas fa-images mr-1.5 text-[10px]"></i>
|
||||
<span>Model Gallery</span>
|
||||
</a>
|
||||
|
||||
<a href="/import-model" class="btn-primary text-sm py-1.5 px-3">
|
||||
<i class="fas fa-plus mr-1.5 text-[10px]"></i>
|
||||
<span>Import Model</span>
|
||||
</a>
|
||||
|
||||
<button @click="reloadModels()" class="btn-primary text-sm py-1.5 px-3">
|
||||
<i class="fas fa-sync-alt mr-1.5 text-[10px]"></i>
|
||||
<span>Update Models</span>
|
||||
</button>
|
||||
|
||||
<a href="/browse/backends" class="btn-secondary text-sm py-1.5 px-3">
|
||||
<i class="fas fa-cogs mr-1.5 text-[10px]"></i>
|
||||
<span>Backend Gallery</span>
|
||||
</a>
|
||||
|
||||
{{ if not .DisableRuntimeSettings }}
|
||||
<a href="/settings" class="btn-secondary text-sm py-1.5 px-3">
|
||||
<i class="fas fa-cog mr-1.5 text-[10px]"></i>
|
||||
<span>Settings</span>
|
||||
</a>
|
||||
{{ end }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Memory Info Section -->
|
||||
<div class="mt-8" x-data="resourceMonitor()" x-init="startPolling()">
|
||||
<template x-if="resourceData && resourceData.available">
|
||||
<div class="bg-[var(--color-bg-secondary)] border border-[var(--color-primary-border)]/20 rounded-lg p-4 mb-6">
|
||||
<div class="flex items-center justify-between mb-3">
|
||||
<h2 class="h3 flex items-center">
|
||||
<i :class="resourceData.type === 'gpu' ? 'fas fa-microchip' : 'fas fa-memory'" class="mr-2 text-[var(--color-primary)] text-sm"></i>
|
||||
<span x-text="resourceData.type === 'gpu' ? 'GPU Status' : 'Memory Status'"></span>
|
||||
</h2>
|
||||
</div>
|
||||
|
||||
<!-- Aggregate Stats -->
|
||||
<div class="bg-[var(--color-bg-primary)] rounded p-3">
|
||||
<div class="flex items-center justify-between mb-2">
|
||||
<span class="text-xs font-medium text-[var(--color-text-primary)]" x-text="resourceData.type === 'gpu' ? 'Total GPU Memory' : 'System RAM'"></span>
|
||||
<span class="text-xs font-mono"
|
||||
:class="resourceData.aggregate.usage_percent > 90 ? 'text-red-400' : resourceData.aggregate.usage_percent > 70 ? 'text-yellow-400' : 'text-green-400'"
|
||||
x-text="`${resourceData.aggregate.usage_percent.toFixed(1)}%`"></span>
|
||||
</div>
|
||||
<div class="w-full bg-[var(--color-bg-secondary)] rounded-full h-2 overflow-hidden">
|
||||
<div class="h-full rounded-full transition-all duration-300"
|
||||
:class="resourceData.aggregate.usage_percent > 90 ? 'bg-red-500' : resourceData.aggregate.usage_percent > 70 ? 'bg-yellow-500' : 'bg-[var(--color-success)]'"
|
||||
:style="`width: ${resourceData.aggregate.usage_percent}%`"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Installed Models Section -->
|
||||
<div class="mt-8">
|
||||
<h2 class="text-xl font-semibold text-[var(--color-text-primary)] mb-4 flex items-center">
|
||||
<i class="fas fa-brain text-[var(--color-primary)] mr-2"></i>
|
||||
Installed Models
|
||||
</h2>
|
||||
|
||||
<div class="card overflow-hidden">
|
||||
<div class="overflow-x-auto">
|
||||
<table class="w-full">
|
||||
<thead>
|
||||
<tr class="bg-[var(--color-bg-secondary)] border-b border-[var(--color-border)]">
|
||||
<th class="px-4 py-3 text-left text-xs font-semibold text-[var(--color-text-secondary)] uppercase">Model</th>
|
||||
<th class="px-4 py-3 text-left text-xs font-semibold text-[var(--color-text-secondary)] uppercase">Status</th>
|
||||
<th class="px-4 py-3 text-left text-xs font-semibold text-[var(--color-text-secondary)] uppercase">Backend</th>
|
||||
<th class="px-4 py-3 text-right text-xs font-semibold text-[var(--color-text-secondary)] uppercase">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody class="divide-y divide-[var(--color-border)]">
|
||||
{{ $loadedModels := .LoadedModels }}
|
||||
{{ range .ModelsConfig }}
|
||||
<tr class="hover:bg-[var(--color-bg-secondary)]/50 transition-colors">
|
||||
<td class="px-4 py-3">
|
||||
<div class="flex items-center">
|
||||
<span class="text-sm font-medium text-[var(--color-text-primary)]">{{.Name}}</span>
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-4 py-3">
|
||||
{{ if index $loadedModels .Name }}
|
||||
<span class="inline-flex items-center px-2 py-0.5 rounded text-xs font-medium bg-green-500/20 text-green-300">
|
||||
<i class="fas fa-circle text-[6px] mr-1.5"></i>Loaded
|
||||
</span>
|
||||
{{ else }}
|
||||
<span class="inline-flex items-center px-2 py-0.5 rounded text-xs font-medium bg-[var(--color-bg-secondary)] text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-circle text-[6px] mr-1.5"></i>Idle
|
||||
</span>
|
||||
{{ end }}
|
||||
</td>
|
||||
<td class="px-4 py-3">
|
||||
<span class="text-xs text-[var(--color-text-secondary)]">{{.Backend}}</span>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-right">
|
||||
<div class="flex items-center justify-end gap-2">
|
||||
{{ $hasChat := false }}
|
||||
{{ range .KnownUsecaseStrings }}
|
||||
{{ if eq . "FLAG_CHAT" }}{{ $hasChat = true }}{{ end }}
|
||||
{{ end }}
|
||||
{{ if $hasChat }}
|
||||
<button @click="$store.router.navigate('chat', { model: '{{.Name}}' })"
|
||||
class="px-2 py-1 text-xs rounded bg-[var(--color-primary)] text-white hover:opacity-80 transition-opacity">
|
||||
<i class="fas fa-comments mr-1"></i>Chat
|
||||
</button>
|
||||
{{ end }}
|
||||
{{ if index $loadedModels .Name }}
|
||||
<button onclick="stopModelManage('{{.Name}}')"
|
||||
class="px-2 py-1 text-xs rounded bg-red-500/20 text-red-300 hover:bg-red-500/30 transition-colors">
|
||||
<i class="fas fa-stop mr-1"></i>Stop
|
||||
</button>
|
||||
{{ end }}
|
||||
<a href="/model-editor/{{.Name}}" class="px-2 py-1 text-xs rounded bg-[var(--color-bg-secondary)] text-[var(--color-text-secondary)] hover:text-[var(--color-text-primary)] transition-colors">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ range .Models }}
|
||||
<tr class="hover:bg-[var(--color-bg-secondary)]/50 transition-colors">
|
||||
<td class="px-4 py-3">
|
||||
<div class="flex items-center">
|
||||
<span class="text-sm font-medium text-[var(--color-text-primary)]">{{.}}</span>
|
||||
<span class="ml-2 text-xs text-[var(--color-text-secondary)]">(no config)</span>
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-4 py-3">
|
||||
<span class="inline-flex items-center px-2 py-0.5 rounded text-xs font-medium bg-[var(--color-bg-secondary)] text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-circle text-[6px] mr-1.5"></i>Idle
|
||||
</span>
|
||||
</td>
|
||||
<td class="px-4 py-3">
|
||||
<span class="text-xs text-[var(--color-text-secondary)]">-</span>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-right">
|
||||
<button @click="$store.router.navigate('chat', { model: '{{.}}' })"
|
||||
class="px-2 py-1 text-xs rounded bg-[var(--color-primary)] text-white hover:opacity-80 transition-opacity">
|
||||
<i class="fas fa-comments mr-1"></i>Chat
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
{{ if and (eq (len .ModelsConfig) 0) (eq (len .Models) 0) }}
|
||||
<div class="text-center py-8 text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-box-open text-4xl mb-3 opacity-50"></i>
|
||||
<p>No models installed yet</p>
|
||||
<p class="text-sm mt-2">
|
||||
<a href="#" @click.prevent="$store.router.navigate('browse')" class="text-[var(--color-primary)] hover:underline">Browse the gallery</a> to get started
|
||||
</p>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Installed Backends Section -->
|
||||
<div class="mt-8">
|
||||
<h2 class="text-xl font-semibold text-[var(--color-text-primary)] mb-4 flex items-center">
|
||||
<i class="fas fa-server text-[var(--color-accent)] mr-2"></i>
|
||||
Installed Backends
|
||||
</h2>
|
||||
|
||||
<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
|
||||
{{ range .InstalledBackends }}
|
||||
<div class="card p-4">
|
||||
<div class="flex items-center justify-between">
|
||||
<div class="flex items-center">
|
||||
<div class="w-10 h-10 bg-[var(--color-accent-light)] rounded-lg flex items-center justify-center mr-3">
|
||||
<i class="fas fa-cogs text-[var(--color-accent)]"></i>
|
||||
</div>
|
||||
<div>
|
||||
<h3 class="text-sm font-medium text-[var(--color-text-primary)]">{{.Name}}</h3>
|
||||
<div class="flex flex-wrap gap-1 mt-1">
|
||||
{{ if .IsSystem }}
|
||||
<span class="inline-flex items-center px-1.5 py-0.5 rounded text-[10px] font-medium bg-blue-500/10 text-blue-300">
|
||||
<i class="fas fa-shield-alt text-[8px] mr-1"></i>System
|
||||
</span>
|
||||
{{ else }}
|
||||
<span class="inline-flex items-center px-1.5 py-0.5 rounded text-[10px] font-medium bg-[var(--color-success)]/10 text-green-300">
|
||||
<i class="fas fa-download text-[8px] mr-1"></i>User
|
||||
</span>
|
||||
{{ end }}
|
||||
{{ if .IsMeta }}
|
||||
<span class="inline-flex items-center px-1.5 py-0.5 rounded text-[10px] font-medium bg-[var(--color-accent-light)] text-[var(--color-accent)]">
|
||||
<i class="fas fa-layer-group text-[8px] mr-1"></i>Meta
|
||||
</span>
|
||||
{{ end }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<span class="inline-flex items-center px-2 py-0.5 rounded text-xs font-medium bg-green-500/20 text-green-300">
|
||||
Installed
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
{{ else }}
|
||||
<div class="col-span-full text-center py-8 text-[var(--color-text-secondary)]">
|
||||
<i class="fas fa-plug text-4xl mb-3 opacity-50"></i>
|
||||
<p>No backends installed yet</p>
|
||||
<p class="text-sm mt-2">
|
||||
<a href="/browse/backends" class="text-[var(--color-primary)] hover:underline">Browse the backend gallery</a>
|
||||
</p>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Manage dashboard component
|
||||
function manageDashboard() {
|
||||
return {
|
||||
notifications: [],
|
||||
|
||||
init() {
|
||||
// Initialize
|
||||
},
|
||||
|
||||
addNotification(message, type = 'success') {
|
||||
const id = Date.now();
|
||||
this.notifications.push({ id, message, type });
|
||||
setTimeout(() => this.dismissNotification(id), 5000);
|
||||
},
|
||||
|
||||
dismissNotification(id) {
|
||||
this.notifications = this.notifications.filter(n => n.id !== id);
|
||||
},
|
||||
|
||||
reloadModels() {
|
||||
fetch('/models/reload', { method: 'POST' })
|
||||
.then(response => {
|
||||
if (response.ok) {
|
||||
this.addNotification('Models reloaded successfully');
|
||||
setTimeout(() => window.location.reload(), 1000);
|
||||
} else {
|
||||
this.addNotification('Failed to reload models', 'error');
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
this.addNotification('Error: ' + error.message, 'error');
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Stop model function
|
||||
async function stopModelManage(modelName) {
|
||||
if (!confirm(`Are you sure you want to stop "${modelName}"?`)) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch('/backend/shutdown', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ model: modelName })
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
setTimeout(() => window.location.reload(), 500);
|
||||
} else {
|
||||
alert('Failed to stop model');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error stopping model:', error);
|
||||
alert('Failed to stop model');
|
||||
}
|
||||
}
|
||||
|
||||
window.manageDashboard = manageDashboard;
|
||||
window.stopModelManage = stopModelManage;
|
||||
</script>
|
||||
229
core/http/views/spa/talk.html
Normal file
@@ -0,0 +1,229 @@
|
||||
<!-- Talk View Content for SPA -->
|
||||
<div class="container mx-auto px-4 py-8 flex-grow">
|
||||
<!-- Hero Section -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<h1 class="hero-title">
|
||||
<i class="fas fa-comments mr-2"></i>Talk Interface
|
||||
</h1>
|
||||
<p class="hero-subtitle">Speak with your AI models using voice interaction</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Talk Interface -->
|
||||
<div class="max-w-3xl mx-auto">
|
||||
<div class="card overflow-hidden">
|
||||
<!-- Talk Interface Body -->
|
||||
<div class="p-6">
|
||||
<!-- Recording Status -->
|
||||
<div id="spa-recording" class="bg-red-500/10 border border-red-500/30 rounded-lg p-4 mb-4 flex items-center space-x-3" style="display: none;">
|
||||
<i class="fa-solid fa-microphone text-2xl text-red-400"></i>
|
||||
<span class="text-red-300 font-medium">Recording... press "Stop recording" to stop</span>
|
||||
</div>
|
||||
|
||||
<!-- Loader -->
|
||||
<div id="spa-talk-loader" class="my-4 flex justify-center" style="display: none;">
|
||||
<div class="animate-spin rounded-full h-10 w-10 border-t-2 border-b-2 border-[var(--color-primary)]"></div>
|
||||
</div>
|
||||
|
||||
<!-- Status Text -->
|
||||
<div id="spa-statustext" class="my-4 p-3 bg-[var(--color-bg-secondary)] border border-[var(--color-border)] rounded-lg text-[var(--color-text-primary)]" style="min-height: 3rem;">Press the record button to start recording.</div>
|
||||
|
||||
<!-- Note -->
|
||||
<div class="bg-[var(--color-primary-light)] border border-[var(--color-primary-border)] rounded-lg p-4 mb-6">
|
||||
<div class="flex items-start">
|
||||
<i class="fas fa-info-circle text-[var(--color-primary)] mt-1 mr-3 flex-shrink-0"></i>
|
||||
<p class="text-[var(--color-text-secondary)]">
|
||||
<strong class="text-[var(--color-primary)]">Note:</strong> You need an LLM, an audio-transcription (whisper), and a TTS model installed for this to work. Select the appropriate models below and click 'Talk' to start recording.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Model Selectors -->
|
||||
<div class="grid grid-cols-1 md:grid-cols-3 gap-6 mb-6">
|
||||
<!-- LLM Model -->
|
||||
<div class="space-y-2">
|
||||
<label for="spa-modelSelect" class="flex items-center text-[var(--color-text-secondary)] font-medium">
|
||||
<i class="fas fa-brain text-[var(--color-primary)] mr-2"></i>LLM Model
|
||||
</label>
|
||||
<select id="spa-modelSelect" class="input w-full p-2.5">
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model</option>
|
||||
{{ range .ModelsConfig }}
|
||||
<option value="{{.Name}}" class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{.Name}}</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<!-- Whisper Model -->
|
||||
<div class="space-y-2">
|
||||
<label for="spa-whisperModelSelect" class="flex items-center text-[var(--color-text-secondary)] font-medium">
|
||||
<i class="fas fa-ear-listen text-[var(--color-accent)] mr-2"></i>Whisper Model
|
||||
</label>
|
||||
<select id="spa-whisperModelSelect" class="input w-full p-2.5">
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model</option>
|
||||
{{ range .ModelsConfig }}
|
||||
<option value="{{.Name}}" class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{.Name}}</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<!-- TTS Model -->
|
||||
<div class="space-y-2">
|
||||
<label for="spa-ttsModelSelect" class="flex items-center text-[var(--color-text-secondary)] font-medium">
|
||||
<i class="fas fa-volume-high text-green-400 mr-2"></i>TTS Model
|
||||
</label>
|
||||
<select id="spa-ttsModelSelect" class="input w-full p-2.5">
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model</option>
|
||||
{{ range .ModelsConfig }}
|
||||
<option value="{{.Name}}" class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{.Name}}</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Buttons -->
|
||||
<div class="flex items-center justify-between mt-8">
|
||||
<button id="spa-recordButton" onclick="startTalkRecording()"
|
||||
class="inline-flex items-center bg-red-500 hover:bg-red-600 text-white font-semibold py-2 px-6 rounded-lg transition-colors">
|
||||
<i class="fas fa-microphone mr-2"></i>
|
||||
<span>Talk</span>
|
||||
</button>
|
||||
<button id="spa-stopRecordButton" onclick="stopTalkRecording()" style="display: none;"
|
||||
class="inline-flex items-center bg-gray-500 hover:bg-gray-600 text-white font-semibold py-2 px-6 rounded-lg transition-colors">
|
||||
<i class="fas fa-stop mr-2"></i>
|
||||
<span>Stop Recording</span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<!-- Audio Result -->
|
||||
<div id="spa-talk-result" class="mt-6"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Simplified Talk functions for SPA
|
||||
let talkMediaRecorder = null;
|
||||
let talkAudioChunks = [];
|
||||
|
||||
function startTalkRecording() {
|
||||
const statusText = document.getElementById('spa-statustext');
|
||||
const recording = document.getElementById('spa-recording');
|
||||
const recordButton = document.getElementById('spa-recordButton');
|
||||
const stopButton = document.getElementById('spa-stopRecordButton');
|
||||
|
||||
navigator.mediaDevices.getUserMedia({ audio: true })
|
||||
.then(stream => {
|
||||
talkMediaRecorder = new MediaRecorder(stream);
|
||||
talkAudioChunks = [];
|
||||
|
||||
talkMediaRecorder.ondataavailable = event => {
|
||||
talkAudioChunks.push(event.data);
|
||||
};
|
||||
|
||||
talkMediaRecorder.onstop = () => {
|
||||
const audioBlob = new Blob(talkAudioChunks, { type: 'audio/wav' });
|
||||
processTalkAudio(audioBlob);
|
||||
};
|
||||
|
||||
talkMediaRecorder.start();
|
||||
recording.style.display = 'flex';
|
||||
recordButton.style.display = 'none';
|
||||
stopButton.style.display = 'inline-flex';
|
||||
statusText.textContent = 'Recording... Speak now.';
|
||||
})
|
||||
.catch(error => {
|
||||
statusText.textContent = 'Error accessing microphone: ' + error.message;
|
||||
});
|
||||
}
|
||||
|
||||
function stopTalkRecording() {
|
||||
const recording = document.getElementById('spa-recording');
|
||||
const recordButton = document.getElementById('spa-recordButton');
|
||||
const stopButton = document.getElementById('spa-stopRecordButton');
|
||||
|
||||
if (talkMediaRecorder && talkMediaRecorder.state !== 'inactive') {
|
||||
talkMediaRecorder.stop();
|
||||
talkMediaRecorder.stream.getTracks().forEach(track => track.stop());
|
||||
}
|
||||
|
||||
recording.style.display = 'none';
|
||||
recordButton.style.display = 'inline-flex';
|
||||
stopButton.style.display = 'none';
|
||||
}
|
||||
|
||||
function processTalkAudio(audioBlob) {
|
||||
const statusText = document.getElementById('spa-statustext');
|
||||
const loader = document.getElementById('spa-talk-loader');
|
||||
const result = document.getElementById('spa-talk-result');
|
||||
const llmModel = document.getElementById('spa-modelSelect').value;
|
||||
const whisperModel = document.getElementById('spa-whisperModelSelect').value;
|
||||
const ttsModel = document.getElementById('spa-ttsModelSelect').value;
|
||||
|
||||
if (!llmModel || !whisperModel || !ttsModel) {
|
||||
statusText.textContent = 'Please select all three models (LLM, Whisper, TTS)';
|
||||
return;
|
||||
}
|
||||
|
||||
loader.style.display = 'flex';
|
||||
statusText.textContent = 'Processing...';
|
||||
|
||||
// Step 1: Transcribe audio
|
||||
const formData = new FormData();
|
||||
formData.append('file', audioBlob, 'audio.wav');
|
||||
formData.append('model', whisperModel);
|
||||
|
||||
fetch('/v1/audio/transcriptions', {
|
||||
method: 'POST',
|
||||
body: formData
|
||||
})
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
const transcription = data.text;
|
||||
statusText.textContent = 'You said: ' + transcription;
|
||||
|
||||
// Step 2: Send to LLM
|
||||
return fetch('/v1/chat/completions', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: llmModel,
|
||||
messages: [{ role: 'user', content: transcription }]
|
||||
})
|
||||
});
|
||||
})
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
const reply = data.choices[0].message.content;
|
||||
statusText.textContent = 'AI: ' + reply;
|
||||
|
||||
// Step 3: Convert to speech
|
||||
return fetch('/tts', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: ttsModel,
|
||||
input: reply
|
||||
})
|
||||
});
|
||||
})
|
||||
.then(response => response.blob())
|
||||
.then(blob => {
|
||||
loader.style.display = 'none';
|
||||
const audioUrl = URL.createObjectURL(blob);
|
||||
result.innerHTML = `
|
||||
<audio controls autoplay class="w-full">
|
||||
<source src="${audioUrl}" type="audio/wav">
|
||||
</audio>
|
||||
`;
|
||||
})
|
||||
.catch(error => {
|
||||
loader.style.display = 'none';
|
||||
statusText.textContent = 'Error: ' + error.message;
|
||||
});
|
||||
}
|
||||
|
||||
window.startTalkRecording = startTalkRecording;
|
||||
window.stopTalkRecording = stopTalkRecording;
|
||||
</script>
|
||||
155
core/http/views/spa/text2image.html
Normal file
@@ -0,0 +1,155 @@
|
||||
<!-- Text2Image View Content for SPA -->
|
||||
<div class="flex flex-col flex-1 overflow-hidden">
|
||||
<div class="flex flex-1 overflow-hidden">
|
||||
<!-- Two Column Layout: Settings on Left, Preview on Right -->
|
||||
<div class="flex flex-col lg:flex-row flex-1 gap-4 p-4 overflow-hidden">
|
||||
<!-- Left Column: Generation Settings -->
|
||||
<div class="flex-shrink-0 lg:w-1/4 flex flex-col min-h-0">
|
||||
<div class="card p-3 space-y-3 overflow-y-auto flex-1">
|
||||
<!-- Model Selection -->
|
||||
<div class="space-y-1.5">
|
||||
<div class="flex items-center justify-between gap-2">
|
||||
<label class="text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide flex-shrink-0">Model</label>
|
||||
</div>
|
||||
<select id="image-model-select" class="input w-full p-1.5 text-xs" @change="document.getElementById('image-model').value = $event.target.value">
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model</option>
|
||||
{{ $model:=.Model}}
|
||||
{{ range .ModelsConfig }}
|
||||
{{ $cfg := . }}
|
||||
{{ range .KnownUsecaseStrings }}
|
||||
{{ if eq . "FLAG_IMAGE" }}
|
||||
<option value="{{$cfg.Name}}" {{ if eq $cfg.Name $model }} selected {{end}} class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{$cfg.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ range .ModelsWithoutConfig }}
|
||||
<option value="{{.}}" {{ if eq . $model }} selected {{ end }} class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{.}}</option>
|
||||
{{end}}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="relative">
|
||||
<input id="image-model" type="hidden" value="{{.Model}}">
|
||||
<form id="genimage" @submit.prevent="genImage($event)">
|
||||
<!-- Basic Settings -->
|
||||
<div class="space-y-2">
|
||||
<!-- Prompt -->
|
||||
<div class="space-y-1">
|
||||
<label for="image-input" class="block text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide">
|
||||
<i class="fas fa-magic mr-1.5 text-[var(--color-primary)]"></i>Prompt
|
||||
</label>
|
||||
<textarea
|
||||
id="image-input"
|
||||
name="input"
|
||||
placeholder="Describe the image you want to generate..."
|
||||
autocomplete="off"
|
||||
rows="3"
|
||||
class="input w-full p-1.5 text-xs resize-y"
|
||||
required
|
||||
></textarea>
|
||||
</div>
|
||||
|
||||
<!-- Negative Prompt -->
|
||||
<div class="space-y-1">
|
||||
<label for="negative-prompt" class="block text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide">
|
||||
<i class="fas fa-ban mr-1.5 text-[var(--color-primary)]"></i>Negative Prompt
|
||||
</label>
|
||||
<textarea
|
||||
id="negative-prompt"
|
||||
name="negative-prompt"
|
||||
placeholder="Things to avoid in the image..."
|
||||
rows="2"
|
||||
class="input w-full p-1.5 text-xs resize-y"
|
||||
></textarea>
|
||||
</div>
|
||||
|
||||
<!-- Size Selection -->
|
||||
<div class="space-y-1">
|
||||
<label for="image-size" class="block text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide">
|
||||
<i class="fas fa-expand-arrows-alt mr-1.5 text-[var(--color-primary)]"></i>Image Size
|
||||
</label>
|
||||
<div class="flex flex-wrap gap-1.5 mb-1.5">
|
||||
<button type="button" class="size-preset px-2 py-0.5 text-[10px] rounded border border-[var(--color-border)] hover:bg-[var(--color-bg-secondary)]" data-size="256x256">256×256</button>
|
||||
<button type="button" class="size-preset px-2 py-0.5 text-[10px] rounded border border-[var(--color-border)] hover:bg-[var(--color-bg-secondary)] bg-[var(--color-primary)] text-white" data-size="512x512">512×512</button>
|
||||
<button type="button" class="size-preset px-2 py-0.5 text-[10px] rounded border border-[var(--color-border)] hover:bg-[var(--color-bg-secondary)]" data-size="768x768">768×768</button>
|
||||
<button type="button" class="size-preset px-2 py-0.5 text-[10px] rounded border border-[var(--color-border)] hover:bg-[var(--color-bg-secondary)]" data-size="1024x1024">1024×1024</button>
|
||||
</div>
|
||||
<input
|
||||
type="text"
|
||||
id="image-size"
|
||||
value="512x512"
|
||||
placeholder="e.g., 256x256, 512x512, 1024x1024"
|
||||
class="input p-1.5 text-xs w-full"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<!-- Number of Images -->
|
||||
<div class="space-y-1">
|
||||
<label for="image-count" class="block text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide">
|
||||
<i class="fas fa-images mr-1.5 text-[var(--color-primary)]"></i>Number of Images
|
||||
</label>
|
||||
<input
|
||||
type="number"
|
||||
id="image-count"
|
||||
name="n"
|
||||
min="1"
|
||||
max="4"
|
||||
value="1"
|
||||
class="input p-1.5 text-xs w-full"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Submit Button -->
|
||||
<div class="mt-4">
|
||||
<button
|
||||
type="submit"
|
||||
id="generate-btn"
|
||||
class="w-full px-2 py-1.5 text-xs rounded text-[var(--color-bg-primary)] bg-[var(--color-primary)] hover:bg-[var(--color-primary)]/90 transition-colors font-medium"
|
||||
>
|
||||
<i class="fas fa-magic mr-1.5"></i>Generate Image
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Right Column: Image Preview -->
|
||||
<div class="flex-grow lg:w-3/4 flex flex-col min-h-0">
|
||||
<div class="relative flex-1 min-h-0 overflow-y-auto">
|
||||
<!-- Loading Animation -->
|
||||
<div id="loader" class="hidden absolute inset-0 flex items-center justify-center bg-[var(--color-bg-primary)]/80 rounded-xl z-10">
|
||||
<div class="text-center">
|
||||
<svg class="animate-spin h-10 w-10 text-[var(--color-primary)] mx-auto mb-3" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
|
||||
</svg>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Generating image...</p>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Placeholder when no images -->
|
||||
<div id="result-placeholder" class="min-h-[400px] flex items-center justify-center flex-shrink-0">
|
||||
<p class="text-xs text-[var(--color-text-secondary)] italic text-center">Your generated images will appear here</p>
|
||||
</div>
|
||||
<!-- Results container -->
|
||||
<div id="result" class="grid grid-cols-1 sm:grid-cols-2 gap-4 pb-4"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Size preset buttons for SPA
|
||||
document.querySelectorAll('.size-preset').forEach(button => {
|
||||
button.addEventListener('click', function() {
|
||||
const size = this.getAttribute('data-size');
|
||||
document.getElementById('image-size').value = size;
|
||||
document.querySelectorAll('.size-preset').forEach(btn => {
|
||||
btn.classList.remove('bg-[var(--color-primary)]', 'text-white');
|
||||
});
|
||||
this.classList.add('bg-[var(--color-primary)]', 'text-white');
|
||||
});
|
||||
});
|
||||
</script>
|
||||
138
core/http/views/spa/tts.html
Normal file
@@ -0,0 +1,138 @@
|
||||
<!-- TTS View Content for SPA -->
|
||||
<div class="container mx-auto px-4 py-8 flex-grow">
|
||||
<!-- Hero Section -->
|
||||
<div class="hero-section">
|
||||
<div class="hero-content">
|
||||
<h1 class="hero-title">
|
||||
<i class="fas fa-volume-high mr-2"></i>Text to Speech
|
||||
</h1>
|
||||
<p class="hero-subtitle">Convert your text into natural-sounding speech</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- TTS Interface -->
|
||||
<div class="max-w-3xl mx-auto">
|
||||
<div class="card overflow-hidden">
|
||||
<!-- Header with Model Selection -->
|
||||
<div class="border-b border-[var(--color-bg-secondary)] p-5">
|
||||
<div class="flex flex-col sm:flex-row items-center justify-between gap-4">
|
||||
<!-- Model Selection -->
|
||||
<div class="flex items-center">
|
||||
<label for="tts-model-select" class="mr-3 text-[var(--color-text-secondary)] font-medium">
|
||||
<i class="fas fa-microphone-lines text-[var(--color-accent)] mr-2"></i>Model:
|
||||
</label>
|
||||
<select id="tts-model-select" class="input p-2.5" @change="document.getElementById('tts-model').value = $event.target.value">
|
||||
<option value="" disabled class="text-[var(--color-text-secondary)]">Select a model</option>
|
||||
{{ $model:=.Model}}
|
||||
{{ range .ModelsConfig }}
|
||||
{{ $cfg := . }}
|
||||
{{ range .KnownUsecaseStrings }}
|
||||
{{ if eq . "FLAG_TTS" }}
|
||||
<option value="{{$cfg.Name}}" {{ if eq $cfg.Name $model }} selected {{end}} class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{$cfg.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ range .ModelsWithoutConfig }}
|
||||
<option value="{{.}}" {{ if eq . $model }} selected {{ end }} class="bg-[var(--color-bg-primary)] text-[var(--color-text-primary)]">{{.}}</option>
|
||||
{{end}}
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Input Area -->
|
||||
<div class="p-6">
|
||||
<div class="bg-[var(--color-accent-light)] border border-[var(--color-accent-border)] rounded-lg p-4 mb-6">
|
||||
<div class="flex items-start">
|
||||
<i class="fas fa-info-circle text-[var(--color-accent)] mt-1 mr-3 flex-shrink-0"></i>
|
||||
<p class="text-[var(--color-text-secondary)]">
|
||||
Enter your text below and submit to generate speech with the selected TTS model.
|
||||
The generated audio will appear below the input field.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<input id="tts-model" type="hidden" value="{{.Model}}">
|
||||
<form id="tts" @submit.prevent="generateTTS($event)" class="mb-6">
|
||||
<div class="relative">
|
||||
<input
|
||||
type="text"
|
||||
id="tts-input"
|
||||
name="input"
|
||||
placeholder="Enter text to convert to speech..."
|
||||
autocomplete="off"
|
||||
class="input w-full p-4 pl-4 pr-12"
|
||||
required
|
||||
/>
|
||||
<button type="submit" class="absolute right-3 top-1/2 transform -translate-y-1/2 text-[var(--color-accent)] hover:text-[var(--color-primary)] transition icon-hover">
|
||||
<i class="fas fa-paper-plane"></i>
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
<!-- Loading indicator -->
|
||||
<div class="flex justify-center my-6">
|
||||
<div id="tts-loader" class="animate-spin rounded-full h-10 w-10 border-t-2 border-b-2 border-[var(--color-accent)]" style="display: none;"></div>
|
||||
</div>
|
||||
|
||||
<!-- Results Area -->
|
||||
<div class="bg-[var(--color-bg-secondary)] border border-[var(--color-border)] rounded-lg p-4 min-h-[100px] flex items-center justify-center">
|
||||
<div id="tts-result" class="w-full text-center text-[var(--color-text-secondary)]">
|
||||
<p>Generated audio will appear here</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// TTS generation function for SPA
|
||||
function generateTTS(event) {
|
||||
if (event) event.preventDefault();
|
||||
|
||||
const input = document.getElementById('tts-input');
|
||||
const model = document.getElementById('tts-model')?.value;
|
||||
const loader = document.getElementById('tts-loader');
|
||||
const result = document.getElementById('tts-result');
|
||||
|
||||
if (!input?.value.trim() || !model) {
|
||||
alert('Please enter text and select a model');
|
||||
return;
|
||||
}
|
||||
|
||||
loader.style.display = 'block';
|
||||
result.innerHTML = '';
|
||||
|
||||
fetch('/tts', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: model,
|
||||
input: input.value.trim()
|
||||
})
|
||||
})
|
||||
.then(response => response.blob())
|
||||
.then(blob => {
|
||||
loader.style.display = 'none';
|
||||
const audioUrl = URL.createObjectURL(blob);
|
||||
result.innerHTML = `
|
||||
<audio controls class="w-full">
|
||||
<source src="${audioUrl}" type="audio/wav">
|
||||
Your browser does not support the audio element.
|
||||
</audio>
|
||||
<a href="${audioUrl}" download="tts_output.wav" class="mt-3 inline-block btn-secondary text-sm">
|
||||
<i class="fas fa-download mr-2"></i>Download
|
||||
</a>
|
||||
`;
|
||||
})
|
||||
.catch(error => {
|
||||
loader.style.display = 'none';
|
||||
result.innerHTML = `<p class="text-red-400">Error generating speech: ${error.message}</p>`;
|
||||
});
|
||||
}
|
||||
|
||||
window.generateTTS = generateTTS;
|
||||
</script>
|
||||
@@ -215,26 +215,23 @@
|
||||
|
||||
<!-- Right Column: Image Preview -->
|
||||
<div class="flex-grow lg:w-3/4 flex flex-col min-h-0">
|
||||
<div class="card p-3 flex flex-col flex-1 min-h-0">
|
||||
<h3 class="text-sm font-semibold text-[var(--color-text-primary)] mb-3 flex-shrink-0">Generated Images</h3>
|
||||
<div class="relative flex-1 min-h-0 overflow-y-auto">
|
||||
<!-- Loading Animation -->
|
||||
<div id="loader" class="hidden absolute inset-0 flex items-center justify-center bg-[var(--color-bg-primary)]/80 rounded-xl z-10">
|
||||
<div class="text-center">
|
||||
<svg class="animate-spin h-10 w-10 text-[var(--color-primary)] mx-auto mb-3" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
|
||||
</svg>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Generating image...</p>
|
||||
</div>
|
||||
<div class="relative flex-1 min-h-0 overflow-y-auto">
|
||||
<!-- Loading Animation -->
|
||||
<div id="loader" class="hidden absolute inset-0 flex items-center justify-center bg-[var(--color-bg-primary)]/80 rounded-xl z-10">
|
||||
<div class="text-center">
|
||||
<svg class="animate-spin h-10 w-10 text-[var(--color-primary)] mx-auto mb-3" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
|
||||
</svg>
|
||||
<p class="text-xs text-[var(--color-text-secondary)]">Generating image...</p>
|
||||
</div>
|
||||
<!-- Placeholder when no images -->
|
||||
<div id="result-placeholder" class="bg-[var(--color-bg-primary)]/50 border border-[#1E293B] rounded-xl p-6 min-h-[400px] flex items-center justify-center flex-shrink-0">
|
||||
<p class="text-xs text-[var(--color-text-secondary)] italic text-center">Your generated images will appear here</p>
|
||||
</div>
|
||||
<!-- Results container -->
|
||||
<div id="result" class="space-y-4 pb-4"></div>
|
||||
</div>
|
||||
<!-- Placeholder when no images -->
|
||||
<div id="result-placeholder" class="min-h-[400px] flex items-center justify-center flex-shrink-0">
|
||||
<p class="text-xs text-[var(--color-text-secondary)] italic text-center">Your generated images will appear here</p>
|
||||
</div>
|
||||
<!-- Results container -->
|
||||
<div id="result" class="grid grid-cols-1 sm:grid-cols-2 gap-4 pb-4"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
176
core/schema/anthropic.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// AnthropicRequest represents a request to the Anthropic Messages API
|
||||
// https://docs.anthropic.com/claude/reference/messages_post
|
||||
type AnthropicRequest struct {
|
||||
Model string `json:"model"`
|
||||
Messages []AnthropicMessage `json:"messages"`
|
||||
MaxTokens int `json:"max_tokens"`
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
StopSequences []string `json:"stop_sequences,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
Temperature *float64 `json:"temperature,omitempty"`
|
||||
TopK *int `json:"top_k,omitempty"`
|
||||
TopP *float64 `json:"top_p,omitempty"`
|
||||
Tools []AnthropicTool `json:"tools,omitempty"`
|
||||
ToolChoice interface{} `json:"tool_choice,omitempty"`
|
||||
|
||||
// Internal fields for request handling
|
||||
Context context.Context `json:"-"`
|
||||
Cancel context.CancelFunc `json:"-"`
|
||||
}
|
||||
|
||||
// ModelName implements the LocalAIRequest interface
|
||||
func (ar *AnthropicRequest) ModelName(s *string) string {
|
||||
if s != nil {
|
||||
ar.Model = *s
|
||||
}
|
||||
return ar.Model
|
||||
}
|
||||
|
||||
// AnthropicTool represents a tool definition in the Anthropic format
|
||||
type AnthropicTool struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
InputSchema map[string]interface{} `json:"input_schema"`
|
||||
}
|
||||
|
||||
// AnthropicMessage represents a message in the Anthropic format
|
||||
type AnthropicMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content interface{} `json:"content"`
|
||||
}
|
||||
|
||||
// AnthropicContentBlock represents a content block in an Anthropic message
|
||||
type AnthropicContentBlock struct {
|
||||
Type string `json:"type"`
|
||||
Text string `json:"text,omitempty"`
|
||||
Source *AnthropicImageSource `json:"source,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Input map[string]interface{} `json:"input,omitempty"`
|
||||
ToolUseID string `json:"tool_use_id,omitempty"`
|
||||
Content interface{} `json:"content,omitempty"`
|
||||
IsError *bool `json:"is_error,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicImageSource represents an image source in Anthropic format
|
||||
type AnthropicImageSource struct {
|
||||
Type string `json:"type"`
|
||||
MediaType string `json:"media_type"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
// AnthropicResponse represents a response from the Anthropic Messages API
|
||||
type AnthropicResponse struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Role string `json:"role"`
|
||||
Content []AnthropicContentBlock `json:"content"`
|
||||
Model string `json:"model"`
|
||||
StopReason *string `json:"stop_reason"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
Usage AnthropicUsage `json:"usage"`
|
||||
}
|
||||
|
||||
// AnthropicUsage represents token usage in Anthropic format
|
||||
type AnthropicUsage struct {
|
||||
InputTokens int `json:"input_tokens"`
|
||||
OutputTokens int `json:"output_tokens"`
|
||||
}
|
||||
|
||||
// AnthropicStreamEvent represents a streaming event from the Anthropic API
|
||||
type AnthropicStreamEvent struct {
|
||||
Type string `json:"type"`
|
||||
Index int `json:"index,omitempty"`
|
||||
ContentBlock *AnthropicContentBlock `json:"content_block,omitempty"`
|
||||
Delta *AnthropicStreamDelta `json:"delta,omitempty"`
|
||||
Message *AnthropicStreamMessage `json:"message,omitempty"`
|
||||
Usage *AnthropicUsage `json:"usage,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicStreamDelta represents the delta in a streaming response
|
||||
type AnthropicStreamDelta struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
PartialJSON string `json:"partial_json,omitempty"`
|
||||
StopReason *string `json:"stop_reason,omitempty"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicStreamMessage represents the message object in streaming events
|
||||
type AnthropicStreamMessage struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Role string `json:"role"`
|
||||
Content []AnthropicContentBlock `json:"content"`
|
||||
Model string `json:"model"`
|
||||
StopReason *string `json:"stop_reason"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
Usage AnthropicUsage `json:"usage"`
|
||||
}
|
||||
|
||||
// AnthropicErrorResponse represents an error response from the Anthropic API
|
||||
type AnthropicErrorResponse struct {
|
||||
Type string `json:"type"`
|
||||
Error AnthropicError `json:"error"`
|
||||
}
|
||||
|
||||
// AnthropicError represents an error in the Anthropic format
|
||||
type AnthropicError struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// GetStringContent extracts the string content from an AnthropicMessage
|
||||
// Content can be either a string or an array of content blocks
|
||||
func (m *AnthropicMessage) GetStringContent() string {
|
||||
switch content := m.Content.(type) {
|
||||
case string:
|
||||
return content
|
||||
case []interface{}:
|
||||
var result string
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
if blockMap["type"] == "text" {
|
||||
if text, ok := blockMap["text"].(string); ok {
|
||||
result += text
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetContentBlocks extracts content blocks from an AnthropicMessage
|
||||
func (m *AnthropicMessage) GetContentBlocks() []AnthropicContentBlock {
|
||||
switch content := m.Content.(type) {
|
||||
case string:
|
||||
return []AnthropicContentBlock{{Type: "text", Text: content}}
|
||||
case []interface{}:
|
||||
var blocks []AnthropicContentBlock
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
cb := AnthropicContentBlock{}
|
||||
data, err := json.Marshal(blockMap)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err := json.Unmarshal(data, &cb); err != nil {
|
||||
continue
|
||||
}
|
||||
blocks = append(blocks, cb)
|
||||
}
|
||||
}
|
||||
return blocks
|
||||
}
|
||||
return nil
|
||||
}
|
||||
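For reference, a minimal sketch of the server-sent event payloads these streaming types are meant to carry. The event names message_start, content_block_delta, and message_delta follow the upstream Anthropic streaming API and are assumptions here (this diff does not show the handler emitting them); the field names come straight from the struct tags above:

{"type": "message_start", "message": {"id": "msg_123", "type": "message", "role": "assistant", "content": [], "model": "claude-3-sonnet-20240229", "stop_reason": null, "usage": {"input_tokens": 10, "output_tokens": 0}}}
{"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": "Hello"}}
{"type": "message_delta", "delta": {"stop_reason": "end_turn"}, "usage": {"input_tokens": 10, "output_tokens": 5}}

Each line would unmarshal into AnthropicStreamEvent, with the nested objects mapping to AnthropicStreamMessage and AnthropicStreamDelta respectively.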
216
core/schema/anthropic_test.go
Normal file
@@ -0,0 +1,216 @@
|
||||
package schema_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Anthropic Schema", func() {
|
||||
Describe("AnthropicRequest", func() {
|
||||
It("should unmarshal a valid request", func() {
|
||||
jsonData := `{
|
||||
"model": "claude-3-sonnet-20240229",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello, world!"}
|
||||
],
|
||||
"system": "You are a helpful assistant.",
|
||||
"temperature": 0.7
|
||||
}`
|
||||
|
||||
var req schema.AnthropicRequest
|
||||
err := json.Unmarshal([]byte(jsonData), &req)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(req.Model).To(Equal("claude-3-sonnet-20240229"))
|
||||
Expect(req.MaxTokens).To(Equal(1024))
|
||||
Expect(len(req.Messages)).To(Equal(1))
|
||||
Expect(req.System).To(Equal("You are a helpful assistant."))
|
||||
Expect(*req.Temperature).To(Equal(0.7))
|
||||
})
|
||||
|
||||
It("should unmarshal a request with tools", func() {
|
||||
jsonData := `{
|
||||
"model": "claude-3-sonnet-20240229",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the weather?"}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {"type": "string"}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"tool_choice": {"type": "tool", "name": "get_weather"}
|
||||
}`
|
||||
|
||||
var req schema.AnthropicRequest
|
||||
err := json.Unmarshal([]byte(jsonData), &req)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(req.Tools)).To(Equal(1))
|
||||
Expect(req.Tools[0].Name).To(Equal("get_weather"))
|
||||
Expect(req.Tools[0].Description).To(Equal("Get the current weather"))
|
||||
Expect(req.ToolChoice).ToNot(BeNil())
|
||||
})
|
||||
|
||||
It("should implement LocalAIRequest interface", func() {
|
||||
req := &schema.AnthropicRequest{Model: "test-model"}
|
||||
Expect(req.ModelName(nil)).To(Equal("test-model"))
|
||||
|
||||
newModel := "new-model"
|
||||
Expect(req.ModelName(&newModel)).To(Equal("new-model"))
|
||||
Expect(req.Model).To(Equal("new-model"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicMessage", func() {
|
||||
It("should get string content from string content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: "Hello, world!",
|
||||
}
|
||||
Expect(msg.GetStringContent()).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get string content from array content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: []interface{}{
|
||||
map[string]interface{}{"type": "text", "text": "Hello, "},
|
||||
map[string]interface{}{"type": "text", "text": "world!"},
|
||||
},
|
||||
}
|
||||
Expect(msg.GetStringContent()).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get content blocks from string content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: "Hello, world!",
|
||||
}
|
||||
blocks := msg.GetContentBlocks()
|
||||
Expect(len(blocks)).To(Equal(1))
|
||||
Expect(blocks[0].Type).To(Equal("text"))
|
||||
Expect(blocks[0].Text).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get content blocks from array content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: []interface{}{
|
||||
map[string]interface{}{"type": "text", "text": "Hello"},
|
||||
map[string]interface{}{"type": "image", "source": map[string]interface{}{"type": "base64", "data": "abc123"}},
|
||||
},
|
||||
}
|
||||
blocks := msg.GetContentBlocks()
|
||||
Expect(len(blocks)).To(Equal(2))
|
||||
Expect(blocks[0].Type).To(Equal("text"))
|
||||
Expect(blocks[0].Text).To(Equal("Hello"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicResponse", func() {
|
||||
It("should marshal a valid response", func() {
|
||||
stopReason := "end_turn"
|
||||
resp := schema.AnthropicResponse{
|
||||
ID: "msg_123",
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: "claude-3-sonnet-20240229",
|
||||
StopReason: &stopReason,
|
||||
Content: []schema.AnthropicContentBlock{
|
||||
{Type: "text", Text: "Hello!"},
|
||||
},
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["id"]).To(Equal("msg_123"))
|
||||
Expect(result["type"]).To(Equal("message"))
|
||||
Expect(result["role"]).To(Equal("assistant"))
|
||||
Expect(result["stop_reason"]).To(Equal("end_turn"))
|
||||
})
|
||||
|
||||
It("should marshal a response with tool use", func() {
|
||||
stopReason := "tool_use"
|
||||
resp := schema.AnthropicResponse{
|
||||
ID: "msg_123",
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: "claude-3-sonnet-20240229",
|
||||
StopReason: &stopReason,
|
||||
Content: []schema.AnthropicContentBlock{
|
||||
{
|
||||
Type: "tool_use",
|
||||
ID: "toolu_123",
|
||||
Name: "get_weather",
|
||||
Input: map[string]interface{}{
|
||||
"location": "San Francisco",
|
||||
},
|
||||
},
|
||||
},
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["stop_reason"]).To(Equal("tool_use"))
|
||||
content := result["content"].([]interface{})
|
||||
Expect(len(content)).To(Equal(1))
|
||||
toolUse := content[0].(map[string]interface{})
|
||||
Expect(toolUse["type"]).To(Equal("tool_use"))
|
||||
Expect(toolUse["id"]).To(Equal("toolu_123"))
|
||||
Expect(toolUse["name"]).To(Equal("get_weather"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicErrorResponse", func() {
|
||||
It("should marshal an error response", func() {
|
||||
resp := schema.AnthropicErrorResponse{
|
||||
Type: "error",
|
||||
Error: schema.AnthropicError{
|
||||
Type: "invalid_request_error",
|
||||
Message: "max_tokens is required",
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["type"]).To(Equal("error"))
|
||||
errorObj := result["error"].(map[string]interface{})
|
||||
Expect(errorObj["type"]).To(Equal("invalid_request_error"))
|
||||
Expect(errorObj["message"]).To(Equal("max_tokens is required"))
|
||||
})
|
||||
})
|
||||
})
|
||||
79
core/schema/gallery-model.schema.json
Normal file
@@ -0,0 +1,79 @@
|
||||
{
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"$id": "https://raw.githubusercontent.com/mudler/LocalAI/main/schemas/gallery.model.schema.json",
|
||||
"title": "LocalAI Gallery Model Spec",
|
||||
"description": "Schema for LocalAI gallery model YAML files",
|
||||
"type": "object",
|
||||
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Model name"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Human-readable description of the model"
|
||||
},
|
||||
"icon": {
|
||||
"type": "string",
|
||||
"description": "Optional icon reference or URL"
|
||||
},
|
||||
"license": {
|
||||
"type": "string",
|
||||
"description": "Model license identifier or text"
|
||||
},
|
||||
"urls": {
|
||||
"type": "array",
|
||||
"description": "URLs pointing to remote model configuration",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
}
|
||||
},
|
||||
"config_file": {
|
||||
"type": "string",
|
||||
"description": "Inline YAML configuration that will be written to the model config file"
|
||||
},
|
||||
"files": {
|
||||
"type": "array",
|
||||
"description": "Files to download and install for this model",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["filename", "uri"],
|
||||
"properties": {
|
||||
"filename": {
|
||||
"type": "string"
|
||||
},
|
||||
"sha256": {
|
||||
"type": "string",
|
||||
"description": "Optional SHA256 checksum for file verification"
|
||||
},
|
||||
"uri": {
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"prompt_templates": {
|
||||
"type": "array",
|
||||
"description": "Prompt templates written as .tmpl files",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["name", "content"],
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"content": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"additionalProperties": false
|
||||
}
|
||||
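For illustration, a hypothetical gallery entry that validates against this schema. Every name, URL, and the checksum placeholder below is made up; actual gallery entries are YAML files with the same structure, shown here as JSON for consistency with the schema:

{
  "name": "example-model",
  "description": "Illustrative entry only, not a real gallery model",
  "license": "apache-2.0",
  "urls": ["https://example.com/example-model.yaml"],
  "files": [
    {
      "filename": "example-model.Q4_K_M.gguf",
      "uri": "https://example.com/example-model.Q4_K_M.gguf",
      "sha256": "0000000000000000000000000000000000000000000000000000000000000000"
    }
  ],
  "prompt_templates": [
    {"name": "example-chat", "content": "{{.Input}}"}
  ]
}

Because the schema sets additionalProperties to false, any key outside the ones defined above would make an entry fail validation.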
Some files were not shown because too many files have changed in this diff.