+ {/* Header */}
+
+
setSidebarOpen(prev => !prev)}
+ title={sidebarOpen ? 'Hide chat list' : 'Show chat list'}
+ style={{ flexShrink: 0 }}
+ >
+
+
+
{activeChat.name}
+
updateChatSettings(activeChat.id, { model })}
+ capability="FLAG_CHAT"
+ />
+ {activeChat.model && (
+ <>
+ setShowModelInfo(!showModelInfo)}
+ title="Model info"
+ >
+
+
+ navigate(`/model-editor/${encodeURIComponent(activeChat.model)}`)}
+ title="Edit model config"
+ >
+
+
+ >
+ )}
+ {mcpAvailable && (
+
+ MCP
+
+ updateChatSettings(activeChat.id, { mcpMode: e.target.checked })}
+ />
+
+
+
+ )}
+
+ exportChatAsMarkdown(activeChat)}
+ title="Export chat as Markdown"
+ >
+
+
+ clearHistory(activeChat.id)}
+ title="Clear chat history"
+ >
+
+
+ setShowSettings(!showSettings)}
+ title="Settings"
+ >
+
+
+
+
+
+ {/* Model info panel */}
+ {showModelInfo && modelInfo && (
+
+
+ Model Info: {activeChat.model}
+ setShowModelInfo(false)}>
+
+
+
+
+ {modelInfo.backend &&
Backend {modelInfo.backend}
}
+ {modelInfo.parameters?.model &&
Model file {modelInfo.parameters.model}
}
+ {modelInfo.context_size > 0 &&
Context size {modelInfo.context_size}
}
+ {modelInfo.threads > 0 &&
Threads {modelInfo.threads}
}
+ {(modelInfo.mcp?.remote || modelInfo.mcp?.stdio) &&
MCP Configured
}
+ {modelInfo.template?.chat_message &&
Chat template Yes
}
+ {modelInfo.gpu_layers > 0 &&
GPU layers {modelInfo.gpu_layers}
}
+
+
+ )}
+
+ {/* Context window progress bar */}
+ {contextPercent !== null && (
+
+
90 ? 'var(--color-error)' : contextPercent > 70 ? 'var(--color-warning)' : 'var(--color-primary)',
+ }}
+ />
+
+ Context: {Math.round(contextPercent)}%
+ {activeChat.tokenUsage.total > 0 && ` (${activeChat.tokenUsage.total} tokens)`}
+
+
+ )}
+
+ {/* Settings slide-out panel */}
+
setShowSettings(false)} />
+
+
+ Chat Settings
+ setShowSettings(false)}>
+
+
+
+
+
+ System Prompt
+
+
+
+ Temperature {activeChat.temperature !== null ? `(${activeChat.temperature})` : ''}
+
+
updateChatSettings(activeChat.id, { temperature: parseFloat(e.target.value) })}
+ className="chat-slider"
+ />
+
0 2
+
+
+
+ Top P {activeChat.topP !== null ? `(${activeChat.topP})` : ''}
+
+
updateChatSettings(activeChat.id, { topP: parseFloat(e.target.value) })}
+ className="chat-slider"
+ />
+
0 1
+
+
+
+ Top K {activeChat.topK !== null ? `(${activeChat.topK})` : ''}
+
+
updateChatSettings(activeChat.id, { topK: parseInt(e.target.value) })}
+ className="chat-slider"
+ />
+
1 100
+
+
+ Context Size
+ updateChatSettings(activeChat.id, { contextSize: parseInt(e.target.value) || null })}
+ placeholder="2048"
+ />
+
+
+
+
+ {/* Messages */}
+
+ {activeChat.history.length === 0 && !isStreaming && (
+
+
+
+
+
Start a conversation
+
Type a message below to begin chatting{activeChat.model ? ` with ${activeChat.model}` : ''}.
+
+ Enter to send
+ Shift+Enter for newline
+ Attach files
+
+
+ )}
+ {activeChat.history.map((msg, i) => {
+ if (msg.role === 'thinking' || msg.role === 'reasoning') {
+ return (
+
{
+ const newHistory = [...activeChat.history]
+ newHistory[i] = { ...newHistory[i], expanded: !newHistory[i].expanded }
+ updateChatSettings(activeChat.id, { history: newHistory })
+ }} />
+ )
+ }
+ if (msg.role === 'tool_call' || msg.role === 'tool_result') {
+ return (
+ {
+ const newHistory = [...activeChat.history]
+ newHistory[i] = { ...newHistory[i], expanded: !newHistory[i].expanded }
+ updateChatSettings(activeChat.id, { history: newHistory })
+ }} />
+ )
+ }
+ return (
+
+
+
+
+
+ {msg.role === 'assistant' && activeChat.model && (
+
{activeChat.model}
+ )}
+
+ {msg.role === 'user' ? (
+
+ ) : (
+
+ )}
+
+
+ copyMessage(msg.content)} title="Copy">
+
+
+ {msg.role === 'assistant' && i === activeChat.history.length - 1 && !isStreaming && (
+
+
+
+ )}
+
+
+
+ )
+ })}
+
+ {/* Streaming reasoning box */}
+ {isStreaming && streamingReasoning && (
+
+
+
+ {streamingReasoning}
+
+
+ )}
+
+ {/* Streaming tool calls */}
+ {isStreaming && }
+
+ {/* Streaming message */}
+ {isStreaming && streamingContent && (
+
+
+
+
+
+ {activeChat.model && (
+
{activeChat.model}
+ )}
+
+
+
+
+
+
+ )}
+ {isStreaming && !streamingContent && !streamingReasoning && streamingToolCalls.length === 0 && (
+
+ )}
+
+
+
+ {/* Token info bar */}
+ {(tokensPerSecond || maxTokensPerSecond || activeChat.tokenUsage?.total > 0) && (
+
+ {tokensPerSecond !== null && {tokensPerSecond} tok/s }
+ {maxTokensPerSecond !== null && !isStreaming && (
+
+ Peak: {maxTokensPerSecond} tok/s
+
+ )}
+ {activeChat.tokenUsage?.total > 0 && (
+
+ {activeChat.tokenUsage.prompt}p + {activeChat.tokenUsage.completion}c = {activeChat.tokenUsage.total}
+
+ )}
+
+ )}
+
+ {/* File badges */}
+ {files.length > 0 && (
+
+ {files.map((f, i) => (
+
+
+ {f.name}
+ setFiles(prev => prev.filter((_, idx) => idx !== i))}>
+
+
+
+ ))}
+
+ )}
+
+ {/* Input area */}
+
+
+ fileInputRef.current?.click()}
+ title="Attach file"
+ >
+
+
+
+
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Explorer.jsx b/core/http/react-ui/src/pages/Explorer.jsx
new file mode 100644
index 000000000..b2aef2156
--- /dev/null
+++ b/core/http/react-ui/src/pages/Explorer.jsx
@@ -0,0 +1,26 @@
+import { useEffect, useRef } from 'react'
+import { useNavigate } from 'react-router-dom'
+
+export default function Explorer() {
+ const navigate = useNavigate()
+
+ return (
+
+
+ LocalAI Explorer
+
+
+ Network visualization and node explorer
+
+
+
+
+
Explorer visualization
+
+
+
navigate('/')} style={{ marginTop: 'var(--spacing-lg)' }}>
+ Back to Home
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Home.jsx b/core/http/react-ui/src/pages/Home.jsx
new file mode 100644
index 000000000..6f48bc2b0
--- /dev/null
+++ b/core/http/react-ui/src/pages/Home.jsx
@@ -0,0 +1,770 @@
+import { useState, useEffect, useRef, useCallback } from 'react'
+import { useNavigate, useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import { useResources } from '../hooks/useResources'
+import { fileToBase64, backendControlApi, systemApi, modelsApi } from '../utils/api'
+import { API_CONFIG } from '../utils/config'
+
+const placeholderMessages = [
+ 'What is the meaning of life?',
+ 'Write a poem about AI',
+ 'Explain quantum computing simply',
+ 'Help me debug my code',
+ 'Tell me a creative story',
+ 'How do neural networks work?',
+ 'Write a haiku about programming',
+ 'Explain blockchain in simple terms',
+ 'What are the best practices for REST APIs?',
+ 'Help me write a cover letter',
+ 'What is the Fibonacci sequence?',
+ 'Explain the theory of relativity',
+]
+
+export default function Home() {
+ const navigate = useNavigate()
+ const { addToast } = useOutletContext()
+ const { resources } = useResources()
+ const [configuredModels, setConfiguredModels] = useState([])
+ const [loadedModels, setLoadedModels] = useState([])
+ const [initialLoaded, setInitialLoaded] = useState(false)
+ const [selectedModel, setSelectedModel] = useState('')
+ const [message, setMessage] = useState('')
+ const [imageFiles, setImageFiles] = useState([])
+ const [audioFiles, setAudioFiles] = useState([])
+ const [textFiles, setTextFiles] = useState([])
+ const [mcpMode, setMcpMode] = useState(false)
+ const [mcpAvailable, setMcpAvailable] = useState(false)
+ const [placeholderIdx, setPlaceholderIdx] = useState(0)
+ const [placeholderText, setPlaceholderText] = useState('')
+ const imageInputRef = useRef(null)
+ const audioInputRef = useRef(null)
+ const fileInputRef = useRef(null)
+
+ // Fetch configured models (to know if any exist) and loaded models (currently running)
+ const fetchSystemInfo = useCallback(async () => {
+ try {
+ const [sysInfo, v1Models] = await Promise.all([
+ systemApi.info().catch(() => null),
+ modelsApi.listV1().catch(() => null),
+ ])
+ if (sysInfo?.loaded_models) {
+ setLoadedModels(sysInfo.loaded_models)
+ }
+ if (v1Models?.data) {
+ setConfiguredModels(v1Models.data)
+ }
+ setInitialLoaded(true)
+ } catch (_e) { setInitialLoaded(true) }
+ }, [])
+
+ useEffect(() => {
+ fetchSystemInfo()
+ const interval = setInterval(fetchSystemInfo, 5000)
+ return () => clearInterval(interval)
+ }, [fetchSystemInfo])
+
+ // Check MCP availability when selected model changes
+ useEffect(() => {
+ if (!selectedModel) {
+ setMcpAvailable(false)
+ setMcpMode(false)
+ return
+ }
+ let cancelled = false
+ modelsApi.getConfigJson(selectedModel).then(cfg => {
+ if (cancelled) return
+ const hasMcp = !!(cfg?.mcp?.remote || cfg?.mcp?.stdio)
+ setMcpAvailable(hasMcp)
+ if (!hasMcp) setMcpMode(false)
+ }).catch(() => {
+ if (!cancelled) {
+ setMcpAvailable(false)
+ setMcpMode(false)
+ }
+ })
+ return () => { cancelled = true }
+ }, [selectedModel])
+
+ const allFiles = [...imageFiles, ...audioFiles, ...textFiles]
+
+ // Animated typewriter placeholder
+ useEffect(() => {
+ const target = placeholderMessages[placeholderIdx]
+ let charIdx = 0
+ setPlaceholderText('')
+ const interval = setInterval(() => {
+ if (charIdx <= target.length) {
+ setPlaceholderText(target.slice(0, charIdx))
+ charIdx++
+ } else {
+ clearInterval(interval)
+ setTimeout(() => {
+ setPlaceholderIdx(prev => (prev + 1) % placeholderMessages.length)
+ }, 2000)
+ }
+ }, 50)
+ return () => clearInterval(interval)
+ }, [placeholderIdx])
+
+ const addFiles = useCallback(async (fileList, setter) => {
+ const newFiles = []
+ for (const file of fileList) {
+ const base64 = await fileToBase64(file)
+ newFiles.push({ name: file.name, type: file.type, base64 })
+ }
+ setter(prev => [...prev, ...newFiles])
+ }, [])
+
+ const removeFile = useCallback((file) => {
+ const removeFn = (prev) => prev.filter(f => f !== file)
+ if (file.type?.startsWith('image/')) setImageFiles(removeFn)
+ else if (file.type?.startsWith('audio/')) setAudioFiles(removeFn)
+ else setTextFiles(removeFn)
+ }, [])
+
+ const doSubmit = useCallback(() => {
+ const text = message.trim() || placeholderText
+ if (!text && allFiles.length === 0) return
+ if (!selectedModel) {
+ addToast('Please select a model first', 'warning')
+ return
+ }
+
+ const chatData = {
+ message: text,
+ model: selectedModel,
+ files: allFiles,
+ mcpMode,
+ newChat: true,
+ }
+ localStorage.setItem('localai_index_chat_data', JSON.stringify(chatData))
+ navigate(`/chat/${encodeURIComponent(selectedModel)}`)
+ }, [message, placeholderText, allFiles, selectedModel, mcpMode, addToast, navigate])
+
+ const handleSubmit = (e) => {
+ if (e) e.preventDefault()
+ doSubmit()
+ }
+
+ const handleStopModel = async (modelName) => {
+ if (!confirm(`Stop model ${modelName}?`)) return
+ try {
+ await backendControlApi.shutdown({ model: modelName })
+ addToast(`Stopped ${modelName}`, 'success')
+ // Refresh loaded models list after a short delay
+ setTimeout(fetchSystemInfo, 500)
+ } catch (err) {
+ addToast(`Failed to stop: ${err.message}`, 'error')
+ }
+ }
+
+ const handleStopAll = async () => {
+ if (!confirm('Stop all loaded models?')) return
+ try {
+ await Promise.all(loadedModels.map(m => backendControlApi.shutdown({ model: m.id })))
+ addToast('All models stopped', 'success')
+ setTimeout(fetchSystemInfo, 1000)
+ } catch (err) {
+ addToast(`Failed to stop: ${err.message}`, 'error')
+ }
+ }
+
+ const hasModels = configuredModels.length > 0
+ const loadedCount = loadedModels.length
+
+ // Resource display
+ const resType = resources?.type
+ const usagePct = resources?.aggregate?.usage_percent ?? resources?.ram?.usage_percent ?? 0
+ const pctColor = usagePct > 90 ? 'var(--color-error)' : usagePct > 70 ? 'var(--color-warning)' : 'var(--color-success)'
+
+ if (!initialLoaded) {
+ return
+ }
+
+ return (
+
+ {hasModels ? (
+ <>
+ {/* Hero with logo */}
+
+
+
How can I help you today?
+
Ask me anything, and I'll do my best to assist you.
+
+
+ {/* Chat input form */}
+
+
+ {/* Quick links */}
+
+
navigate('/manage')}>
+ Installed Models and Backends
+
+
navigate('/browse')}>
+ Browse Gallery
+
+
navigate('/import-model')}>
+ Import Model
+
+
+ Documentation
+
+
+
+ {/* Compact resource indicator */}
+ {resources && (
+
+
+
{resType === 'gpu' ? 'GPU' : 'RAM'}
+
+ {usagePct.toFixed(0)}%
+
+
+
+ )}
+
+ {/* Loaded models status */}
+ {loadedCount > 0 && (
+
+
+
{loadedCount} model{loadedCount !== 1 ? 's' : ''} loaded
+
+ {loadedModels.map(m => (
+
+ {m.id}
+ handleStopModel(m.id)} title="Stop model">
+
+
+
+ ))}
+
+ {loadedCount > 1 && (
+
+ Stop all
+
+ )}
+
+ )}
+ >
+ ) : (
+ /* No models installed wizard */
+
+
+
No Models Installed
+
Get started with LocalAI by installing your first model. Browse our gallery of open-source AI models.
+
+
+ {/* Feature preview cards */}
+
+
+
+
+
+
Model Gallery
+
Browse and install from a curated collection of open-source AI models
+
+
navigate('/import-model')} style={{ cursor: 'pointer' }}>
+
+
+
+
Import Models
+
Import your own models from HuggingFace or local files
+
+
+
+
+
+
API Download
+
Use the API to download and configure models programmatically
+
+
+
+ {/* Setup steps */}
+
+
How to Get Started
+
+
1
+
+
Browse the Model Gallery
+
Visit the model gallery to find the right model for your needs.
+
+
+
+
2
+
+
Install a Model
+
Click install on any model to download and configure it automatically.
+
+
+
+
3
+
+
Start Chatting
+
Once installed, you can chat with your model right from the browser.
+
+
+
+
+ {/* Action buttons */}
+
+
navigate('/browse')}>
+ Browse Model Gallery
+
+
navigate('/import-model')}>
+ Import Model
+
+
+ Getting Started
+
+
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/ImageGen.jsx b/core/http/react-ui/src/pages/ImageGen.jsx
new file mode 100644
index 000000000..d46428ec4
--- /dev/null
+++ b/core/http/react-ui/src/pages/ImageGen.jsx
@@ -0,0 +1,152 @@
+import { useState, useRef } from 'react'
+import { useParams, useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { imageApi, fileToBase64 } from '../utils/api'
+
+const SIZES = ['256x256', '512x512', '768x768', '1024x1024']
+
+export default function ImageGen() {
+ const { model: urlModel } = useParams()
+ const { addToast } = useOutletContext()
+ const [model, setModel] = useState(urlModel || '')
+ const [prompt, setPrompt] = useState('')
+ const [negativePrompt, setNegativePrompt] = useState('')
+ const [size, setSize] = useState('512x512')
+ const [count, setCount] = useState(1)
+ const [steps, setSteps] = useState('')
+ const [seed, setSeed] = useState('')
+ const [loading, setLoading] = useState(false)
+ const [images, setImages] = useState([])
+ const [showAdvanced, setShowAdvanced] = useState(false)
+ const [showImageInputs, setShowImageInputs] = useState(false)
+ const [sourceImage, setSourceImage] = useState(null)
+ const [refImages, setRefImages] = useState([])
+ const sourceRef = useRef(null)
+ const refRef = useRef(null)
+
+ const handleGenerate = async (e) => {
+ e.preventDefault()
+ if (!prompt.trim()) { addToast('Please enter a prompt', 'warning'); return }
+ if (!model) { addToast('Please select a model', 'warning'); return }
+
+ setLoading(true)
+ setImages([])
+
+ let combinedPrompt = prompt.trim()
+ if (negativePrompt.trim()) combinedPrompt += '|' + negativePrompt.trim()
+
+ const body = { model, prompt: combinedPrompt, n: count, size }
+ if (steps) body.step = parseInt(steps)
+ if (seed) body.seed = parseInt(seed)
+ if (sourceImage) body.file = sourceImage
+ if (refImages.length > 0) body.ref_images = refImages
+
+ try {
+ const data = await imageApi.generate(body)
+ setImages(data?.data || [])
+ if (!data?.data?.length) addToast('No images generated', 'warning')
+ } catch (err) {
+ addToast(`Error: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }
+
+ const handleSourceImage = async (e) => {
+ if (e.target.files[0]) setSourceImage(await fileToBase64(e.target.files[0]))
+ }
+
+ const handleRefImages = async (e) => {
+ const arr = []
+ for (const f of e.target.files) arr.push(await fileToBase64(f))
+ setRefImages(prev => [...prev, ...arr])
+ }
+
+ return (
+
+
+
+
Image Generation
+
+
+
+
+ Model
+
+
+
+ Prompt
+ setPrompt(e.target.value)} placeholder="Describe the image you want to generate..." rows={3} onKeyDown={(e) => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleGenerate(e) } }} />
+
+
+ Negative Prompt
+ setNegativePrompt(e.target.value)} placeholder="What to avoid..." rows={2} />
+
+
+
+
+ Size
+ setSize(e.target.value)} style={{ width: '100%' }}>
+ {SIZES.map(s => {s} )}
+
+
+
+ Count (1-4)
+ setCount(parseInt(e.target.value) || 1)} />
+
+
+
+ setShowAdvanced(!showAdvanced)}>
+ Advanced Settings
+
+ {showAdvanced && (
+
+ )}
+
+ setShowImageInputs(!showImageInputs)}>
+ Image Inputs
+
+ {showImageInputs && (
+
+ )}
+
+
+ {loading ? <> Generating...> : <> Generate>}
+
+
+
+
+
+
+ {loading ? (
+
+ ) : images.length > 0 ? (
+
+ {images.map((img, i) => (
+
+
+
+ ))}
+
+ ) : (
+
+
+
Generated images will appear here
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/ImportModel.jsx b/core/http/react-ui/src/pages/ImportModel.jsx
new file mode 100644
index 000000000..2b0e391a5
--- /dev/null
+++ b/core/http/react-ui/src/pages/ImportModel.jsx
@@ -0,0 +1,445 @@
+import { useState, useRef, useCallback, useEffect } from 'react'
+import { useNavigate, useOutletContext } from 'react-router-dom'
+import { modelsApi } from '../utils/api'
+import LoadingSpinner from '../components/LoadingSpinner'
+import CodeEditor from '../components/CodeEditor'
+
+const BACKENDS = [
+ { value: '', label: 'Auto-detect (based on URI)' },
+ { value: 'llama-cpp', label: 'llama-cpp' },
+ { value: 'mlx', label: 'mlx' },
+ { value: 'mlx-vlm', label: 'mlx-vlm' },
+ { value: 'transformers', label: 'transformers' },
+ { value: 'vllm', label: 'vllm' },
+ { value: 'diffusers', label: 'diffusers' },
+]
+
+const URI_FORMATS = [
+ {
+ icon: 'fab fa-hubspot', color: 'var(--color-accent)', title: 'HuggingFace',
+ examples: [
+ { prefix: 'huggingface://', suffix: 'TheBloke/Llama-2-7B-Chat-GGUF', desc: 'Standard HuggingFace format' },
+ { prefix: 'hf://', suffix: 'TheBloke/Llama-2-7B-Chat-GGUF', desc: 'Short HuggingFace format' },
+ { prefix: 'https://huggingface.co/', suffix: 'TheBloke/Llama-2-7B-Chat-GGUF', desc: 'Full HuggingFace URL' },
+ ],
+ },
+ {
+ icon: 'fas fa-globe', color: 'var(--color-primary)', title: 'HTTP/HTTPS URLs',
+ examples: [
+ { prefix: 'https://', suffix: 'example.com/model.gguf', desc: 'Direct download from any HTTPS URL' },
+ ],
+ },
+ {
+ icon: 'fas fa-file', color: 'var(--color-warning)', title: 'Local Files',
+ examples: [
+ { prefix: 'file://', suffix: '/path/to/model.gguf', desc: 'Local file path (absolute)' },
+ { prefix: '', suffix: '/path/to/model.yaml', desc: 'Direct local YAML config file' },
+ ],
+ },
+ {
+ icon: 'fas fa-box', color: '#22d3ee', title: 'OCI Registry',
+ examples: [
+ { prefix: 'oci://', suffix: 'registry.example.com/model:tag', desc: 'OCI container registry' },
+ { prefix: 'ocifile://', suffix: '/path/to/image.tar', desc: 'Local OCI tarball file' },
+ ],
+ },
+ {
+ icon: 'fas fa-cube', color: '#818cf8', title: 'Ollama',
+ examples: [
+ { prefix: 'ollama://', suffix: 'llama2:7b', desc: 'Ollama model format' },
+ ],
+ },
+ {
+ icon: 'fas fa-code', color: '#f472b6', title: 'YAML Configuration Files',
+ examples: [
+ { prefix: '', suffix: 'https://example.com/model.yaml', desc: 'Remote YAML config file' },
+ { prefix: 'file://', suffix: '/path/to/config.yaml', desc: 'Local YAML config file' },
+ ],
+ },
+]
+
+const DEFAULT_YAML = `name: my-model
+backend: llama-cpp
+parameters:
+ model: /path/to/model.gguf
+`
+
+const hintStyle = { marginTop: '4px', fontSize: '0.75rem', color: 'var(--color-text-muted)' }
+
+export default function ImportModel() {
+ const navigate = useNavigate()
+ const { addToast } = useOutletContext()
+
+ const [isAdvancedMode, setIsAdvancedMode] = useState(false)
+ const [importUri, setImportUri] = useState('')
+ const [isSubmitting, setIsSubmitting] = useState(false)
+ const [showGuide, setShowGuide] = useState(false)
+ const [yamlContent, setYamlContent] = useState(DEFAULT_YAML)
+ const [estimate, setEstimate] = useState(null)
+ const [jobProgress, setJobProgress] = useState(null)
+
+ const [prefs, setPrefs] = useState({
+ backend: '', name: '', description: '', quantizations: '',
+ mmproj_quantizations: '', embeddings: false, type: '',
+ pipeline_type: '', scheduler_type: '', enable_parameters: '', cuda: false,
+ })
+ const [customPrefs, setCustomPrefs] = useState([])
+
+ const pollRef = useRef(null)
+
+ useEffect(() => {
+ return () => { if (pollRef.current) clearInterval(pollRef.current) }
+ }, [])
+
+ const updatePref = (key, value) => setPrefs(p => ({ ...p, [key]: value }))
+ const addCustomPref = () => setCustomPrefs(p => [...p, { key: '', value: '' }])
+ const removeCustomPref = (i) => setCustomPrefs(p => p.filter((_, idx) => idx !== i))
+ const updateCustomPref = (i, field, value) => {
+ setCustomPrefs(p => p.map((item, idx) => idx === i ? { ...item, [field]: value } : item))
+ }
+
+ const startJobPolling = useCallback((jobId) => {
+ if (pollRef.current) clearInterval(pollRef.current)
+ pollRef.current = setInterval(async () => {
+ try {
+ const data = await modelsApi.getJobStatus(jobId)
+ if (data.processed || data.progress) {
+ setJobProgress(data.message || data.progress || 'Processing...')
+ }
+ if (data.completed) {
+ clearInterval(pollRef.current)
+ pollRef.current = null
+ setIsSubmitting(false)
+ setJobProgress(null)
+ addToast('Model imported successfully!', 'success')
+ navigate('/manage')
+ } else if (data.error || (data.message && data.message.startsWith('error:'))) {
+ clearInterval(pollRef.current)
+ pollRef.current = null
+ setIsSubmitting(false)
+ setJobProgress(null)
+ let msg = 'Unknown error'
+ if (typeof data.error === 'string') msg = data.error
+ else if (data.error?.message) msg = data.error.message
+ else if (data.message) msg = data.message
+ if (msg.startsWith('error: ')) msg = msg.substring(7)
+ addToast(`Import failed: ${msg}`, 'error')
+ }
+ } catch (err) {
+ console.error('Error polling job status:', err)
+ }
+ }, 1000)
+ }, [addToast, navigate])
+
+ const handleSimpleImport = async () => {
+ if (!importUri.trim()) { addToast('Please enter a model URI', 'error'); return }
+ setIsSubmitting(true)
+ setEstimate(null)
+ try {
+ const prefsObj = {}
+ if (prefs.backend) prefsObj.backend = prefs.backend
+ if (prefs.name.trim()) prefsObj.name = prefs.name.trim()
+ if (prefs.description.trim()) prefsObj.description = prefs.description.trim()
+ if (prefs.quantizations.trim()) prefsObj.quantizations = prefs.quantizations.trim()
+ if (prefs.mmproj_quantizations.trim()) prefsObj.mmproj_quantizations = prefs.mmproj_quantizations.trim()
+ if (prefs.embeddings) prefsObj.embeddings = 'true'
+ if (prefs.type.trim()) prefsObj.type = prefs.type.trim()
+ if (prefs.pipeline_type.trim()) prefsObj.pipeline_type = prefs.pipeline_type.trim()
+ if (prefs.scheduler_type.trim()) prefsObj.scheduler_type = prefs.scheduler_type.trim()
+ if (prefs.enable_parameters.trim()) prefsObj.enable_parameters = prefs.enable_parameters.trim()
+ if (prefs.cuda) prefsObj.cuda = true
+ customPrefs.forEach(cp => {
+ if (cp.key.trim() && cp.value.trim()) prefsObj[cp.key.trim()] = cp.value.trim()
+ })
+
+ const result = await modelsApi.importUri({
+ uri: importUri.trim(),
+ preferences: Object.keys(prefsObj).length > 0 ? prefsObj : null,
+ })
+
+ const hasSize = result.estimated_size_display && result.estimated_size_display !== '0 B'
+ const hasVram = result.estimated_vram_display && result.estimated_vram_display !== '0 B'
+ if (hasSize || hasVram) {
+ setEstimate({ sizeDisplay: result.estimated_size_display || '', vramDisplay: result.estimated_vram_display || '' })
+ }
+
+ const jobId = result.uuid || result.ID
+ if (!jobId) throw new Error('No job ID returned from server')
+
+ let msg = 'Import started! Tracking progress...'
+ const parts = []
+ if (hasSize) parts.push(`Size: ${result.estimated_size_display}`)
+ if (hasVram) parts.push(`VRAM: ${result.estimated_vram_display}`)
+ if (parts.length) msg += ` (${parts.join(' \u00b7 ')})`
+ addToast(msg, 'success')
+ startJobPolling(jobId)
+ } catch (err) {
+ addToast(`Failed to start import: ${err.message}`, 'error')
+ setIsSubmitting(false)
+ }
+ }
+
+ const handleAdvancedImport = async () => {
+ if (!yamlContent.trim()) { addToast('Please enter YAML configuration', 'error'); return }
+ setIsSubmitting(true)
+ try {
+ await modelsApi.importConfig(yamlContent, 'application/x-yaml')
+ addToast('Model configuration imported successfully!', 'success')
+ navigate('/manage')
+ } catch (err) {
+ addToast(`Import failed: ${err.message}`, 'error')
+ } finally {
+ setIsSubmitting(false)
+ }
+ }
+
+ return (
+
+
+
+
Import New Model
+
+ {isAdvancedMode ? 'Configure your model settings using YAML' : 'Import a model from URI with preferences'}
+
+
+
+ setIsAdvancedMode(!isAdvancedMode)}>
+
+ {isAdvancedMode ? ' Simple Mode' : ' Advanced Mode'}
+
+ {!isAdvancedMode ? (
+
+ {isSubmitting ? <> Importing...> : <> Import Model>}
+
+ ) : (
+
+ {isSubmitting ? <> Saving...> : <> Create>}
+
+ )}
+
+
+
+ {/* Estimate banner */}
+ {!isAdvancedMode && estimate && (
+
+
+
+ Estimated requirements
+ {estimate.sizeDisplay && estimate.sizeDisplay !== '0 B' && (
+ Download: {estimate.sizeDisplay}
+ )}
+ {estimate.vramDisplay && estimate.vramDisplay !== '0 B' && (
+ VRAM: {estimate.vramDisplay}
+ )}
+
+
+ )}
+
+ {/* Job progress */}
+ {jobProgress && (
+
+ )}
+
+ {/* Simple Import Mode */}
+ {!isAdvancedMode && (
+
+
+
+ Import from URI
+
+
+ {/* URI Input */}
+
+
+
setImportUri(e.target.value)}
+ placeholder="huggingface://TheBloke/Llama-2-7B-Chat-GGUF or https://example.com/model.gguf"
+ disabled={isSubmitting}
+ />
+
Enter the URI or path to the model file you want to import
+
+ {/* URI format guide */}
+
setShowGuide(!showGuide)}
+ style={{ marginTop: 'var(--spacing-sm)', background: 'none', border: 'none', color: 'var(--color-text-secondary)', cursor: 'pointer', fontSize: '0.8125rem', display: 'flex', alignItems: 'center', gap: '6px', padding: 0 }}
+ >
+
+
+ Supported URI Formats
+
+ {showGuide && (
+
+ {URI_FORMATS.map((fmt, i) => (
+
+
+
+ {fmt.title}
+
+
+ {fmt.examples.map((ex, j) => (
+
+
{ex.prefix}
+
{ex.suffix}
+
{ex.desc}
+
+ ))}
+
+
+ ))}
+
+ )}
+
+
+ {/* Preferences */}
+
+
+ Preferences (Optional)
+
+
+
+
+
+ Common Preferences
+
+
+
+
+
Backend
+
updatePref('backend', e.target.value)} disabled={isSubmitting}>
+ {BACKENDS.map(b => {b.label} )}
+
+
Force a specific backend. Leave empty to auto-detect from URI.
+
+
+
+
Model Name
+
updatePref('name', e.target.value)} placeholder="Leave empty to use filename" disabled={isSubmitting} />
+
Custom name for the model. If empty, the filename will be used.
+
+
+
+
Description
+
updatePref('description', e.target.value)} placeholder="Leave empty to use default description" disabled={isSubmitting} />
+ Custom description for the model.
+
+
+
+
Quantizations
+
updatePref('quantizations', e.target.value)} placeholder="q4_k_m,q4_k_s,q3_k_m (comma-separated)" disabled={isSubmitting} />
+
Preferred quantizations (comma-separated). Leave empty for default (q4_k_m).
+
+
+
+
MMProj Quantizations
+
updatePref('mmproj_quantizations', e.target.value)} placeholder="fp16,fp32 (comma-separated)" disabled={isSubmitting} />
+
Preferred MMProj quantizations. Leave empty for default (fp16).
+
+
+
+
+ updatePref('embeddings', e.target.checked)} disabled={isSubmitting} />
+
+ Embeddings
+
+
+
Enable embeddings support for this model.
+
+
+
+
Model Type
+
updatePref('type', e.target.value)} placeholder="AutoModelForCausalLM (for transformers backend)" disabled={isSubmitting} />
+
Model type for transformers backend. Examples: AutoModelForCausalLM, SentenceTransformer, Mamba.
+
+
+ {/* Diffusers-specific fields */}
+ {prefs.backend === 'diffusers' && (
+ <>
+
+
Pipeline Type
+
updatePref('pipeline_type', e.target.value)} placeholder="StableDiffusionPipeline" disabled={isSubmitting} />
+
Pipeline type for diffusers backend.
+
+
+
Scheduler Type
+
updatePref('scheduler_type', e.target.value)} placeholder="k_dpmpp_2m (optional)" disabled={isSubmitting} />
+
Scheduler type for diffusers backend. Examples: k_dpmpp_2m, euler_a, ddim.
+
+
+
Enable Parameters
+
updatePref('enable_parameters', e.target.value)} placeholder="negative_prompt,num_inference_steps (comma-separated)" disabled={isSubmitting} />
+
Enabled parameters for diffusers backend (comma-separated).
+
+
+
+ updatePref('cuda', e.target.checked)} disabled={isSubmitting} />
+
+ CUDA
+
+
+
Enable CUDA support for GPU acceleration.
+
+ >
+ )}
+
+
+
+ {/* Custom Preferences */}
+
+
+
+ Custom Preferences
+
+
+ Add Custom
+
+
+ {customPrefs.map((cp, i) => (
+
+ updateCustomPref(i, 'key', e.target.value)} placeholder="Key" disabled={isSubmitting} style={{ flex: 1 }} />
+ :
+ updateCustomPref(i, 'value', e.target.value)} placeholder="Value" disabled={isSubmitting} style={{ flex: 1 }} />
+ removeCustomPref(i)} disabled={isSubmitting} style={{ color: 'var(--color-error)' }}>
+
+
+
+ ))}
+
Add custom key-value pairs for advanced configuration.
+
+
+
+ )}
+
+ {/* Advanced YAML Editor Mode */}
+ {isAdvancedMode && (
+
+
+
+
+ YAML Configuration Editor
+
+ { navigator.clipboard.writeText(yamlContent); addToast('Copied to clipboard', 'success') }}>
+ Copy
+
+
+
+
+ )}
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Login.jsx b/core/http/react-ui/src/pages/Login.jsx
new file mode 100644
index 000000000..ffaf680c4
--- /dev/null
+++ b/core/http/react-ui/src/pages/Login.jsx
@@ -0,0 +1,58 @@
+import { useState } from 'react'
+import { useNavigate } from 'react-router-dom'
+
+export default function Login() {
+ const navigate = useNavigate()
+ const [token, setToken] = useState('')
+ const [error, setError] = useState('')
+
+ const handleSubmit = (e) => {
+ e.preventDefault()
+ if (!token.trim()) {
+ setError('Please enter a token')
+ return
+ }
+ // Set token as cookie
+ document.cookie = `token=${encodeURIComponent(token.trim())}; path=/; SameSite=Strict`
+ navigate('/')
+ }
+
+ return (
+
+
+
+
+
+ LocalAI
+
+
Enter your API token to continue
+
+
+
+
+
API Token
+
{ setToken(e.target.value); setError('') }}
+ placeholder="Enter token..."
+ autoFocus
+ />
+ {error &&
{error}
}
+
+
+ Login
+
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Manage.jsx b/core/http/react-ui/src/pages/Manage.jsx
new file mode 100644
index 000000000..522f873d0
--- /dev/null
+++ b/core/http/react-ui/src/pages/Manage.jsx
@@ -0,0 +1,350 @@
+import { useState, useEffect, useCallback } from 'react'
+import { useNavigate, useOutletContext } from 'react-router-dom'
+import ResourceMonitor from '../components/ResourceMonitor'
+import { useModels } from '../hooks/useModels'
+import { backendControlApi, modelsApi, backendsApi, systemApi } from '../utils/api'
+
+export default function Manage() {
+ const { addToast } = useOutletContext()
+ const navigate = useNavigate()
+ const { models, loading: modelsLoading, refetch: refetchModels } = useModels()
+ const [loadedModelIds, setLoadedModelIds] = useState(new Set())
+ const [backends, setBackends] = useState([])
+ const [backendsLoading, setBackendsLoading] = useState(true)
+ const [reloading, setReloading] = useState(false)
+ const [reinstallingBackends, setReinstallingBackends] = useState(new Set())
+
+ // Refresh the set of model ids currently loaded in memory, read from the
+ // system info endpoint. Any failure resets the set to empty rather than
+ // surfacing an error (best-effort status display).
+ const fetchLoadedModels = useCallback(async () => {
+ try {
+ const info = await systemApi.info()
+ const loaded = Array.isArray(info?.loaded_models) ? info.loaded_models : []
+ setLoadedModelIds(new Set(loaded.map(m => m.id)))
+ } catch {
+ setLoadedModelIds(new Set())
+ }
+ }, [])
+
+ // Load the list of installed backends; toggles the loading flag around the
+ // request and falls back to an empty list on failure.
+ const fetchBackends = useCallback(async () => {
+ try {
+ setBackendsLoading(true)
+ const data = await backendsApi.listInstalled()
+ setBackends(Array.isArray(data) ? data : [])
+ } catch {
+ setBackends([])
+ } finally {
+ setBackendsLoading(false)
+ }
+ }, [])
+
+ // Initial data load on mount (both callbacks have stable identities).
+ useEffect(() => {
+ fetchLoadedModels()
+ fetchBackends()
+ }, [fetchLoadedModels, fetchBackends])
+
+ // Ask the backend to shut down a running model (after user confirmation),
+ // then re-poll loaded models shortly after so the status column updates.
+ const handleStopModel = async (modelName) => {
+ if (!confirm(`Stop model ${modelName}?`)) return
+ try {
+ await backendControlApi.shutdown({ model: modelName })
+ addToast(`Stopped ${modelName}`, 'success')
+ // small delay gives the server time to actually unload the model
+ setTimeout(fetchLoadedModels, 500)
+ } catch (err) {
+ addToast(`Failed to stop: ${err.message}`, 'error')
+ }
+ }
+
+ // Permanently delete an installed model (after confirmation) and refresh
+ // both the model list and the loaded-model status.
+ const handleDeleteModel = async (modelName) => {
+ if (!confirm(`Delete model ${modelName}? This cannot be undone.`)) return
+ try {
+ await modelsApi.deleteByName(modelName)
+ addToast(`Deleted ${modelName}`, 'success')
+ refetchModels()
+ fetchLoadedModels()
+ } catch (err) {
+ addToast(`Failed to delete: ${err.message}`, 'error')
+ }
+ }
+
+ // Trigger a server-side model reload, then refresh local state after a
+ // 1s grace period; `reloading` drives the button's disabled/spinner state.
+ const handleReload = async () => {
+ setReloading(true)
+ try {
+ await modelsApi.reload()
+ addToast('Models reloaded', 'success')
+ setTimeout(() => { refetchModels(); fetchLoadedModels(); setReloading(false) }, 1000)
+ } catch (err) {
+ addToast(`Reload failed: ${err.message}`, 'error')
+ setReloading(false)
+ }
+ }
+
+ // Kick off a backend reinstall; the name is tracked in `reinstallingBackends`
+ // to disable its button while the request is in flight.
+ // NOTE(review): the flag is cleared in `finally` as soon as the install
+ // request resolves — if installation continues asynchronously server-side
+ // (the toast wording suggests it does), the spinner clears early; confirm.
+ const handleReinstallBackend = async (name) => {
+ try {
+ setReinstallingBackends(prev => new Set(prev).add(name))
+ await backendsApi.install(name)
+ addToast(`Reinstalling ${name}...`, 'info')
+ } catch (err) {
+ addToast(`Failed to reinstall: ${err.message}`, 'error')
+ } finally {
+ setReinstallingBackends(prev => {
+ const next = new Set(prev)
+ next.delete(name)
+ return next
+ })
+ }
+ }
+
+ // Delete a user-installed backend (after confirmation) and refresh the list.
+ const handleDeleteBackend = async (name) => {
+ if (!confirm(`Delete backend ${name}?`)) return
+ try {
+ await backendsApi.deleteInstalled(name)
+ addToast(`Deleted backend ${name}`, 'success')
+ fetchBackends()
+ } catch (err) {
+ addToast(`Failed to delete backend: ${err.message}`, 'error')
+ }
+ }
+
+ return (
+
+
+
Model & Backend Management
+
+
+ {/* Resource Monitor */}
+
+
+ {/* Models Section */}
+
+
+
+ Models ({models.length})
+
+
+
+ {reloading ? 'Updating...' : 'Update'}
+
+
+
+ {modelsLoading ? (
+
+ Loading models...
+
+ ) : models.length === 0 ? (
+
+
+
No models installed yet
+
+ Install a model from the gallery to get started.
+
+
+
navigate('/browse')}>
+ Browse Gallery
+
+
navigate('/import-model')}>
+ Import Model
+
+
+ Documentation
+
+
+
+ ) : (
+
+
+
+
+ Name
+ Status
+ Backend
+ Use Cases
+ Actions
+
+
+
+ {models.map(model => (
+
+
+
+
+
+ {loadedModelIds.has(model.id) ? (
+
+ Running
+
+ ) : (
+
+ Idle
+
+ )}
+
+
+ Auto
+
+
+
+
+
+
+ {loadedModelIds.has(model.id) && (
+ handleStopModel(model.id)}
+ title="Stop model"
+ >
+
+
+ )}
+ handleDeleteModel(model.id)}
+ title="Delete model"
+ >
+
+
+
+
+
+ ))}
+
+
+
+ )}
+
+
+ {/* Backends Section */}
+
+
+
+ Backends ({backends.length})
+
+
+
+ {backendsLoading ? (
+
+ Loading backends...
+
+ ) : backends.length === 0 ? (
+
+
+
No backends installed yet
+
+ Install backends from the gallery to extend functionality.
+
+
+
+ ) : (
+
+
+
+
+ Name
+ Type
+ Metadata
+ Actions
+
+
+
+ {backends.map((backend, i) => (
+
+
+
+
+ {backend.Name}
+
+
+
+
+ {backend.IsSystem ? (
+
+ System
+
+ ) : (
+
+ User
+
+ )}
+ {backend.IsMeta && (
+
+ Meta
+
+ )}
+
+
+
+
+ {backend.Metadata?.alias && (
+
+
+ Alias: {backend.Metadata.alias}
+
+ )}
+ {backend.Metadata?.meta_backend_for && (
+
+
+ For: {backend.Metadata.meta_backend_for}
+
+ )}
+ {backend.Metadata?.installed_at && (
+
+
+ {backend.Metadata.installed_at}
+
+ )}
+ {!backend.Metadata?.alias && !backend.Metadata?.meta_backend_for && !backend.Metadata?.installed_at && '—'}
+
+
+
+
+ {!backend.IsSystem ? (
+ <>
+ handleReinstallBackend(backend.Name)}
+ disabled={reinstallingBackends.has(backend.Name)}
+ title="Reinstall"
+ >
+
+
+ handleDeleteBackend(backend.Name)}
+ title="Delete"
+ >
+
+
+ >
+ ) : (
+ —
+ )}
+
+
+
+ ))}
+
+
+
+ )}
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/ModelEditor.jsx b/core/http/react-ui/src/pages/ModelEditor.jsx
new file mode 100644
index 000000000..939bad301
--- /dev/null
+++ b/core/http/react-ui/src/pages/ModelEditor.jsx
@@ -0,0 +1,70 @@
+import { useState, useEffect } from 'react'
+import { useParams, useNavigate, useOutletContext } from 'react-router-dom'
+import { modelsApi } from '../utils/api'
+import LoadingSpinner from '../components/LoadingSpinner'
+import CodeEditor from '../components/CodeEditor'
+
+export default function ModelEditor() {
+ const { name } = useParams()
+ const navigate = useNavigate()
+ const { addToast } = useOutletContext()
+ const [config, setConfig] = useState('')
+ const [loading, setLoading] = useState(true)
+ const [saving, setSaving] = useState(false)
+
+ // Fetch the model's editable config when the route param changes.
+ // Clears the loading flag on both success and failure so the editor
+ // (or an empty buffer) always renders after the attempt.
+ useEffect(() => {
+ if (!name) return
+ modelsApi.getEditConfig(name).then(data => {
+ setConfig(data?.config || '')
+ setLoading(false)
+ }).catch(err => {
+ addToast(`Failed to load config: ${err.message}`, 'error')
+ setLoading(false)
+ })
+ }, [name, addToast])
+
+ const handleSave = async () => {
+ setSaving(true)
+ try {
+ // Send raw YAML/text to the edit endpoint (not JSON-encoded)
+ const response = await fetch(`/models/edit/${encodeURIComponent(name)}`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/x-yaml' },
+ body: config,
+ })
+ const data = await response.json()
+ if (!response.ok || data.success === false) {
+ throw new Error(data.error || `HTTP ${response.status}`)
+ }
+ addToast('Config saved', 'success')
+ } catch (err) {
+ addToast(`Save failed: ${err.message}`, 'error')
+ } finally {
+ setSaving(false)
+ }
+ }
+
+ if (loading) return
+
+ return (
+
+
+
+
Model Editor
+
{decodeURIComponent(name)}
+
+
navigate('/manage')}>
+ Back
+
+
+
+
+
+
+
+ {saving ? <> Saving...> : <> Save>}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Models.jsx b/core/http/react-ui/src/pages/Models.jsx
new file mode 100644
index 000000000..ce476876a
--- /dev/null
+++ b/core/http/react-ui/src/pages/Models.jsx
@@ -0,0 +1,554 @@
+import { useState, useCallback, useEffect, useRef } from 'react'
+import { useNavigate, useOutletContext } from 'react-router-dom'
+import { modelsApi } from '../utils/api'
+import { useOperations } from '../hooks/useOperations'
+import { useResources } from '../hooks/useResources'
+import { formatBytes } from '../utils/format'
+
+
+// Humorous status lines cycled by GalleryLoader while the gallery loads;
+// each entry pairs display text with a Font Awesome icon class.
+const LOADING_PHRASES = [
+ { text: 'Rounding up the neural networks...', icon: 'fa-brain' },
+ { text: 'Asking the models to line up nicely...', icon: 'fa-people-line' },
+ { text: 'Convincing transformers to transform...', icon: 'fa-wand-magic-sparkles' },
+ { text: 'Herding digital llamas...', icon: 'fa-horse' },
+ { text: 'Downloading more RAM... just kidding', icon: 'fa-memory' },
+ { text: 'Counting parameters... lost count at a billion', icon: 'fa-calculator' },
+ { text: 'Untangling attention heads...', icon: 'fa-diagram-project' },
+ { text: 'Warming up the GPUs...', icon: 'fa-fire' },
+ { text: 'Teaching AI to sit and stay...', icon: 'fa-graduation-cap' },
+ { text: 'Polishing the weights and biases...', icon: 'fa-gem' },
+ { text: 'Stacking layers like pancakes...', icon: 'fa-layer-group' },
+ { text: 'Negotiating with the token budget...', icon: 'fa-coins' },
+ { text: 'Fetching models from the cloud mines...', icon: 'fa-cloud-arrow-down' },
+ { text: 'Calibrating the vibe check algorithm...', icon: 'fa-gauge-high' },
+ { text: 'Optimizing inference with good intentions...', icon: 'fa-bolt' },
+ { text: 'Measuring GPU with a ruler...', icon: 'fa-ruler' },
+ { text: 'Will it fit? Asking the VRAM oracle...', icon: 'fa-microchip' },
+ { text: 'Playing Tetris with model layers...', icon: 'fa-cubes' },
+ { text: 'Checking if we need more RGB...', icon: 'fa-rainbow' },
+ { text: 'Squeezing tensors into memory...', icon: 'fa-compress' },
+ { text: 'Whispering sweet nothings to CUDA cores...', icon: 'fa-heart' },
+ { text: 'Asking the electrons to scoot over...', icon: 'fa-atom' },
+ { text: 'Defragmenting the flux capacitor...', icon: 'fa-clock-rotate-left' },
+ { text: 'Consulting the tensor gods...', icon: 'fa-hands-praying' },
+ { text: 'Checking under the GPU\'s hood...', icon: 'fa-car' },
+ { text: 'Seeing if the hamsters can run faster...', icon: 'fa-fan' },
+ { text: 'Running very important math... carry the 1...', icon: 'fa-square-root-variable' },
+ { text: 'Poking the memory bus gently...', icon: 'fa-bus' },
+ { text: 'Bribing the scheduler with clock cycles...', icon: 'fa-stopwatch' },
+ { text: 'Asking models to share their VRAM nicely...', icon: 'fa-handshake' },
+]
+
+// Loading placeholder for the model gallery: animated dots, a phrase from
+// LOADING_PHRASES that rotates every 2.8s with a 300ms fade, and skeleton rows.
+function GalleryLoader() {
+ // start on a random phrase so repeated loads don't always show the same line
+ const [idx, setIdx] = useState(() => Math.floor(Math.random() * LOADING_PHRASES.length))
+ const [fade, setFade] = useState(true)
+
+ // Rotate phrases: fade out, swap after 300ms, fade back in.
+ // NOTE(review): the inner setTimeout is not cleared on unmount, so a state
+ // update can fire after unmount if the component disappears mid-fade —
+ // harmless in React 18 but worth confirming/cleaning up.
+ useEffect(() => {
+ const interval = setInterval(() => {
+ setFade(false)
+ setTimeout(() => {
+ setIdx(prev => (prev + 1) % LOADING_PHRASES.length)
+ setFade(true)
+ }, 300)
+ }, 2800)
+ return () => clearInterval(interval)
+ }, [])
+
+ const phrase = LOADING_PHRASES[idx]
+
+ return (
+
+ {/* Animated dots */}
+
+ {[0, 1, 2, 3, 4].map(i => (
+
+ ))}
+
+ {/* Rotating phrase */}
+
+
+ {phrase.text}
+
+ {/* Skeleton rows */}
+
+ {[0.9, 0.7, 0.5].map((opacity, i) => (
+
+ ))}
+
+
+
+ )
+}
+
+// Gallery filter chips: `key` is the search term sent to the API ('' = all),
+// `label` is the chip text, `icon` the Font Awesome class.
+const FILTERS = [
+ { key: '', label: 'All', icon: 'fa-layer-group' },
+ { key: 'llm', label: 'LLM', icon: 'fa-brain' },
+ { key: 'sd', label: 'Image', icon: 'fa-image' },
+ { key: 'multimodal', label: 'Multimodal', icon: 'fa-shapes' },
+ { key: 'vision', label: 'Vision', icon: 'fa-eye' },
+ { key: 'tts', label: 'TTS', icon: 'fa-microphone' },
+ { key: 'stt', label: 'STT', icon: 'fa-headphones' },
+ { key: 'embedding', label: 'Embedding', icon: 'fa-vector-square' },
+ { key: 'reranker', label: 'Rerank', icon: 'fa-sort' },
+]
+
+export default function Models() {
+ const { addToast } = useOutletContext()
+ const navigate = useNavigate()
+ const { operations } = useOperations()
+ const { resources } = useResources()
+ const [models, setModels] = useState([])
+ const [loading, setLoading] = useState(true)
+ const [page, setPage] = useState(1)
+ const [totalPages, setTotalPages] = useState(1)
+ const [search, setSearch] = useState('')
+ const [filter, setFilter] = useState('')
+ const [sort, setSort] = useState('')
+ const [order, setOrder] = useState('asc')
+ const [installing, setInstalling] = useState(new Set())
+ const [selectedModel, setSelectedModel] = useState(null)
+ const [stats, setStats] = useState({ total: 0, installed: 0, repositories: 0 })
+ const debounceRef = useRef(null)
+
+ // Total GPU memory for "fits" check
+ const totalGpuMemory = resources?.aggregate?.total_memory || 0
+
+ // Query the model gallery. `params` lets callers override the current
+ // search/filter/sort/page state for this one request (used by the debounced
+ // search handler, which fires before state has re-rendered).
+ const fetchModels = useCallback(async (params = {}) => {
+ try {
+ setLoading(true)
+ const searchVal = params.search !== undefined ? params.search : search
+ const filterVal = params.filter !== undefined ? params.filter : filter
+ const sortVal = params.sort !== undefined ? params.sort : sort
+ // Combine search text and filter into 'term' param
+ const term = searchVal || filterVal || ''
+ const queryParams = {
+ page: params.page || page,
+ items: 21,
+ }
+ if (term) queryParams.term = term
+ if (sortVal) {
+ queryParams.sort = sortVal
+ queryParams.order = params.order || order
+ }
+ const data = await modelsApi.list(queryParams)
+ setModels(data?.models || [])
+ // accept either camelCase or snake_case from the API
+ setTotalPages(data?.totalPages || data?.total_pages || 1)
+ // NOTE(review): this replaces the whole stats object, dropping the
+ // initial `repositories` field — confirm nothing renders it.
+ setStats({
+ total: data?.availableModels || 0,
+ installed: data?.installedModels || 0,
+ })
+ } catch (err) {
+ addToast(`Failed to load models: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }, [page, search, filter, sort, order, addToast])
+
+ // Refetch when paging/filter/sort change. `search` is deliberately absent:
+ // search refetches via the debounced handleSearch instead.
+ useEffect(() => {
+ fetchModels()
+ }, [page, filter, sort, order])
+
+ // Update the search box immediately, but debounce the actual API query by
+ // 500ms; each new keystroke cancels the pending request and resets to page 1.
+ const handleSearch = (value) => {
+ setSearch(value)
+ if (debounceRef.current) clearTimeout(debounceRef.current)
+ debounceRef.current = setTimeout(() => {
+ setPage(1)
+ fetchModels({ search: value, page: 1 })
+ }, 500)
+ }
+
+ // Column-header sort: clicking the active column flips the order,
+ // clicking a new column sorts it ascending.
+ const handleSort = (col) => {
+ if (sort === col) {
+ setOrder(o => o === 'asc' ? 'desc' : 'asc')
+ } else {
+ setSort(col)
+ setOrder('asc')
+ }
+ }
+
+ const handleInstall = async (modelId) => {
+ try {
+ setInstalling(prev => new Set(prev).add(modelId))
+ await modelsApi.install(modelId)
+ addToast(`Installing ${modelId}...`, 'info')
+ } catch (err) {
+ addToast(`Failed to install: ${err.message}`, 'error')
+ }
+ }
+
+ // Delete an installed gallery model (after confirmation) and refetch the list.
+ const handleDelete = async (modelId) => {
+ if (!confirm(`Delete model ${modelId}?`)) return
+ try {
+ await modelsApi.delete(modelId)
+ addToast(`Deleting ${modelId}...`, 'info')
+ fetchModels()
+ } catch (err) {
+ addToast(`Failed to delete: ${err.message}`, 'error')
+ }
+ }
+
+ // True if an install was started locally or a matching background operation
+ // is still in flight.
+ const isInstalling = (modelId) => {
+ return installing.has(modelId) || operations.some(op =>
+ op.name === modelId && !op.completed && !op.error
+ )
+ }
+
+ // Progress (0 when unknown) of the in-flight operation for this model.
+ const getOperationProgress = (modelId) => {
+ const op = operations.find(o => o.name === modelId && !o.completed && !o.error)
+ return op?.progress ?? 0
+ }
+
+ // Rough "will it fit" check against total GPU memory with a 5% headroom;
+ // returns null when either figure is unavailable so the UI can hide the badge.
+ const fitsGpu = (vramBytes) => {
+ if (!vramBytes || !totalGpuMemory) return null
+ return vramBytes <= totalGpuMemory * 0.95
+ }
+
+ return (
+
+
+
+
Model Gallery
+
Discover and install AI models for your workflows
+
+
+
+
+
{stats.total}
+
Available
+
+
+
+
navigate('/import-model')}>
+ Import Model
+
+
+
+
+ {/* Search */}
+
+
+ handleSearch(e.target.value)}
+ />
+
+
+ {/* Filter buttons */}
+
+ {FILTERS.map(f => (
+ { setFilter(f.key); setPage(1) }}
+ >
+
+ {f.label}
+
+ ))}
+
+
+ {/* Table */}
+ {loading ? (
+
+ ) : models.length === 0 ? (
+
+
+
No models found
+
Try adjusting your search or filters
+
+ ) : (
+
+
+
+
+
+
+ handleSort('name')}>
+ Model Name {sort === 'name' && }
+
+ Description
+ Size / VRAM
+ handleSort('status')}>
+ Status {sort === 'status' && }
+
+ Actions
+
+
+
+ {models.map(model => {
+ const name = model.name || model.id
+ const installing = isInstalling(name)
+ const progress = getOperationProgress(name)
+ const fit = fitsGpu(model.estimated_vram_bytes)
+
+ return (
+
+ {/* Icon */}
+
+
+ {model.icon ? (
+
+ ) : (
+
+ )}
+
+
+
+ {/* Name */}
+
+
+
{name}
+ {model.trustRemoteCode && (
+
+
+ Trust Remote Code
+
+
+ )}
+
+
+
+ {/* Description */}
+
+
+ {model.description || '—'}
+
+
+
+ {/* Size / VRAM */}
+
+
+ {(model.estimated_size_display || model.estimated_vram_display) ? (
+ <>
+
+ {model.estimated_size_display && model.estimated_size_display !== '0 B' && (
+ Size: {model.estimated_size_display}
+ )}
+ {model.estimated_size_display && model.estimated_size_display !== '0 B' && model.estimated_vram_display && model.estimated_vram_display !== '0 B' && ' · '}
+ {model.estimated_vram_display && model.estimated_vram_display !== '0 B' && (
+ VRAM: {model.estimated_vram_display}
+ )}
+
+ {fit !== null && (
+
+
+
+ {fit ? 'Fits' : 'May not fit'}
+
+
+ )}
+ >
+ ) : (
+ —
+ )}
+
+
+
+ {/* Status */}
+
+ {installing ? (
+
+
+ Installing...
+
+ {progress > 0 && (
+
+ )}
+
+ ) : model.installed ? (
+
+ Installed
+
+ ) : (
+
+ Not Installed
+
+ )}
+
+
+ {/* Actions */}
+
+
+ setSelectedModel(model)}
+ title="Details"
+ >
+
+
+ {model.installed ? (
+ <>
+ handleInstall(name)} title="Reinstall">
+
+
+ handleDelete(name)} title="Delete">
+
+
+ >
+ ) : (
+ handleInstall(name)}
+ disabled={installing}
+ title="Install"
+ >
+
+
+ )}
+
+
+
+ )
+ })}
+
+
+
+
+ )}
+
+ {/* Pagination */}
+ {totalPages > 1 && (
+
+ setPage(p => Math.max(1, p - 1))} disabled={page === 1}>
+
+
+
+ {page} / {totalPages}
+
+ setPage(p => Math.min(totalPages, p + 1))} disabled={page === totalPages}>
+
+
+
+ )}
+
+ {/* Detail Modal */}
+ {selectedModel && (
+
setSelectedModel(null)}>
+
e.stopPropagation()}>
+ {/* Modal header */}
+
+
{selectedModel.name}
+ setSelectedModel(null)}>
+
+
+
+ {/* Modal body */}
+
+ {/* Icon */}
+ {selectedModel.icon && (
+
+
+
+ )}
+ {/* Description */}
+ {selectedModel.description && (
+
+ {selectedModel.description}
+
+ )}
+ {/* Size/VRAM */}
+ {(selectedModel.estimated_size_display || selectedModel.estimated_vram_display) && (
+
+ {selectedModel.estimated_size_display &&
Size: {selectedModel.estimated_size_display}
}
+ {selectedModel.estimated_vram_display &&
VRAM: {selectedModel.estimated_vram_display}
}
+
+ )}
+ {/* Tags */}
+ {selectedModel.tags?.length > 0 && (
+
+ {selectedModel.tags.map(tag => (
+ {tag}
+ ))}
+
+ )}
+ {/* Links */}
+ {selectedModel.urls?.length > 0 && (
+
+
Links
+ {selectedModel.urls.map((url, i) => (
+
+ {url}
+
+ ))}
+
+ )}
+
+ {/* Modal footer */}
+
+ setSelectedModel(null)}>Close
+
+
+
+ )}
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/NotFound.jsx b/core/http/react-ui/src/pages/NotFound.jsx
new file mode 100644
index 000000000..c5d1a480f
--- /dev/null
+++ b/core/http/react-ui/src/pages/NotFound.jsx
@@ -0,0 +1,19 @@
+import { useNavigate } from 'react-router-dom'
+
+export default function NotFound() {
+ const navigate = useNavigate()
+
+ return (
+
+
+
+
404
+
Page Not Found
+
The page you're looking for doesn't exist.
+
navigate('/')}>
+ Go Home
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/P2P.jsx b/core/http/react-ui/src/pages/P2P.jsx
new file mode 100644
index 000000000..c1f97a4ec
--- /dev/null
+++ b/core/http/react-ui/src/pages/P2P.jsx
@@ -0,0 +1,684 @@
+import { useState, useEffect, useCallback } from 'react'
+import { useOutletContext } from 'react-router-dom'
+import { p2pApi } from '../utils/api'
+import LoadingSpinner from '../components/LoadingSpinner'
+
+function NodeCard({ node, label, iconColor, iconBg }) {
+ return (
+
+
+
+
+
+
+
+
{label}
+
+ {node.id}
+
+
+
+
+
+
+ {node.isOnline ? 'Online' : 'Offline'}
+
+
+
+
+
+ Updated: {new Date().toLocaleTimeString()}
+
+
+ )
+}
+
+function CommandBlock({ command, addToast }) {
+ const copy = () => {
+ navigator.clipboard.writeText(command)
+ addToast('Copied to clipboard', 'success', 2000)
+ }
+ return (
+
+ )
+}
+
+function StepNumber({ n, bg, color }) {
+ return (
+
{n}
+ )
+}
+
+export default function P2P() {
+ const { addToast } = useOutletContext()
+ const [workers, setWorkers] = useState([])
+ const [federation, setFederation] = useState([])
+ const [stats, setStats] = useState({ workers: { online: 0, total: 0 }, federated: { online: 0, total: 0 } })
+ const [loading, setLoading] = useState(true)
+ const [enabled, setEnabled] = useState(false)
+ const [token, setToken] = useState('')
+ const [activeTab, setActiveTab] = useState('federation')
+
+ // Poll all P2P endpoints in parallel. allSettled is used so one failing
+ // endpoint doesn't hide the others; the presence of a token is what
+ // determines whether P2P is considered enabled.
+ const fetchData = useCallback(async () => {
+ try {
+ const [wRes, fRes, sRes, tRes] = await Promise.allSettled([
+ p2pApi.getWorkers(),
+ p2pApi.getFederation(),
+ p2pApi.getStats(),
+ p2pApi.getToken(),
+ ])
+
+ // token endpoint may return a bare string or { token }
+ let p2pToken = ''
+ if (tRes.status === 'fulfilled') {
+ p2pToken = (typeof tRes.value === 'string' ? tRes.value : (tRes.value?.token || '')).trim()
+ }
+ setToken(p2pToken)
+ setEnabled(!!p2pToken)
+
+ // Only trust node/stat data when P2P is actually enabled.
+ if (p2pToken) {
+ if (wRes.status === 'fulfilled') {
+ const data = wRes.value
+ setWorkers(data?.nodes || (Array.isArray(data) ? data : []))
+ }
+ if (fRes.status === 'fulfilled') {
+ const data = fRes.value
+ setFederation(data?.nodes || (Array.isArray(data) ? data : []))
+ }
+ if (sRes.status === 'fulfilled') {
+ setStats(sRes.value)
+ }
+ }
+ } catch {
+ // allSettled never rejects; this only catches errors thrown while
+ // processing the results above
+ setEnabled(false)
+ } finally {
+ setLoading(false)
+ }
+ }, [])
+
+ // Initial load plus a 3s refresh poll while the page is mounted.
+ useEffect(() => {
+ fetchData()
+ const interval = setInterval(fetchData, 3000)
+ return () => clearInterval(interval)
+ }, [fetchData])
+
+ // Copy the network token to the clipboard (no-op when not yet loaded).
+ const copyToken = () => {
+ if (token) {
+ navigator.clipboard.writeText(token)
+ addToast('Token copied to clipboard', 'success', 2000)
+ }
+ }
+
+ if (loading) {
+ return (
+
+
+
+ )
+ }
+
+ // ── P2P Disabled ──
+ if (!enabled) {
+ return (
+
+
+
+
+ P2P Distribution Not Enabled
+
+
+ Enable peer-to-peer distribution to scale your AI workloads across multiple devices. Share instances, shard models, and pool computational resources across your network.
+
+
+
+
+
+
+
+
Instance Federation
+
Load balance across multiple instances
+
+
+
+
+
+
Model Sharding
+
Split large models across workers
+
+
+
+
+
+
Resource Sharing
+
Pool resources from multiple devices
+
+
+
+
+ {/* How to Enable */}
+
+
+
+ How to Enable P2P
+
+
+
+
+
+
Start LocalAI with P2P enabled
+
+
+ This will automatically generate a network token for you.
+
+
+
+
+
+
+
Or use an existing token
+
+
+ If you already have a token from another instance, you can reuse it.
+
+
+
+
+
+
+
Access the P2P dashboard
+
+ Once enabled, refresh this page to see your network token and start connecting nodes.
+
+
+
+
+
+
+
+
+ )
+ }
+
+ // ── P2P Enabled ──
+ const fedOnline = stats.federated?.online ?? 0
+ const fedTotal = stats.federated?.total ?? 0
+ const wrkOnline = stats.workers?.online ?? 0
+ const wrkTotal = stats.workers?.total ?? 0
+
+ return (
+
+
+
+
+ Distributed AI Computing
+
+
+ Scale your AI workloads across multiple devices with peer-to-peer distribution
+ {' '}
+
+
+
+
+
+
+ {/* Network Token */}
+
+
+
+
Network Token
+
+
+
+
+
+ {token || 'Loading...'}
+
+
+ All nodes (federated servers, instances, and workers) use the same token to join the network. Pass it via the TOKEN environment variable.
+
+
+
+ {/* Tab bar */}
+
+
setActiveTab('federation')}
+ style={{
+ flex: 1, padding: 'var(--spacing-md)',
+ background: activeTab === 'federation' ? 'var(--color-bg-secondary)' : 'transparent',
+ border: 'none', cursor: 'pointer',
+ borderBottom: activeTab === 'federation' ? '2px solid var(--color-primary)' : '2px solid transparent',
+ marginBottom: '-2px',
+ borderRadius: 'var(--radius-md) var(--radius-md) 0 0',
+ transition: 'all 150ms',
+ }}
+ >
+
+
+
+
+
+
+ Federation
+
+
+ {fedOnline}/{fedTotal} instances
+
+
+
+
+
setActiveTab('sharding')}
+ style={{
+ flex: 1, padding: 'var(--spacing-md)',
+ background: activeTab === 'sharding' ? 'var(--color-bg-secondary)' : 'transparent',
+ border: 'none', cursor: 'pointer',
+ borderBottom: activeTab === 'sharding' ? '2px solid var(--color-accent)' : '2px solid transparent',
+ marginBottom: '-2px',
+ borderRadius: 'var(--radius-md) var(--radius-md) 0 0',
+ transition: 'all 150ms',
+ }}
+ >
+
+
+
+
+
+
+ Model Sharding
+
+
+ {wrkOnline}/{wrkTotal} workers
+
+
+
+
+
+
+ {/* ── Federation Tab ── */}
+ {activeTab === 'federation' && (
+
+
+ {/* Architecture diagram */}
+
+
+
+
+
+
+
+
+
Federated Server
+
Load balancer
+
+
+
+
+ {[1, 2, 3].map(n => (
+
+
+
+ ))}
+
+
Federated Instances
+
Workers
+
+
+
+ The Federated Server acts as a load balancer — it receives API requests and distributes them across Federated Instances (workers running your models).
+
+
+
+ {/* Status + nodes */}
+
+
Connected Instances
+
+ 0 ? 'var(--color-success)' : 'var(--color-error)' }}>{fedOnline}
+ /{fedTotal}
+
+
+
+ {federation.length === 0 ? (
+
+
+
No federated instances connected
+
Follow the setup steps below
+
+ ) : (
+
+ {federation.map((node, i) => (
+
+ ))}
+
+ )}
+
+
+ {/* Setup Guide */}
+
+
+
+ Setup Guide
+
+
+
+ {/* Step 1 */}
+
+
+
+ Start the Federated Server (load balancer)
+
+
+
+ This is the entry point for your API clients. It receives requests and distributes them to federated instances.
+
+
+
+ Listens on port 8080 by default. To change it, add -e ADDRESS=:9090.
+
+
+ {/* Step 2 */}
+
+
+
+ Start Federated Instances (workers)
+
+
+
+ Run this on each machine you want to add as a worker. Each instance runs your models and receives tasks from the federated server.
+
+
+
+ Listens on port 8080 by default. To change it, add -e ADDRESS=:9090.
+
+
+
+ For GPU images and all available options, see the{' '}
+ Container images
+ {' '}and{' '}
+ Distribution docs.
+
+
+
+
+ )}
+
+ {/* ── Model Sharding Tab ── */}
+ {activeTab === 'sharding' && (
+
+
+ {/* Architecture diagram */}
+
+
+
+
+
+
+
LocalAI Instance
+
Orchestrator
+
+
+
+ RPC
+
+
+
+ {['Layer 1-10', 'Layer 11-20', 'Layer 21-30'].map((label, i) => (
+
+ ))}
+
+
RPC Workers
+
Distributed memory
+
+
+
+ Model weights are split across RPC workers . Each worker holds a portion of the model layers in its memory (GPU or CPU).
+ The LocalAI instance orchestrates inference by communicating with all workers via RPC.
+
+
+
+
+
+ Different from federation: Federation distributes whole requests across instances. Model sharding splits a single model's weights across machines for joint inference. Currently only supported with llama.cpp based models.
+
+
+ {/* Status + nodes */}
+
+
Connected Workers
+
+ 0 ? 'var(--color-success)' : 'var(--color-error)' }}>{wrkOnline}
+ /{wrkTotal}
+
+
+
+ {workers.length === 0 ? (
+
+
+
No workers available
+
Start workers to see them here
+
+ ) : (
+
+ {workers.map((node, i) => (
+
+ ))}
+
+ )}
+
+
+ {/* Setup Guide */}
+
+
+
+ Start a llama.cpp RPC Worker
+
+
+
+
+ Each worker exposes its GPU/CPU memory as a shard for distributed model inference.
+
+
+
+ Run this on each machine you want to contribute as a shard. The worker will automatically join the network and advertise its resources.
+
+
+
+ For GPU images and all available options, see the{' '}
+ Container images
+ {' '}and{' '}
+ Worker docs.
+
+
+
+
+ )}
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Settings.jsx b/core/http/react-ui/src/pages/Settings.jsx
new file mode 100644
index 000000000..856260f49
--- /dev/null
+++ b/core/http/react-ui/src/pages/Settings.jsx
@@ -0,0 +1,468 @@
+import { useState, useEffect, useCallback, useRef } from 'react'
+import { useOutletContext } from 'react-router-dom'
+import { settingsApi, resourcesApi } from '../utils/api'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { formatBytes, percentColor } from '../utils/format'
+
+function Toggle({ checked, onChange, disabled }) {
+ return (
+
+ onChange(e.target.checked)}
+ disabled={disabled}
+ style={{ display: 'none' }}
+ />
+
+
+
+
+ )
+}
+
+function SettingRow({ label, description, children }) {
+ return (
+
+
+
{label}
+ {description &&
{description}
}
+
+
{children}
+
+ )
+}
+
+// Settings page sections, in display order. `id` doubles as the scroll anchor
+// key in sectionRefs; icon/color style the sidebar nav entry.
+const SECTIONS = [
+ { id: 'watchdog', icon: 'fa-shield-halved', color: 'var(--color-primary)', label: 'Watchdog' },
+ { id: 'memory', icon: 'fa-memory', color: 'var(--color-accent)', label: 'Memory' },
+ { id: 'backends', icon: 'fa-cogs', color: 'var(--color-accent)', label: 'Backends' },
+ { id: 'performance', icon: 'fa-gauge-high', color: 'var(--color-success)', label: 'Performance' },
+ { id: 'api', icon: 'fa-globe', color: 'var(--color-warning)', label: 'API & CORS' },
+ { id: 'p2p', icon: 'fa-network-wired', color: 'var(--color-accent)', label: 'P2P' },
+ { id: 'galleries', icon: 'fa-images', color: 'var(--color-accent)', label: 'Galleries' },
+ { id: 'apikeys', icon: 'fa-key', color: 'var(--color-error)', label: 'API Keys' },
+ { id: 'agents', icon: 'fa-tasks', color: 'var(--color-primary)', label: 'Agent Jobs' },
+ { id: 'responses', icon: 'fa-database', color: 'var(--color-accent)', label: 'Responses' },
+]
+
+export default function Settings() {
+ const { addToast } = useOutletContext()
+ const [settings, setSettings] = useState(null)
+ const [loading, setLoading] = useState(true)
+ const [saving, setSaving] = useState(false)
+ const [resources, setResources] = useState(null)
+ const [activeSection, setActiveSection] = useState('watchdog')
+ const contentRef = useRef(null)
+ const sectionRefs = useRef({})
+
+ // Load settings once on mount. (fetchSettings is declared below; the effect
+ // callback only runs after render, so the const is initialized by then.)
+ useEffect(() => { fetchSettings() }, [])
+
+ // Fetch current runtime settings; clears `loading` on success or failure.
+ const fetchSettings = async () => {
+ try {
+ const data = await settingsApi.get()
+ setSettings(data)
+ } catch (err) {
+ addToast(`Failed to load settings: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }
+
+ // Best-effort resource snapshot for the Memory section.
+ // NOTE(review): no call site is visible in this code — `resources` may
+ // never be populated; confirm an effect or handler invokes this.
+ const fetchResources = useCallback(async () => {
+ try {
+ const data = await resourcesApi.get()
+ setResources(data)
+ } catch (_e) { /* ignore */ }
+ }, [])
+
+ // Persist the whole settings object; `saving` disables the Save button.
+ const handleSave = async () => {
+ setSaving(true)
+ try {
+ await settingsApi.save(settings)
+ addToast('Settings saved successfully', 'success')
+ } catch (err) {
+ addToast(`Save failed: ${err.message}`, 'error')
+ } finally {
+ setSaving(false)
+ }
+ }
+
+ // Immutable single-key update of the settings object.
+ const update = (key, value) => {
+ setSettings(prev => ({ ...prev, [key]: value }))
+ }
+
+ // Sidebar click: highlight the entry and smooth-scroll its section into view.
+ const scrollTo = (id) => {
+ setActiveSection(id)
+ sectionRefs.current[id]?.scrollIntoView({ behavior: 'smooth', block: 'start' })
+ }
+
+ // Track which section is visible on scroll
+ // Highlights the sidebar entry whose section heading is closest to the top
+ // of the scroll container (8px tolerance). Re-runs after `loading` flips so
+ // the listener attaches once the content container actually exists.
+ useEffect(() => {
+ const container = contentRef.current
+ if (!container) return
+ const onScroll = () => {
+ const containerTop = container.getBoundingClientRect().top
+ let closest = SECTIONS[0].id
+ let closestDist = Infinity
+ for (const s of SECTIONS) {
+ const el = sectionRefs.current[s.id]
+ if (el) {
+ const dist = Math.abs(el.getBoundingClientRect().top - containerTop - 8)
+ if (dist < closestDist) { closestDist = dist; closest = s.id }
+ }
+ }
+ setActiveSection(closest)
+ }
+ container.addEventListener('scroll', onScroll, { passive: true })
+ return () => container.removeEventListener('scroll', onScroll)
+ }, [loading])
+
+ if (loading) return
+ if (!settings) return
+
+ const watchdogEnabled = settings.watchdog_idle || settings.watchdog_busy
+
+ return (
+
+ {/* Header */}
+
+
+
Settings
+
Configure LocalAI runtime settings
+
+
+ {saving ? <> Saving...> : <> Save>}
+
+
+
+ {/* Two-column layout */}
+
+ {/* Sidebar nav */}
+
+ {SECTIONS.map(s => (
+ scrollTo(s.id)}
+ style={{
+ display: 'flex', alignItems: 'center', gap: 'var(--spacing-sm)',
+ width: '100%', padding: '8px 12px',
+ background: activeSection === s.id ? 'var(--color-primary-light)' : 'transparent',
+ border: 'none', borderRadius: 'var(--radius-md)', cursor: 'pointer',
+ color: activeSection === s.id ? 'var(--color-primary)' : 'var(--color-text-secondary)',
+ fontSize: '0.8125rem', fontWeight: activeSection === s.id ? 600 : 400,
+ textAlign: 'left', transition: 'all 150ms',
+ marginBottom: 2,
+ borderLeft: activeSection === s.id ? '2px solid var(--color-primary)' : '2px solid transparent',
+ }}
+ >
+
+ {s.label}
+
+ ))}
+
+
+ {/* Content area */}
+
+ {/* Watchdog */}
+
sectionRefs.current.watchdog = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ Watchdog
+
+
+
+ { update('watchdog_idle', v); update('watchdog_busy', v) }} />
+
+
+ update('watchdog_idle', v)} disabled={!watchdogEnabled} />
+
+
+ update('watchdog_idle_timeout', e.target.value)} placeholder="15m" disabled={!settings.watchdog_idle} />
+
+
+ update('watchdog_busy', v)} disabled={!watchdogEnabled} />
+
+
+ update('watchdog_busy_timeout', e.target.value)} placeholder="5m" disabled={!settings.watchdog_busy} />
+
+
+ update('watchdog_check_interval', e.target.value)} placeholder="2s" />
+
+
+ update('force_eviction', v)} />
+
+
+ update('lru_retries', parseInt(e.target.value) || 0)} placeholder="30" />
+
+
+ update('lru_retry_interval', e.target.value)} placeholder="1s" />
+
+
+
+
+ {/* Memory Reclaimer */}
+
sectionRefs.current.memory = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+
+ Memory Reclaimer
+
+
+
+
+
+
+ {resources && (
+
+ {resources.gpus?.length > 0 ? resources.gpus.map((gpu, i) => {
+ const usedPct = gpu.total > 0 ? Math.round((gpu.used / gpu.total) * 100) : 0
+ return (
+
+
GPU {i}
+
+
{usedPct}%
+
{formatBytes(gpu.used)} / {formatBytes(gpu.total)}
+
+ )
+ }) : (
+
+
RAM
+ {resources.ram && (() => {
+ const usedPct = resources.ram.total > 0 ? Math.round((resources.ram.used / resources.ram.total) * 100) : 0
+ return (
+ <>
+
+
{usedPct}%
+
{formatBytes(resources.ram.used)} / {formatBytes(resources.ram.total)}
+ >
+ )
+ })()}
+
+ )}
+
+ )}
+
+ update('memory_reclaimer', v)} />
+
+
+
+ update('memory_threshold', parseInt(e.target.value))} disabled={!settings.memory_reclaimer} style={{ width: 120 }} />
+
+ {settings.memory_threshold || 80}%
+
+
+
+
+
+
+ {/* Backends */}
+
sectionRefs.current.backends = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ Backend Management
+
+
+
+ update('max_active_backends', parseInt(e.target.value) || 0)} placeholder="0" />
+
+
+ update('parallel_backend_requests', v)} />
+
+
+
+
+ {/* Performance */}
+
+
+ {/* API & CORS */}
+
sectionRefs.current.api = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ API & CORS
+
+
+
+ update('cors', v)} />
+
+
+ update('cors_allow_origins', e.target.value)} placeholder="*" disabled={!settings.cors} />
+
+
+ update('csrf', v)} />
+
+
+
+
+ {/* P2P */}
+
sectionRefs.current.p2p = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ P2P Network
+
+
+
+
+ {/* Galleries */}
+
sectionRefs.current.galleries = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ Galleries
+
+
+
+ update('autoload_galleries', v)} />
+
+
+ update('autoload_backend_galleries', v)} />
+
+
+ Model Galleries (JSON)
+ update('galleries_json', e.target.value)}
+ rows={4}
+ placeholder={'[\n { "url": "https://...", "name": "my-gallery" }\n]'}
+ style={{ fontFamily: "'JetBrains Mono', monospace", fontSize: '0.8125rem' }}
+ />
+
+
+ Backend Galleries (JSON)
+ update('backend_galleries_json', e.target.value)}
+ rows={4}
+ placeholder={'[\n { "url": "https://...", "name": "my-backends" }\n]'}
+ style={{ fontFamily: "'JetBrains Mono', monospace", fontSize: '0.8125rem' }}
+ />
+
+
+
+
+ {/* API Keys */}
+
sectionRefs.current.apikeys = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ API Keys
+
+
+
+
+ API keys are sensitive. One key per line or comma-separated.
+
+
update('api_keys_text', e.target.value)}
+ rows={4}
+ placeholder="sk-key-1
sk-key-2"
+ style={{ fontFamily: "'JetBrains Mono', monospace", fontSize: '0.8125rem' }}
+ />
+
+
+
+ {/* Agent Jobs */}
+
sectionRefs.current.agents = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ Agent Jobs
+
+
+
+ update('agent_job_retention_days', parseInt(e.target.value) || 0)} placeholder="30" />
+
+
+
+
+ {/* Open Responses */}
+
sectionRefs.current.responses = el} style={{ marginBottom: 'var(--spacing-xl)' }}>
+
+ Open Responses
+
+
+
+ update('open_responses_store_ttl', e.target.value)} placeholder="1h" />
+
+
+
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Sound.jsx b/core/http/react-ui/src/pages/Sound.jsx
new file mode 100644
index 000000000..9353e6a97
--- /dev/null
+++ b/core/http/react-ui/src/pages/Sound.jsx
@@ -0,0 +1,152 @@
+import { useState, useRef } from 'react'
+import { useParams, useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { soundApi } from '../utils/api'
+
+export default function Sound() {
+ const { model: urlModel } = useParams()
+ const { addToast } = useOutletContext()
+ const [model, setModel] = useState(urlModel || '')
+ const [mode, setMode] = useState('simple')
+ const [text, setText] = useState('')
+ const [instrumental, setInstrumental] = useState(false)
+ const [vocalLanguage, setVocalLanguage] = useState('')
+ const [caption, setCaption] = useState('')
+ const [lyrics, setLyrics] = useState('')
+ const [think, setThink] = useState(false)
+ const [bpm, setBpm] = useState('')
+ const [duration, setDuration] = useState('')
+ const [keyscale, setKeyscale] = useState('')
+ const [language, setLanguage] = useState('')
+ const [timesignature, setTimesignature] = useState('')
+ const [loading, setLoading] = useState(false)
+ const [audioUrl, setAudioUrl] = useState(null)
+ const audioRef = useRef(null)
+
+ const handleGenerate = async (e) => {
+ e.preventDefault()
+ if (!model) { addToast('Please select a model', 'warning'); return }
+
+ const body = { model_id: model }
+
+ if (mode === 'simple') {
+ if (!text.trim()) { addToast('Please enter a description', 'warning'); return }
+ body.text = text.trim()
+ body.instrumental = instrumental
+ if (vocalLanguage.trim()) body.vocal_language = vocalLanguage.trim()
+ } else {
+ if (!caption.trim() && !lyrics.trim()) { addToast('Please enter caption or lyrics', 'warning'); return }
+ if (caption.trim()) body.caption = caption.trim()
+ if (lyrics.trim()) body.lyrics = lyrics.trim()
+ body.think = think
+ if (bpm) body.bpm = parseInt(bpm)
+ if (duration) body.duration_seconds = parseFloat(duration)
+ if (keyscale.trim()) body.keyscale = keyscale.trim()
+ if (language.trim()) body.language = language.trim()
+ if (timesignature.trim()) body.timesignature = timesignature.trim()
+ }
+
+ setLoading(true)
+ setAudioUrl(null)
+
+ try {
+ const blob = await soundApi.generate(body)
+ const url = URL.createObjectURL(blob)
+ setAudioUrl(url)
+ addToast('Sound generated', 'success')
+ setTimeout(() => audioRef.current?.play().catch(() => {}), 100)
+ } catch (err) {
+ addToast(`Error: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }
+
+ return (
+
+
+
+
Sound Generation
+
+
+
+
+ Model
+
+
+
+ {/* Mode toggle */}
+
+
+ setMode('simple')}>Simple
+ setMode('advanced')}>Advanced
+
+
+
+ {mode === 'simple' ? (
+ <>
+
+ Description
+ setText(e.target.value)} placeholder="Describe the sound..." rows={3} />
+
+
+ >
+ ) : (
+ <>
+
+ Caption
+ setCaption(e.target.value)} rows={2} />
+
+
+ Lyrics
+ setLyrics(e.target.value)} rows={3} />
+
+
+
+ setThink(e.target.checked)} /> Think mode
+
+ >
+ )}
+
+
+ {loading ? <> Generating...> : <> Generate Sound>}
+
+
+
+
+
+
+ {loading ? (
+
+ ) : audioUrl ? (
+
+ ) : (
+
+
+
Generated sound will appear here
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/TTS.jsx b/core/http/react-ui/src/pages/TTS.jsx
new file mode 100644
index 000000000..ceebb4480
--- /dev/null
+++ b/core/http/react-ui/src/pages/TTS.jsx
@@ -0,0 +1,95 @@
+import { useState, useRef } from 'react'
+import { useParams, useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { ttsApi } from '../utils/api'
+
+export default function TTS() {
+ const { model: urlModel } = useParams()
+ const { addToast } = useOutletContext()
+ const [model, setModel] = useState(urlModel || '')
+ const [text, setText] = useState('')
+ const [loading, setLoading] = useState(false)
+ const [audioUrl, setAudioUrl] = useState(null)
+ const audioRef = useRef(null)
+
  /**
   * Validate the text/model inputs, request speech audio from the TTS API,
   * and load the returned blob into the page's audio player.
   * NOTE(review): the previous object URL is not revoked when a new one is
   * created — confirm whether repeated generations should call
   * URL.revokeObjectURL to avoid leaking blob memory.
   */
  const handleGenerate = async (e) => {
    e.preventDefault()
    if (!text.trim()) { addToast('Please enter text', 'warning'); return }
    if (!model) { addToast('Please select a model', 'warning'); return }

    setLoading(true)
    setAudioUrl(null)

    try {
      const blob = await ttsApi.generate({ model, input: text.trim() })
      const url = URL.createObjectURL(blob)
      setAudioUrl(url)
      addToast('Audio generated', 'success')
      // Auto-play
      // (deferred so React commits the new <audio> src first; play() may
      // reject under browser autoplay policies — presumably acceptable here)
      setTimeout(() => audioRef.current?.play(), 100)
    } catch (err) {
      addToast(`Error: ${err.message}`, 'error')
    } finally {
      setLoading(false)
    }
  }
+
+ return (
+
+
+
+
Text to Speech
+
+
+
+
+ Model
+
+
+
+ Text
+ setText(e.target.value)}
+ placeholder="Enter text to convert to speech..."
+ rows={5}
+ />
+
+
+ {loading ? <> Generating...> : <> Generate Audio>}
+
+
+
+
+
+
+ {loading ? (
+
+ ) : audioUrl ? (
+
+ ) : (
+
+
+
Generated audio will appear here
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Talk.jsx b/core/http/react-ui/src/pages/Talk.jsx
new file mode 100644
index 000000000..590b89bda
--- /dev/null
+++ b/core/http/react-ui/src/pages/Talk.jsx
@@ -0,0 +1,200 @@
+import { useState, useRef, useCallback } from 'react'
+import { useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { chatApi, ttsApi, audioApi } from '../utils/api'
+
+export default function Talk() {
+ const { addToast } = useOutletContext()
+ const [llmModel, setLlmModel] = useState('')
+ const [whisperModel, setWhisperModel] = useState('')
+ const [ttsModel, setTtsModel] = useState('')
+ const [isRecording, setIsRecording] = useState(false)
+ const [loading, setLoading] = useState(false)
+ const [status, setStatus] = useState('Press the record button to start talking.')
+ const [audioUrl, setAudioUrl] = useState(null)
+ const [conversationHistory, setConversationHistory] = useState([])
+ const mediaRecorderRef = useRef(null)
+ const chunksRef = useRef([])
+ const audioRef = useRef(null)
+
+ const startRecording = async () => {
+ if (!navigator.mediaDevices) {
+ addToast('MediaDevices API not supported', 'error')
+ return
+ }
+ try {
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
+ const recorder = new MediaRecorder(stream)
+ chunksRef.current = []
+ recorder.ondataavailable = (e) => chunksRef.current.push(e.data)
+ recorder.start()
+ mediaRecorderRef.current = recorder
+ setIsRecording(true)
+ setStatus('Recording... Click to stop.')
+ } catch (err) {
+ addToast(`Microphone error: ${err.message}`, 'error')
+ }
+ }
+
+ const stopRecording = useCallback(() => {
+ if (!mediaRecorderRef.current) return
+
+ mediaRecorderRef.current.onstop = async () => {
+ setIsRecording(false)
+ setLoading(true)
+
+ const audioBlob = new Blob(chunksRef.current, { type: 'audio/webm' })
+
+ try {
+ // 1. Transcribe
+ setStatus('Transcribing audio...')
+ const formData = new FormData()
+ formData.append('file', audioBlob)
+ formData.append('model', whisperModel)
+ const transcription = await audioApi.transcribe(formData)
+ const userText = transcription.text
+
+ setStatus(`You said: "${userText}". Generating response...`)
+
+ // 2. Chat completion
+ const newHistory = [...conversationHistory, { role: 'user', content: userText }]
+ const chatResponse = await chatApi.complete({
+ model: llmModel,
+ messages: newHistory,
+ })
+ const assistantText = chatResponse?.choices?.[0]?.message?.content || ''
+ const updatedHistory = [...newHistory, { role: 'assistant', content: assistantText }]
+ setConversationHistory(updatedHistory)
+
+ setStatus(`Response: "${assistantText}". Generating speech...`)
+
+ // 3. TTS
+ const ttsBlob = await ttsApi.generateV1({ input: assistantText, model: ttsModel })
+ const url = URL.createObjectURL(ttsBlob)
+ setAudioUrl(url)
+ setStatus('Press the record button to continue.')
+
+ // Auto-play
+ setTimeout(() => audioRef.current?.play(), 100)
+ } catch (err) {
+ addToast(`Error: ${err.message}`, 'error')
+ setStatus('Error occurred. Try again.')
+ } finally {
+ setLoading(false)
+ }
+ }
+
+ mediaRecorderRef.current.stop()
+ mediaRecorderRef.current.stream?.getTracks().forEach(t => t.stop())
+ }, [whisperModel, llmModel, ttsModel, conversationHistory])
+
+ const resetConversation = () => {
+ setConversationHistory([])
+ setAudioUrl(null)
+ setStatus('Conversation reset. Press record to start.')
+ addToast('Conversation reset', 'info')
+ }
+
+ const allModelsSet = llmModel && whisperModel && ttsModel
+
+ return (
+
+
+
+
Talk
+
Voice conversation with AI
+
+
+ {/* Main interaction area */}
+
+ {/* Big record button */}
+
+
+
+
+ {/* Status */}
+
+ {loading ? : null}
+ {' '}{status}
+
+
+ {/* Recording indicator */}
+ {isRecording && (
+
+
+ Recording...
+
+ )}
+
+ {/* Audio playback */}
+ {audioUrl && (
+
+ )}
+
+
+ {/* Model selectors */}
+
+
+
+ Models
+
+
+ Reset
+
+
+
+
+
+
+ LLM
+
+
+
+
+
+ Speech-to-Text
+
+
+
+
+
+ Text-to-Speech
+
+
+
+
+
+ {!allModelsSet && (
+
+
+ Select all three models to start talking.
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/Traces.jsx b/core/http/react-ui/src/pages/Traces.jsx
new file mode 100644
index 000000000..9d4b83215
--- /dev/null
+++ b/core/http/react-ui/src/pages/Traces.jsx
@@ -0,0 +1,167 @@
+import React, { useState, useEffect, useCallback } from 'react'
+import { useOutletContext } from 'react-router-dom'
+import { tracesApi } from '../utils/api'
+import LoadingSpinner from '../components/LoadingSpinner'
+
/**
 * Render a nanosecond duration as a short human-readable string using the
 * largest fitting unit: ns, µs, ms, or s.
 * Nullish / absent values render as "-"; a literal 0 still renders "0ns".
 */
function formatDuration(ns) {
  const missing = !ns && ns !== 0
  if (missing) return '-'
  const NS_PER_US = 1000
  const NS_PER_MS = 1_000_000
  const NS_PER_S = 1_000_000_000
  if (ns < NS_PER_US) return `${ns}ns`
  if (ns < NS_PER_MS) return `${(ns / NS_PER_US).toFixed(1)}µs`
  if (ns < NS_PER_S) return `${(ns / NS_PER_MS).toFixed(1)}ms`
  return `${(ns / NS_PER_S).toFixed(2)}s`
}
+
+export default function Traces() {
+ const { addToast } = useOutletContext()
+ const [activeTab, setActiveTab] = useState('api')
+ const [traces, setTraces] = useState([])
+ const [loading, setLoading] = useState(true)
+ const [expandedRow, setExpandedRow] = useState(null)
+
+ const fetchTraces = useCallback(async () => {
+ try {
+ setLoading(true)
+ const data = activeTab === 'api'
+ ? await tracesApi.get()
+ : await tracesApi.getBackend()
+ setTraces(Array.isArray(data) ? data : [])
+ } catch (err) {
+ addToast(`Failed to load traces: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }, [activeTab, addToast])
+
+ useEffect(() => {
+ fetchTraces()
+ }, [fetchTraces])
+
+ const handleClear = async () => {
+ try {
+ if (activeTab === 'api') await tracesApi.clear()
+ else await tracesApi.clearBackend()
+ setTraces([])
+ addToast('Traces cleared', 'success')
+ } catch (err) {
+ addToast(`Failed to clear: ${err.message}`, 'error')
+ }
+ }
+
+ const handleExport = () => {
+ const blob = new Blob([JSON.stringify(traces, null, 2)], { type: 'application/json' })
+ const url = URL.createObjectURL(blob)
+ const a = document.createElement('a')
+ a.href = url
+ a.download = `traces-${activeTab}-${new Date().toISOString().slice(0, 10)}.json`
+ a.click()
+ URL.revokeObjectURL(url)
+ }
+
+ return (
+
+
+
Traces
+
Debug API and backend traces
+
+
+
+ setActiveTab('api')}>API Traces
+ setActiveTab('backend')}>Backend Traces
+
+
+
+ Refresh
+ Clear
+ Export
+
+
+ {loading ? (
+
+ ) : traces.length === 0 ? (
+
+
+
No traces
+
Traces will appear here as requests are made.
+
+ ) : activeTab === 'api' ? (
+
+
+
+
+
+ Time
+ Method
+ Path
+ Status
+
+
+
+ {traces.map((trace, i) => (
+
+ setExpandedRow(expandedRow === i ? null : i)} style={{ cursor: 'pointer' }}>
+
+ {trace.timestamp ? new Date(trace.timestamp).toLocaleTimeString() : '-'}
+ {trace.request?.method || '-'}
+ {trace.request?.path || '-'}
+ {trace.response?.status || '-'}
+
+ {expandedRow === i && (
+
+
+
+ {JSON.stringify(trace, null, 2)}
+
+
+
+ )}
+
+ ))}
+
+
+
+ ) : (
+
+
+
+
+
+ Time
+ Type
+ Model
+ Backend
+ Duration
+ Summary
+
+
+
+ {traces.map((trace, i) => (
+
+ setExpandedRow(expandedRow === i ? null : i)} style={{ cursor: 'pointer' }}>
+
+ {trace.timestamp ? new Date(trace.timestamp).toLocaleTimeString() : '-'}
+ {trace.type || '-'}
+ {trace.model_name || '-'}
+ {trace.backend || '-'}
+ {formatDuration(trace.duration)}
+
+ {trace.error ? {trace.error} : (trace.summary || '-')}
+
+
+ {expandedRow === i && (
+
+
+
+ {JSON.stringify(trace, null, 2)}
+
+
+
+ )}
+
+ ))}
+
+
+
+ )}
+
+ )
+}
diff --git a/core/http/react-ui/src/pages/VideoGen.jsx b/core/http/react-ui/src/pages/VideoGen.jsx
new file mode 100644
index 000000000..573e61d54
--- /dev/null
+++ b/core/http/react-ui/src/pages/VideoGen.jsx
@@ -0,0 +1,148 @@
+import { useState } from 'react'
+import { useParams, useOutletContext } from 'react-router-dom'
+import ModelSelector from '../components/ModelSelector'
+import LoadingSpinner from '../components/LoadingSpinner'
+import { videoApi, fileToBase64 } from '../utils/api'
+
+const SIZES = ['256x256', '512x512', '768x768', '1024x1024']
+
+export default function VideoGen() {
+ const { model: urlModel } = useParams()
+ const { addToast } = useOutletContext()
+ const [model, setModel] = useState(urlModel || '')
+ const [prompt, setPrompt] = useState('')
+ const [negativePrompt, setNegativePrompt] = useState('')
+ const [size, setSize] = useState('512x512')
+ const [seconds, setSeconds] = useState('')
+ const [fps, setFps] = useState('16')
+ const [frames, setFrames] = useState('')
+ const [steps, setSteps] = useState('')
+ const [seed, setSeed] = useState('')
+ const [cfgScale, setCfgScale] = useState('')
+ const [loading, setLoading] = useState(false)
+ const [videos, setVideos] = useState([])
+ const [showAdvanced, setShowAdvanced] = useState(false)
+ const [showImageInputs, setShowImageInputs] = useState(false)
+ const [startImage, setStartImage] = useState(null)
+ const [endImage, setEndImage] = useState(null)
+
+ const handleGenerate = async (e) => {
+ e.preventDefault()
+ if (!prompt.trim()) { addToast('Please enter a prompt', 'warning'); return }
+ if (!model) { addToast('Please select a model', 'warning'); return }
+
+ setLoading(true)
+ setVideos([])
+
+ const [w, h] = size.split('x').map(Number)
+ const body = { model, prompt: prompt.trim(), width: w, height: h, fps: parseInt(fps) || 16 }
+ if (negativePrompt.trim()) body.negative_prompt = negativePrompt.trim()
+ if (seconds) body.seconds = seconds
+ if (frames) body.num_frames = parseInt(frames)
+ if (steps) body.step = parseInt(steps)
+ if (seed) body.seed = parseInt(seed)
+ if (cfgScale) body.cfg_scale = parseFloat(cfgScale)
+ if (startImage) body.start_image = startImage
+ if (endImage) body.end_image = endImage
+
+ try {
+ const data = await videoApi.generate(body)
+ setVideos(data?.data || [])
+ if (!data?.data?.length) addToast('No videos generated', 'warning')
+ } catch (err) {
+ addToast(`Error: ${err.message}`, 'error')
+ } finally {
+ setLoading(false)
+ }
+ }
+
+ const handleImageUpload = async (e, setter) => {
+ if (e.target.files[0]) setter(await fileToBase64(e.target.files[0]))
+ }
+
+ return (
+
+
+
+
Video Generation
+
+
+
+
+ Model
+
+
+
+ Prompt
+ setPrompt(e.target.value)} placeholder="Describe the video..." rows={3} />
+
+
+ Negative Prompt
+ setNegativePrompt(e.target.value)} rows={2} />
+
+
+
+
+ Size
+ setSize(e.target.value)} style={{ width: '100%' }}>
+ {SIZES.map(s => {s} )}
+
+
+
+ Duration (s)
+ setSeconds(e.target.value)} placeholder="Auto" />
+
+
+ FPS
+ setFps(e.target.value)} />
+
+
+
+ setShowAdvanced(!showAdvanced)}>
+ Advanced
+
+ {showAdvanced && (
+
+ )}
+
+ setShowImageInputs(!showImageInputs)}>
+ Image Inputs
+
+ {showImageInputs && (
+
+ )}
+
+
+ {loading ? <> Generating...> : <> Generate Video>}
+
+
+
+
+
+
+ {loading ? (
+
+ ) : videos.length > 0 ? (
+
+ {videos.map((v, i) => (
+
+ ))}
+
+ ) : (
+
+
+
Generated videos will appear here
+
+ )}
+
+
+
+ )
+}
diff --git a/core/http/react-ui/src/router.jsx b/core/http/react-ui/src/router.jsx
new file mode 100644
index 000000000..815a196fa
--- /dev/null
+++ b/core/http/react-ui/src/router.jsx
@@ -0,0 +1,66 @@
+import { createBrowserRouter } from 'react-router-dom'
+import App from './App'
+import Home from './pages/Home'
+import Chat from './pages/Chat'
+import Models from './pages/Models'
+import Manage from './pages/Manage'
+import ImageGen from './pages/ImageGen'
+import VideoGen from './pages/VideoGen'
+import TTS from './pages/TTS'
+import Sound from './pages/Sound'
+import Talk from './pages/Talk'
+import Backends from './pages/Backends'
+import Settings from './pages/Settings'
+import Traces from './pages/Traces'
+import P2P from './pages/P2P'
+import AgentJobs from './pages/AgentJobs'
+import AgentTaskDetails from './pages/AgentTaskDetails'
+import AgentJobDetails from './pages/AgentJobDetails'
+import ModelEditor from './pages/ModelEditor'
+import ImportModel from './pages/ImportModel'
+import Explorer from './pages/Explorer'
+import Login from './pages/Login'
+import NotFound from './pages/NotFound'
+
+export const router = createBrowserRouter([
+ {
+ path: '/login',
+ element:
,
+ },
+ {
+ path: '/explorer',
+ element:
,
+ },
+ {
+ path: '/',
+ element:
,
+ children: [
+ { index: true, element:
},
+ { path: 'browse', element:
},
+ { path: 'chat', element:
},
+ { path: 'chat/:model', element:
},
+ { path: 'image', element:
},
+ { path: 'image/:model', element:
},
+ { path: 'video', element:
},
+ { path: 'video/:model', element:
},
+ { path: 'tts', element:
},
+ { path: 'tts/:model', element:
},
+ { path: 'sound', element:
},
+ { path: 'sound/:model', element:
},
+ { path: 'talk', element:
},
+ { path: 'manage', element:
},
+ { path: 'backends', element:
},
+ { path: 'settings', element:
},
+ { path: 'traces', element:
},
+ { path: 'p2p', element:
},
+ { path: 'agent-jobs', element:
},
+ { path: 'agent-jobs/tasks/new', element:
},
+ { path: 'agent-jobs/tasks/:id', element:
},
+ { path: 'agent-jobs/tasks/:id/edit', element:
},
+ { path: 'agent-jobs/jobs/:id', element:
},
+ { path: 'model-editor/:name', element:
},
+ { path: 'import-model', element:
},
+ { path: '*', element:
},
+ ],
+ },
+], { basename: '/app' })
diff --git a/core/http/react-ui/src/theme.css b/core/http/react-ui/src/theme.css
new file mode 100644
index 000000000..1d3047c58
--- /dev/null
+++ b/core/http/react-ui/src/theme.css
@@ -0,0 +1,138 @@
+/* LocalAI Theme - CSS Variables System */
+:root,
+[data-theme="dark"] {
+ --color-bg-primary: #121212;
+ --color-bg-secondary: #1A1A1A;
+ --color-bg-tertiary: #222222;
+ --color-bg-overlay: rgba(18, 18, 18, 0.95);
+
+ --color-primary: #38BDF8;
+ --color-primary-hover: #0EA5E9;
+ --color-primary-active: #0284C7;
+ --color-primary-text: #FFFFFF;
+ --color-primary-light: rgba(56, 189, 248, 0.08);
+ --color-primary-border: rgba(56, 189, 248, 0.15);
+
+ --color-secondary: #14B8A6;
+ --color-secondary-hover: #0D9488;
+ --color-secondary-light: rgba(20, 184, 166, 0.1);
+
+ --color-accent: #8B5CF6;
+ --color-accent-hover: #7C3AED;
+ --color-accent-light: rgba(139, 92, 246, 0.1);
+ --color-accent-purple: #A78BFA;
+ --color-accent-teal: #2DD4BF;
+
+ --color-text-primary: #E5E7EB;
+ --color-text-secondary: #94A3B8;
+ --color-text-muted: #64748B;
+ --color-text-disabled: #475569;
+ --color-text-inverse: #FFFFFF;
+
+ --color-border-subtle: rgba(255, 255, 255, 0.08);
+ --color-border-default: rgba(255, 255, 255, 0.12);
+ --color-border-strong: rgba(56, 189, 248, 0.3);
+ --color-border-divider: rgba(255, 255, 255, 0.05);
+ --color-border-primary: rgba(56, 189, 248, 0.2);
+ --color-border-focus: rgba(56, 189, 248, 0.4);
+
+ --color-success: #14B8A6;
+ --color-success-light: rgba(20, 184, 166, 0.1);
+ --color-warning: #F59E0B;
+ --color-warning-light: rgba(245, 158, 11, 0.1);
+ --color-error: #EF4444;
+ --color-error-light: rgba(239, 68, 68, 0.1);
+ --color-info: #38BDF8;
+ --color-info-light: rgba(56, 189, 248, 0.1);
+
+ --gradient-primary: linear-gradient(135deg, #38BDF8 0%, #8B5CF6 50%, #14B8A6 100%);
+ --gradient-hero: linear-gradient(135deg, #121212 0%, #1A1A1A 50%, #121212 100%);
+ --gradient-card: linear-gradient(135deg, rgba(56, 189, 248, 0.04) 0%, rgba(139, 92, 246, 0.04) 100%);
+ --gradient-text: linear-gradient(135deg, #38BDF8 0%, #8B5CF6 50%, #14B8A6 100%);
+
+ --shadow-subtle: 0 1px 2px rgba(0, 0, 0, 0.2);
+ --shadow-sm: 0 1px 3px rgba(0, 0, 0, 0.25);
+ --shadow-md: 0 4px 6px rgba(0, 0, 0, 0.3);
+ --shadow-lg: 0 10px 15px rgba(0, 0, 0, 0.35);
+ --shadow-glow: 0 0 0 1px rgba(56, 189, 248, 0.15), 0 0 12px rgba(56, 189, 248, 0.2);
+ --shadow-sidebar: 1px 0 3px rgba(0, 0, 0, 0.25);
+
+ --duration-fast: 150ms;
+ --duration-normal: 200ms;
+ --duration-slow: 300ms;
+ --ease-default: cubic-bezier(0.4, 0, 0.2, 1);
+
+ --spacing-xs: 0.25rem;
+ --spacing-sm: 0.5rem;
+ --spacing-md: 1rem;
+ --spacing-lg: 1.5rem;
+ --spacing-xl: 2rem;
+
+ --radius-sm: 4px;
+ --radius-md: 6px;
+ --radius-lg: 8px;
+ --radius-xl: 12px;
+ --radius-full: 9999px;
+
+ --sidebar-width: 220px;
+ --color-toggle-off: #475569;
+}
+
+[data-theme="light"] {
+ --color-bg-primary: #F8FAFC;
+ --color-bg-secondary: #FFFFFF;
+ --color-bg-tertiary: #FFFFFF;
+ --color-bg-overlay: rgba(248, 250, 252, 0.9);
+
+ --color-primary: #0EA5E9;
+ --color-primary-hover: #0284C7;
+ --color-primary-active: #0369A1;
+ --color-primary-text: #FFFFFF;
+ --color-primary-light: rgba(14, 165, 233, 0.08);
+ --color-primary-border: rgba(14, 165, 233, 0.2);
+
+ --color-secondary: #0D9488;
+ --color-secondary-hover: #0F766E;
+ --color-secondary-light: rgba(13, 148, 136, 0.1);
+
+ --color-accent: #7C3AED;
+ --color-accent-hover: #6D28D9;
+ --color-accent-light: rgba(124, 58, 237, 0.1);
+ --color-accent-purple: #A78BFA;
+ --color-accent-teal: #2DD4BF;
+
+ --color-text-primary: #1E293B;
+ --color-text-secondary: #64748B;
+ --color-text-muted: #94A3B8;
+ --color-text-disabled: #CBD5E1;
+ --color-text-inverse: #FFFFFF;
+
+ --color-border-subtle: rgba(15, 23, 42, 0.06);
+ --color-border-default: rgba(15, 23, 42, 0.1);
+ --color-border-strong: rgba(14, 165, 233, 0.3);
+ --color-border-divider: rgba(15, 23, 42, 0.04);
+ --color-border-primary: rgba(14, 165, 233, 0.2);
+ --color-border-focus: rgba(14, 165, 233, 0.4);
+
+ --color-success: #0D9488;
+ --color-success-light: rgba(13, 148, 136, 0.1);
+ --color-warning: #D97706;
+ --color-warning-light: rgba(217, 119, 6, 0.1);
+ --color-error: #DC2626;
+ --color-error-light: rgba(220, 38, 38, 0.1);
+ --color-info: #0EA5E9;
+ --color-info-light: rgba(14, 165, 233, 0.1);
+
+ --gradient-primary: linear-gradient(135deg, #0EA5E9 0%, #7C3AED 50%, #0D9488 100%);
+ --gradient-hero: linear-gradient(135deg, #F8FAFC 0%, #FFFFFF 50%, #F8FAFC 100%);
+ --gradient-card: linear-gradient(135deg, rgba(14, 165, 233, 0.03) 0%, rgba(124, 58, 237, 0.03) 100%);
+ --gradient-text: linear-gradient(135deg, #0EA5E9 0%, #7C3AED 50%, #0D9488 100%);
+
+ --shadow-subtle: 0 1px 2px rgba(0, 0, 0, 0.05);
+ --shadow-sm: 0 1px 3px rgba(0, 0, 0, 0.08);
+ --shadow-md: 0 4px 6px rgba(0, 0, 0, 0.07);
+ --shadow-lg: 0 10px 15px rgba(0, 0, 0, 0.08);
+ --shadow-glow: 0 0 0 1px rgba(14, 165, 233, 0.15), 0 0 8px rgba(14, 165, 233, 0.2);
+ --shadow-sidebar: 1px 0 3px rgba(0, 0, 0, 0.08);
+ --color-toggle-off: #CBD5E1;
+}
diff --git a/core/http/react-ui/src/utils/api.js b/core/http/react-ui/src/utils/api.js
new file mode 100644
index 000000000..e2cc08c6a
--- /dev/null
+++ b/core/http/react-ui/src/utils/api.js
@@ -0,0 +1,253 @@
+import { API_CONFIG } from './config'
+
/**
 * Normalize a fetch Response: throw a descriptive Error for non-2xx
 * statuses, parse JSON bodies, and pass every other response through.
 *
 * @param {Response} response - fetch Response to inspect.
 * @returns {Promise<any|Response>} Parsed JSON when the server declared
 *   `application/json`; otherwise the raw Response (callers handle blobs etc.).
 * @throws {Error} When `response.ok` is false; the message prefers the
 *   server-provided error payload over the bare HTTP status.
 */
async function handleResponse(response) {
  if (!response.ok) {
    let errorMessage = `HTTP ${response.status}`
    try {
      const data = await response.json()
      if (data?.error?.message) {
        errorMessage = data.error.message
      } else if (typeof data?.error === 'string') {
        // Some endpoints return { error: "..." } with a plain string.
        errorMessage = data.error
      } else if (data?.error) {
        // Fix: serialize structured errors instead of producing the useless
        // "[object Object]" that `String(object)` yields.
        errorMessage = JSON.stringify(data.error)
      }
    } catch (_e) {
      // Body wasn't JSON; keep the HTTP status message.
    }
    throw new Error(errorMessage)
  }
  const contentType = response.headers.get('content-type')
  if (contentType && contentType.includes('application/json')) {
    return response.json()
  }
  return response
}
+
/**
 * Build an absolute URL for `endpoint` on the current origin, appending the
 * entries of `params` as query parameters. Entries whose value is undefined,
 * null, or the empty string are omitted.
 */
function buildUrl(endpoint, params) {
  const url = new URL(endpoint, window.location.origin)
  for (const [name, value] of Object.entries(params ?? {})) {
    const skip = value === undefined || value === null || value === ''
    if (!skip) url.searchParams.set(name, value)
  }
  return url.toString()
}
+
/**
 * fetch wrapper that sends/receives JSON and funnels the response through
 * handleResponse (throws on non-2xx, parses JSON bodies).
 *
 * Fix: spread `options` BEFORE merging headers. Previously `...options` came
 * last, so any caller-supplied `headers` object replaced the merged headers
 * wholesale and silently dropped the Content-Type default.
 *
 * @param {string} endpoint - URL to fetch.
 * @param {RequestInit} [options] - extra fetch options; `options.headers`
 *   entries are merged over the JSON Content-Type default.
 */
async function fetchJSON(endpoint, options = {}) {
  const response = await fetch(endpoint, {
    ...options,
    headers: { 'Content-Type': 'application/json', ...options.headers },
  })
  return handleResponse(response)
}
+
/**
 * POST `body` as JSON to `endpoint` via fetchJSON. `options` is merged in
 * last, so callers may still override method/body when needed.
 */
async function postJSON(endpoint, body, options = {}) {
  const request = {
    method: 'POST',
    body: JSON.stringify(body),
    ...options,
  }
  return fetchJSON(endpoint, request)
}
+
+// SSE streaming for chat completions
/**
 * Start a streaming chat completion (SSE). Forces `stream: true` on the
 * request payload and returns the raw ReadableStream of the response so the
 * caller can consume server-sent chunks incrementally.
 *
 * @param {object} body - chat completion request (model, messages, ...).
 * @param {AbortSignal} [signal] - optional cancellation signal for fetch.
 * @returns {Promise<ReadableStream>} the response body stream.
 * @throws {Error} on non-2xx, preferring the server's error message.
 */
export async function streamChat(body, signal) {
  const payload = { ...body, stream: true }
  const response = await fetch(API_CONFIG.endpoints.chatCompletions, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
    signal,
  })

  if (response.ok) return response.body

  let message = `HTTP ${response.status}`
  try {
    const data = await response.json()
    if (data?.error?.message) message = data.error.message
  } catch (_e) { /* body wasn't JSON — keep the HTTP status message */ }
  throw new Error(message)
}
+
+// Models API
+export const modelsApi = {
+ list: (params) => fetchJSON(buildUrl(API_CONFIG.endpoints.models, params)),
+ listV1: () => fetchJSON(API_CONFIG.endpoints.modelsList),
+ listCapabilities: () => fetchJSON(API_CONFIG.endpoints.modelsCapabilities),
+ install: (id) => postJSON(API_CONFIG.endpoints.installModel(id), {}),
+ delete: (id) => postJSON(API_CONFIG.endpoints.deleteModel(id), {}),
+ getConfig: (id) => postJSON(API_CONFIG.endpoints.modelConfig(id), {}),
+ getConfigJson: (name) => fetchJSON(API_CONFIG.endpoints.modelConfigJson(name)),
+ getJob: (uid) => fetchJSON(API_CONFIG.endpoints.modelJob(uid)),
+ apply: (body) => postJSON(API_CONFIG.endpoints.modelsApply, body),
+ deleteByName: (name) => postJSON(API_CONFIG.endpoints.modelsDelete(name), {}),
+ reload: () => postJSON(API_CONFIG.endpoints.modelsReload, {}),
+ importUri: (body) => postJSON(API_CONFIG.endpoints.modelsImportUri, body),
+ importConfig: async (content, contentType = 'application/x-yaml') => {
+ const response = await fetch(API_CONFIG.endpoints.modelsImport, {
+ method: 'POST',
+ headers: { 'Content-Type': contentType },
+ body: content,
+ })
+ return handleResponse(response)
+ },
+ getJobStatus: (uid) => fetchJSON(API_CONFIG.endpoints.modelsJobStatus(uid)),
+ getEditConfig: (name) => fetchJSON(API_CONFIG.endpoints.modelEditGet(name)),
+ editConfig: (name, body) => postJSON(API_CONFIG.endpoints.modelEdit(name), body),
+}
+
+// Backends API
+export const backendsApi = {
+ list: (params) => fetchJSON(buildUrl(API_CONFIG.endpoints.backends, params)),
+ listInstalled: () => fetchJSON(API_CONFIG.endpoints.backendsInstalled),
+ install: (id) => postJSON(API_CONFIG.endpoints.installBackend(id), {}),
+ delete: (id) => postJSON(API_CONFIG.endpoints.deleteBackend(id), {}),
+ installExternal: (body) => postJSON(API_CONFIG.endpoints.installExternalBackend, body),
+ getJob: (uid) => fetchJSON(API_CONFIG.endpoints.backendJob(uid)),
+ deleteInstalled: (name) => postJSON(API_CONFIG.endpoints.deleteInstalledBackend(name), {}),
+}
+
+// Chat API (non-streaming)
+export const chatApi = {
+ complete: (body) => postJSON(API_CONFIG.endpoints.chatCompletions, body),
+ mcpComplete: (body) => postJSON(API_CONFIG.endpoints.mcpChatCompletions, body),
+}
+
+// Resources API
+export const resourcesApi = {
+ get: () => fetchJSON(API_CONFIG.endpoints.resources),
+}
+
+// Operations API
+export const operationsApi = {
+ list: () => fetchJSON(API_CONFIG.endpoints.operations),
+ cancel: (jobID) => postJSON(API_CONFIG.endpoints.cancelOperation(jobID), {}),
+}
+
+// Settings API
+export const settingsApi = {
+ get: () => fetchJSON(API_CONFIG.endpoints.settings),
+ save: (body) => postJSON(API_CONFIG.endpoints.settings, body),
+}
+
+// Traces API
+export const tracesApi = {
+ get: () => fetchJSON(API_CONFIG.endpoints.traces),
+ clear: () => postJSON(API_CONFIG.endpoints.clearTraces, {}),
+ getBackend: () => fetchJSON(API_CONFIG.endpoints.backendTraces),
+ clearBackend: () => postJSON(API_CONFIG.endpoints.clearBackendTraces, {}),
+}
+
+// P2P API
+export const p2pApi = {
+ getWorkers: () => fetchJSON(API_CONFIG.endpoints.p2pWorkers),
+ getFederation: () => fetchJSON(API_CONFIG.endpoints.p2pFederation),
+ getStats: () => fetchJSON(API_CONFIG.endpoints.p2pStats),
+ getToken: async () => {
+ const response = await fetch(API_CONFIG.endpoints.p2pToken)
+ if (!response.ok) throw new Error(`HTTP ${response.status}`)
+ return response.text()
+ },
+}
+
+// Agent Jobs API
+export const agentJobsApi = {
+ listTasks: () => fetchJSON(API_CONFIG.endpoints.agentTasks),
+ getTask: (id) => fetchJSON(API_CONFIG.endpoints.agentTask(id)),
+ createTask: (body) => postJSON(API_CONFIG.endpoints.agentTasks, body),
+ updateTask: (id, body) => fetchJSON(API_CONFIG.endpoints.agentTask(id), { method: 'PUT', body: JSON.stringify(body), headers: { 'Content-Type': 'application/json' } }),
+ deleteTask: (id) => fetchJSON(API_CONFIG.endpoints.agentTask(id), { method: 'DELETE' }),
+ executeTask: (name) => postJSON(API_CONFIG.endpoints.executeAgentTask(name), {}),
+ listJobs: () => fetchJSON(API_CONFIG.endpoints.agentJobs),
+ getJob: (id) => fetchJSON(API_CONFIG.endpoints.agentJob(id)),
+ cancelJob: (id) => postJSON(API_CONFIG.endpoints.cancelAgentJob(id), {}),
+ executeJob: (body) => postJSON(API_CONFIG.endpoints.executeAgentJob, body),
+}
+
+// Image generation
+export const imageApi = {
+ generate: (body) => postJSON(API_CONFIG.endpoints.imageGenerations, body),
+}
+
+// Video generation
+export const videoApi = {
+ generate: (body) => postJSON(API_CONFIG.endpoints.video, body),
+}
+
// TTS — both helpers POST JSON and resolve with an audio Blob; they call
// fetch directly (not fetchJSON) because the success payload is binary, not
// JSON. On failure they surface the server's error message when the body is
// JSON, otherwise the bare HTTP status.
// NOTE(review): `generate` and `generateV1` differ only in endpoint
// (LocalAI-native TTS vs. the OpenAI-compatible /v1/audio/speech) — consider
// extracting a shared blob-POST helper.
export const ttsApi = {
  generate: async (body) => {
    const response = await fetch(API_CONFIG.endpoints.tts, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    })
    if (!response.ok) {
      const data = await response.json().catch(() => ({}))
      throw new Error(data?.error?.message || `HTTP ${response.status}`)
    }
    return response.blob()
  },
  generateV1: async (body) => {
    const response = await fetch(API_CONFIG.endpoints.audioSpeech, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    })
    if (!response.ok) {
      const data = await response.json().catch(() => ({}))
      throw new Error(data?.error?.message || `HTTP ${response.status}`)
    }
    return response.blob()
  },
}
+
// Sound generation — POSTs the request as JSON and resolves with the
// generated audio as a Blob; uses fetch directly because the success payload
// is binary. On failure, prefers the server's JSON error message and falls
// back to the bare HTTP status.
export const soundApi = {
  generate: async (body) => {
    const response = await fetch(API_CONFIG.endpoints.soundGeneration, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    })
    if (!response.ok) {
      const data = await response.json().catch(() => ({}))
      throw new Error(data?.error?.message || `HTTP ${response.status}`)
    }
    return response.blob()
  },
}
+
+// Audio transcription
+export const audioApi = {
+ transcribe: async (formData) => {
+ const response = await fetch(API_CONFIG.endpoints.audioTranscriptions, {
+ method: 'POST',
+ body: formData,
+ })
+ return handleResponse(response)
+ },
+}
+
+// Backend control
+export const backendControlApi = {
+ shutdown: (body) => postJSON(API_CONFIG.endpoints.backendShutdown, body),
+}
+
+// System info
+export const systemApi = {
+ version: () => fetchJSON(API_CONFIG.endpoints.version),
+ info: () => fetchJSON(API_CONFIG.endpoints.system),
+}
+
/**
 * Read a File/Blob and resolve with its base64 payload — the portion of the
 * FileReader data URL after the first comma. When the split yields nothing
 * (e.g. an empty payload), the full reader result is returned instead,
 * matching the original fallback. Rejects with the FileReader error event.
 */
export function fileToBase64(file) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader()
    reader.onerror = reject
    reader.onload = () => {
      const tail = reader.result.split(',')[1]
      resolve(tail || reader.result)
    }
    reader.readAsDataURL(file)
  })
}
diff --git a/core/http/react-ui/src/utils/config.js b/core/http/react-ui/src/utils/config.js
new file mode 100644
index 000000000..4a64be51c
--- /dev/null
+++ b/core/http/react-ui/src/utils/config.js
@@ -0,0 +1,82 @@
+export const API_CONFIG = {
+ endpoints: {
+ // Operations
+ operations: '/api/operations',
+ cancelOperation: (jobID) => `/api/operations/${jobID}/cancel`,
+
+ // Models gallery
+ models: '/api/models',
+ installModel: (id) => `/api/models/install/${id}`,
+ deleteModel: (id) => `/api/models/delete/${id}`,
+ modelConfig: (id) => `/api/models/config/${id}`,
+ modelConfigJson: (name) => `/api/models/config-json/${name}`,
+ modelJob: (uid) => `/api/models/job/${uid}`,
+
+ // Backends gallery
+ backends: '/api/backends',
+ installBackend: (id) => `/api/backends/install/${id}`,
+ deleteBackend: (id) => `/api/backends/delete/${id}`,
+ installExternalBackend: '/api/backends/install-external',
+ backendJob: (uid) => `/api/backends/job/${uid}`,
+ deleteInstalledBackend: (name) => `/api/backends/system/delete/${name}`,
+
+ // Resources
+ resources: '/api/resources',
+
+ // Settings
+ settings: '/api/settings',
+
+ // Traces
+ traces: '/api/traces',
+ clearTraces: '/api/traces/clear',
+ backendTraces: '/api/backend-traces',
+ clearBackendTraces: '/api/backend-traces/clear',
+
+ // P2P
+ p2pWorkers: '/api/p2p/workers',
+ p2pFederation: '/api/p2p/federation',
+ p2pStats: '/api/p2p/stats',
+ p2pToken: '/api/p2p/token',
+
+ // Agent jobs
+ agentTasks: '/api/agent/tasks',
+ agentTask: (id) => `/api/agent/tasks/${id}`,
+ executeAgentTask: (name) => `/api/agent/tasks/${name}/execute`,
+ agentJobs: '/api/agent/jobs',
+ agentJob: (id) => `/api/agent/jobs/${id}`,
+ cancelAgentJob: (id) => `/api/agent/jobs/${id}/cancel`,
+ executeAgentJob: '/api/agent/jobs/execute',
+
+ // OpenAI-compatible endpoints
+ chatCompletions: '/v1/chat/completions',
+ mcpChatCompletions: '/v1/mcp/chat/completions',
+ completions: '/v1/completions',
+ imageGenerations: '/v1/images/generations',
+ audioSpeech: '/v1/audio/speech',
+ audioTranscriptions: '/v1/audio/transcriptions',
+ soundGeneration: '/v1/sound-generation',
+ embeddings: '/v1/embeddings',
+ modelsList: '/v1/models',
+ modelsCapabilities: '/api/models/capabilities',
+
+ // LocalAI-specific
+ tts: '/tts',
+ video: '/video',
+ backendMonitor: '/backend/monitor',
+ backendShutdown: '/backend/shutdown',
+ modelsApply: '/models/apply',
+ modelsDelete: (name) => `/models/delete/${name}`,
+ modelsAvailable: '/models/available',
+ modelsGalleries: '/models/galleries',
+ modelsReload: '/models/reload',
+ modelsImportUri: '/models/import-uri',
+ modelsImport: '/models/import',
+ modelsJobStatus: (uid) => `/models/jobs/${uid}`,
+ modelEditGet: (name) => `/api/models/edit/${name}`,
+ modelEdit: (name) => `/models/edit/${name}`,
+ backendsAvailable: '/backends/available',
+ backendsInstalled: '/backends',
+ version: '/version',
+ system: '/system',
+ },
+}
diff --git a/core/http/react-ui/src/utils/format.js b/core/http/react-ui/src/utils/format.js
new file mode 100644
index 000000000..3372f86c6
--- /dev/null
+++ b/core/http/react-ui/src/utils/format.js
@@ -0,0 +1,22 @@
+// Human-readable byte count, e.g. 1536 -> "1.5 KB".
+// Guards non-finite / non-positive input (Math.log would yield NaN or
+// -Infinity) and clamps the unit index so values beyond TB still render.
+export function formatBytes(bytes) {
+  if (!Number.isFinite(bytes) || bytes <= 0) return '0 B'
+  const k = 1024
+  const sizes = ['B', 'KB', 'MB', 'GB', 'TB']
+  const i = Math.min(Math.floor(Math.log(bytes) / Math.log(k)), sizes.length - 1)
+  return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i]
+}
+
+export function percentColor(pct) {
+ if (pct > 90) return 'var(--color-error)'
+ if (pct > 70) return 'var(--color-warning)'
+ return 'var(--color-success)'
+}
+
+export function vendorColor(vendor) {
+ if (!vendor) return 'var(--color-accent)'
+ const v = vendor.toLowerCase()
+ if (v.includes('nvidia')) return '#76b900'
+ if (v.includes('amd')) return '#ed1c24'
+ if (v.includes('intel')) return '#0071c5'
+ return 'var(--color-accent)'
+}
diff --git a/core/http/react-ui/src/utils/markdown.js b/core/http/react-ui/src/utils/markdown.js
new file mode 100644
index 000000000..c03fde7a7
--- /dev/null
+++ b/core/http/react-ui/src/utils/markdown.js
@@ -0,0 +1,27 @@
+import { marked } from 'marked'
+import DOMPurify from 'dompurify'
+import hljs from 'highlight.js'
+
+marked.setOptions({
+ highlight(code, lang) {
+ if (lang && hljs.getLanguage(lang)) {
+ return hljs.highlight(code, { language: lang }).value
+ }
+ return hljs.highlightAuto(code).value
+ },
+ breaks: true,
+ gfm: true,
+})
+
+export function renderMarkdown(text) {
+ if (!text) return ''
+ const html = marked.parse(text)
+ return DOMPurify.sanitize(html)
+}
+
+export function highlightAll(element) {
+ if (!element) return
+ element.querySelectorAll('pre code').forEach((block) => {
+ hljs.highlightElement(block)
+ })
+}
diff --git a/core/http/react-ui/vite.config.js b/core/http/react-ui/vite.config.js
new file mode 100644
index 000000000..58a4a4413
--- /dev/null
+++ b/core/http/react-ui/vite.config.js
@@ -0,0 +1,32 @@
+import { defineConfig } from 'vite'
+import react from '@vitejs/plugin-react'
+
+const backendUrl = process.env.LOCALAI_URL || 'http://localhost:8080'
+
+export default defineConfig({
+ plugins: [react()],
+ base: '/app',
+ server: {
+ port: 3000,
+ proxy: {
+ '/api': backendUrl,
+ '/v1': backendUrl,
+ '/tts': backendUrl,
+ '/video': backendUrl,
+ '/backend': backendUrl,
+ '/models': backendUrl,
+ '/backends': backendUrl,
+ '/swagger': backendUrl,
+ '/static': backendUrl,
+ '/generated-audio': backendUrl,
+ '/generated-images': backendUrl,
+ '/generated-videos': backendUrl,
+ '/version': backendUrl,
+ '/system': backendUrl,
+ },
+ },
+ build: {
+ outDir: 'dist',
+ assetsDir: 'assets',
+ },
+})
diff --git a/core/http/routes/ui.go b/core/http/routes/ui.go
index 4cd9a72bd..0064f8131 100644
--- a/core/http/routes/ui.go
+++ b/core/http/routes/ui.go
@@ -3,12 +3,9 @@ package routes
import (
"github.com/labstack/echo/v4"
"github.com/mudler/LocalAI/core/config"
- "github.com/mudler/LocalAI/core/gallery"
- "github.com/mudler/LocalAI/core/http/endpoints/localai"
"github.com/mudler/LocalAI/core/http/middleware"
"github.com/mudler/LocalAI/core/services"
"github.com/mudler/LocalAI/core/trace"
- "github.com/mudler/LocalAI/internal"
"github.com/mudler/LocalAI/pkg/model"
)
@@ -18,409 +15,68 @@ func RegisterUIRoutes(app *echo.Echo,
appConfig *config.ApplicationConfig,
galleryService *services.GalleryService) {
- // keeps the state of ops that are started from the UI
- var processingOps = services.NewOpCache(galleryService)
+ // Redirect all old UI routes to React SPA at /app
+ redirectToApp := func(path string) echo.HandlerFunc {
+ return func(c echo.Context) error {
+ return c.Redirect(302, "/app"+path)
+ }
+ }
- app.GET("/", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
- app.GET("/manage", localai.WelcomeEndpoint(appConfig, cl, ml, processingOps))
+ redirectToAppWithParam := func(prefix string) echo.HandlerFunc {
+ return func(c echo.Context) error {
+ param := c.Param("model")
+ if param == "" {
+ param = c.Param("id")
+ }
+ if param != "" {
+ return c.Redirect(302, "/app"+prefix+"/"+param)
+ }
+ return c.Redirect(302, "/app"+prefix)
+ }
+ }
+
+ // "/" is handled in app.go to serve React SPA directly (preserves reverse-proxy headers)
+ app.GET("/manage", redirectToApp("/manage"))
if !appConfig.DisableRuntimeSettings {
- // Settings page
- app.GET("/settings", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Settings",
- "BaseURL": middleware.BaseURL(c),
- }
- return c.Render(200, "views/settings", summary)
- })
+ app.GET("/settings", redirectToApp("/settings"))
}
// Agent Jobs pages
- app.GET("/agent-jobs", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- summary := map[string]interface{}{
- "Title": "LocalAI - Agent Jobs",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- "ModelsConfig": modelConfigs,
- }
- return c.Render(200, "views/agent-jobs", summary)
- })
-
- app.GET("/agent-jobs/tasks/new", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- summary := map[string]interface{}{
- "Title": "LocalAI - Create Task",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- "ModelsConfig": modelConfigs,
- }
- return c.Render(200, "views/agent-task-details", summary)
- })
-
- // More specific route must come first
+ app.GET("/agent-jobs", redirectToApp("/agent-jobs"))
+ app.GET("/agent-jobs/tasks/new", redirectToApp("/agent-jobs/tasks/new"))
app.GET("/agent-jobs/tasks/:id/edit", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- summary := map[string]interface{}{
- "Title": "LocalAI - Edit Task",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- "ModelsConfig": modelConfigs,
- }
- return c.Render(200, "views/agent-task-details", summary)
+ return c.Redirect(302, "/app/agent-jobs/tasks/"+c.Param("id")+"/edit")
})
-
- // Task details page (less specific, comes after edit route)
app.GET("/agent-jobs/tasks/:id", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Task Details",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- }
- return c.Render(200, "views/agent-task-details", summary)
+ return c.Redirect(302, "/app/agent-jobs/tasks/"+c.Param("id"))
})
-
app.GET("/agent-jobs/jobs/:id", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Job Details",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- }
- return c.Render(200, "views/agent-job-details", summary)
+ return c.Redirect(302, "/app/agent-jobs/jobs/"+c.Param("id"))
})
// P2P
- app.GET("/p2p", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - P2P dashboard",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- //"Nodes": p2p.GetAvailableNodes(""),
- //"FederatedNodes": p2p.GetAvailableNodes(p2p.FederatedID),
-
- "P2PToken": appConfig.P2PToken,
- "NetworkID": appConfig.P2PNetworkID,
- }
-
- // Render index
- return c.Render(200, "views/p2p", summary)
- })
-
- // Note: P2P UI fragment routes (/p2p/ui/*) were removed
- // P2P nodes are now fetched via JSON API at /api/p2p/workers and /api/p2p/federation
-
- // End P2P
+ app.GET("/p2p", redirectToApp("/p2p"))
if !appConfig.DisableGalleryEndpoint {
- registerGalleryRoutes(app, cl, appConfig, galleryService, processingOps)
- registerBackendGalleryRoutes(app, appConfig, galleryService, processingOps)
+ app.GET("/browse", redirectToApp("/browse"))
+ app.GET("/browse/backends", redirectToApp("/backends"))
}
- app.GET("/talk", func(c echo.Context) error {
- modelConfigs, _ := services.ListModels(cl, ml, config.NoFilterFn, services.SKIP_IF_CONFIGURED)
-
- if len(modelConfigs) == 0 {
- // If no model is available redirect to the index which suggests how to install models
- return c.Redirect(302, middleware.BaseURL(c))
- }
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Talk",
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "Model": modelConfigs[0],
-
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/talk", summary)
- })
-
- app.GET("/chat", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
- // If no model is available redirect to the index which suggests how to install models
- return c.Redirect(302, middleware.BaseURL(c))
- }
- modelThatCanBeUsed := ""
- galleryConfigs := map[string]*gallery.ModelConfig{}
-
- for _, m := range modelConfigs {
- cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
- if err != nil {
- continue
- }
- galleryConfigs[m.Name] = cfg
- }
-
- title := "LocalAI - Chat"
- var modelContextSize *int
-
- for _, b := range modelConfigs {
- if b.HasUsecases(config.FLAG_CHAT) {
- modelThatCanBeUsed = b.Name
- title = "LocalAI - Chat with " + modelThatCanBeUsed
- if b.LLMConfig.ContextSize != nil {
- modelContextSize = b.LLMConfig.ContextSize
- }
- break
- }
- }
-
- summary := map[string]interface{}{
- "Title": title,
- "BaseURL": middleware.BaseURL(c),
- "ModelsWithoutConfig": modelsWithoutConfig,
- "GalleryConfig": galleryConfigs,
- "ModelsConfig": modelConfigs,
- "Model": modelThatCanBeUsed,
- "ContextSize": modelContextSize,
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/chat", summary)
- })
-
- // Show the Chat page
- app.GET("/chat/:model", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- galleryConfigs := map[string]*gallery.ModelConfig{}
- modelName := c.Param("model")
- var modelContextSize *int
-
- for _, m := range modelConfigs {
- cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
- if err != nil {
- continue
- }
- galleryConfigs[m.Name] = cfg
- if m.Name == modelName && m.LLMConfig.ContextSize != nil {
- modelContextSize = m.LLMConfig.ContextSize
- }
- }
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Chat with " + modelName,
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "GalleryConfig": galleryConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": modelName,
- "ContextSize": modelContextSize,
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/chat", summary)
- })
-
- app.GET("/image/:model", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Generate images with " + c.Param("model"),
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": c.Param("model"),
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/image", summary)
- })
-
- app.GET("/image", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
- // If no model is available redirect to the index which suggests how to install models
- return c.Redirect(302, middleware.BaseURL(c))
- }
-
- modelThatCanBeUsed := ""
- title := "LocalAI - Generate images"
-
- for _, b := range modelConfigs {
- if b.HasUsecases(config.FLAG_IMAGE) {
- modelThatCanBeUsed = b.Name
- title = "LocalAI - Generate images with " + modelThatCanBeUsed
- break
- }
- }
-
- summary := map[string]interface{}{
- "Title": title,
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": modelThatCanBeUsed,
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/image", summary)
- })
-
- app.GET("/tts/:model", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Generate images with " + c.Param("model"),
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": c.Param("model"),
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/tts", summary)
- })
-
- app.GET("/tts", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
- // If no model is available redirect to the index which suggests how to install models
- return c.Redirect(302, middleware.BaseURL(c))
- }
-
- modelThatCanBeUsed := ""
- title := "LocalAI - Generate audio"
-
- for _, b := range modelConfigs {
- if b.HasUsecases(config.FLAG_TTS) {
- modelThatCanBeUsed = b.Name
- title = "LocalAI - Generate audio with " + modelThatCanBeUsed
- break
- }
- }
- summary := map[string]interface{}{
- "Title": title,
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": modelThatCanBeUsed,
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/tts", summary)
- })
-
- app.GET("/sound/:model", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Generate sound with " + c.Param("model"),
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": c.Param("model"),
- "Version": internal.PrintableVersion(),
- }
- return c.Render(200, "views/sound", summary)
- })
-
- app.GET("/sound", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
- return c.Redirect(302, middleware.BaseURL(c))
- }
-
- modelThatCanBeUsed := ""
- title := "LocalAI - Generate sound"
- for _, b := range modelConfigs {
- if b.HasUsecases(config.FLAG_SOUND_GENERATION) {
- modelThatCanBeUsed = b.Name
- title = "LocalAI - Generate sound with " + modelThatCanBeUsed
- break
- }
- }
- summary := map[string]interface{}{
- "Title": title,
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": modelThatCanBeUsed,
- "Version": internal.PrintableVersion(),
- }
- return c.Render(200, "views/sound", summary)
- })
-
- app.GET("/video/:model", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- summary := map[string]interface{}{
- "Title": "LocalAI - Generate videos with " + c.Param("model"),
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": c.Param("model"),
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/video", summary)
- })
-
- app.GET("/video", func(c echo.Context) error {
- modelConfigs := cl.GetAllModelsConfigs()
- modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
-
- if len(modelConfigs)+len(modelsWithoutConfig) == 0 {
- // If no model is available redirect to the index which suggests how to install models
- return c.Redirect(302, middleware.BaseURL(c))
- }
-
- modelThatCanBeUsed := ""
- title := "LocalAI - Generate videos"
-
- for _, b := range modelConfigs {
- if b.HasUsecases(config.FLAG_VIDEO) {
- modelThatCanBeUsed = b.Name
- title = "LocalAI - Generate videos with " + modelThatCanBeUsed
- break
- }
- }
-
- summary := map[string]interface{}{
- "Title": title,
- "BaseURL": middleware.BaseURL(c),
- "ModelsConfig": modelConfigs,
- "ModelsWithoutConfig": modelsWithoutConfig,
- "Model": modelThatCanBeUsed,
- "Version": internal.PrintableVersion(),
- }
-
- // Render index
- return c.Render(200, "views/video", summary)
- })
+ app.GET("/talk", redirectToApp("/talk"))
+ app.GET("/chat", redirectToApp("/chat"))
+ app.GET("/chat/:model", redirectToAppWithParam("/chat"))
+ app.GET("/image", redirectToApp("/image"))
+ app.GET("/image/:model", redirectToAppWithParam("/image"))
+ app.GET("/tts", redirectToApp("/tts"))
+ app.GET("/tts/:model", redirectToAppWithParam("/tts"))
+ app.GET("/sound", redirectToApp("/sound"))
+ app.GET("/sound/:model", redirectToAppWithParam("/sound"))
+ app.GET("/video", redirectToApp("/video"))
+ app.GET("/video/:model", redirectToAppWithParam("/video"))
// Traces UI
- app.GET("/traces", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Traces",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- }
- return c.Render(200, "views/traces", summary)
- })
+ app.GET("/traces", redirectToApp("/traces"))
app.GET("/api/traces", func(c echo.Context) error {
return c.JSON(200, middleware.GetTraces())
diff --git a/core/http/routes/ui_api.go b/core/http/routes/ui_api.go
index 4dc73a823..563c9b499 100644
--- a/core/http/routes/ui_api.go
+++ b/core/http/routes/ui_api.go
@@ -397,6 +397,35 @@ func RegisterUIAPIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model
})
})
+ // Returns installed models with their capability flags for UI filtering
+ app.GET("/api/models/capabilities", func(c echo.Context) error {
+ modelConfigs := cl.GetAllModelsConfigs()
+ modelsWithoutConfig, _ := services.ListModels(cl, ml, config.NoFilterFn, services.LOOSE_ONLY)
+
+ type modelCapability struct {
+ ID string `json:"id"`
+ Capabilities []string `json:"capabilities"`
+ }
+
+ result := make([]modelCapability, 0, len(modelConfigs)+len(modelsWithoutConfig))
+ for _, cfg := range modelConfigs {
+ result = append(result, modelCapability{
+ ID: cfg.Name,
+ Capabilities: cfg.KnownUsecaseStrings,
+ })
+ }
+ for _, name := range modelsWithoutConfig {
+ result = append(result, modelCapability{
+ ID: name,
+ Capabilities: []string{},
+ })
+ }
+
+ return c.JSON(200, map[string]any{
+ "data": result,
+ })
+ })
+
app.POST("/api/models/install/:id", func(c echo.Context) error {
galleryID := c.Param("id")
// URL decode the gallery ID (e.g., "localai%40model" -> "localai@model")
@@ -533,6 +562,61 @@ func RegisterUIAPIRoutes(app *echo.Echo, cl *config.ModelConfigLoader, ml *model
})
})
+ // Get installed model config as JSON (used by frontend for MCP detection, etc.)
+ app.GET("/api/models/config-json/:name", func(c echo.Context) error {
+ modelName := c.Param("name")
+ if modelName == "" {
+ return c.JSON(http.StatusBadRequest, map[string]interface{}{
+ "error": "model name is required",
+ })
+ }
+
+ modelConfig, exists := cl.GetModelConfig(modelName)
+ if !exists {
+ return c.JSON(http.StatusNotFound, map[string]interface{}{
+ "error": "model configuration not found",
+ })
+ }
+
+ return c.JSON(http.StatusOK, modelConfig)
+ })
+
+ // Get installed model YAML config for the React model editor
+ app.GET("/api/models/edit/:name", func(c echo.Context) error {
+ modelName := c.Param("name")
+ if modelName == "" {
+ return c.JSON(http.StatusBadRequest, map[string]interface{}{
+ "error": "model name is required",
+ })
+ }
+
+ modelConfig, exists := cl.GetModelConfig(modelName)
+ if !exists {
+ return c.JSON(http.StatusNotFound, map[string]interface{}{
+ "error": "model configuration not found",
+ })
+ }
+
+ modelConfigFile := modelConfig.GetModelConfigFile()
+ if modelConfigFile == "" {
+ return c.JSON(http.StatusNotFound, map[string]interface{}{
+ "error": "model configuration file not found",
+ })
+ }
+
+ configData, err := os.ReadFile(modelConfigFile)
+ if err != nil {
+ return c.JSON(http.StatusInternalServerError, map[string]interface{}{
+ "error": "failed to read configuration file: " + err.Error(),
+ })
+ }
+
+ return c.JSON(http.StatusOK, map[string]interface{}{
+ "config": string(configData),
+ "name": modelName,
+ })
+ })
+
app.GET("/api/models/job/:uid", func(c echo.Context) error {
jobUID := c.Param("uid")
diff --git a/core/http/routes/ui_backend_gallery.go b/core/http/routes/ui_backend_gallery.go
index 8f0a31351..93b63b2c2 100644
--- a/core/http/routes/ui_backend_gallery.go
+++ b/core/http/routes/ui_backend_gallery.go
@@ -3,22 +3,11 @@ package routes
import (
"github.com/labstack/echo/v4"
"github.com/mudler/LocalAI/core/config"
- "github.com/mudler/LocalAI/core/http/middleware"
"github.com/mudler/LocalAI/core/services"
- "github.com/mudler/LocalAI/internal"
)
func registerBackendGalleryRoutes(app *echo.Echo, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache) {
- // Show the Backends page (all backends are loaded client-side via Alpine.js)
- app.GET("/browse/backends", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Backends",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- "Repositories": appConfig.BackendGalleries,
- }
-
- // Render index - backends are now loaded via Alpine.js from /api/backends
- return c.Render(200, "views/backends", summary)
- })
+ // Backend gallery routes are now handled by the React SPA at /app/backends
+ // This function is kept for backward compatibility but no longer registers routes
+ // (routes are registered directly in RegisterUIRoutes)
}
diff --git a/core/http/routes/ui_gallery.go b/core/http/routes/ui_gallery.go
index dfd39fe76..fd4b3d484 100644
--- a/core/http/routes/ui_gallery.go
+++ b/core/http/routes/ui_gallery.go
@@ -3,21 +3,11 @@ package routes
import (
"github.com/labstack/echo/v4"
"github.com/mudler/LocalAI/core/config"
- "github.com/mudler/LocalAI/core/http/middleware"
"github.com/mudler/LocalAI/core/services"
- "github.com/mudler/LocalAI/internal"
)
func registerGalleryRoutes(app *echo.Echo, cl *config.ModelConfigLoader, appConfig *config.ApplicationConfig, galleryService *services.GalleryService, opcache *services.OpCache) {
- app.GET("/browse", func(c echo.Context) error {
- summary := map[string]interface{}{
- "Title": "LocalAI - Models",
- "BaseURL": middleware.BaseURL(c),
- "Version": internal.PrintableVersion(),
- "Repositories": appConfig.Galleries,
- }
-
- // Render index - models are now loaded via Alpine.js from /api/models
- return c.Render(200, "views/models", summary)
- })
+ // Gallery routes are now handled by the React SPA at /app/browse
+ // This function is kept for backward compatibility but no longer registers routes
+ // (routes are registered directly in RegisterUIRoutes)
}
diff --git a/pkg/functions/functions_test.go b/pkg/functions/functions_test.go
index 2eb0946a6..aad84e001 100644
--- a/pkg/functions/functions_test.go
+++ b/pkg/functions/functions_test.go
@@ -1,3 +1,4 @@
+
package functions_test
import (
diff --git a/webui_static.yaml b/webui_static.yaml
index 8185225eb..185a8ea4a 100644
--- a/webui_static.yaml
+++ b/webui_static.yaml
@@ -1,3 +1,9 @@
+# DEPRECATED: This file is used by the legacy Alpine.js UI (core/http/views/).
+# The new React UI (core/http/react-ui/) bundles all its dependencies via npm.
+# When the legacy UI is removed, delete this file along with:
+# - core/dependencies_manager/manager.go
+# - core/http/static/assets/ (downloaded artifacts)
+# - The "gen-assets" Makefile target
- filename: "highlightjs.css"
url: "https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/styles/default.min.css"
sha: "fbde0ac0921d86c356c41532e7319c887a23bd1b8ff00060cab447249f03c7cf"