mirror of
https://github.com/mudler/LocalAI.git
synced 2026-02-04 11:42:57 -05:00
Compare commits
6 Commits
copilot/re
...
copilot/up
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a1480707df | ||
|
|
6e1ecc9be7 | ||
|
|
c88074a19e | ||
|
|
5ca8f0aea0 | ||
|
|
84234e531f | ||
|
|
4cbf9abfef |
@@ -205,6 +205,7 @@ func API(application *application.Application) (*echo.Echo, error) {
|
||||
|
||||
routes.RegisterLocalAIRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application.TemplatesEvaluator(), application)
|
||||
routes.RegisterOpenAIRoutes(e, requestExtractor, application)
|
||||
routes.RegisterAnthropicRoutes(e, requestExtractor, application)
|
||||
if !application.ApplicationConfig().DisableWebUI {
|
||||
routes.RegisterUIAPIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application)
|
||||
routes.RegisterUIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService())
|
||||
|
||||
537
core/http/endpoints/anthropic/messages.go
Normal file
537
core/http/endpoints/anthropic/messages.go
Normal file
@@ -0,0 +1,537 @@
|
||||
package anthropic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/templates"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
// MessagesEndpoint is the Anthropic Messages API endpoint
|
||||
// https://docs.anthropic.com/claude/reference/messages_post
|
||||
// @Summary Generate a message response for the given messages and model.
|
||||
// @Param request body schema.AnthropicRequest true "query params"
|
||||
// @Success 200 {object} schema.AnthropicResponse "Response"
|
||||
// @Router /v1/messages [post]
|
||||
func MessagesEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
id := uuid.New().String()
|
||||
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.AnthropicRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model is required")
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model configuration not found")
|
||||
}
|
||||
|
||||
if input.MaxTokens <= 0 {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "max_tokens is required and must be greater than 0")
|
||||
}
|
||||
|
||||
xlog.Debug("Anthropic Messages endpoint configuration read", "config", cfg)
|
||||
|
||||
// Convert Anthropic messages to OpenAI format for internal processing
|
||||
openAIMessages := convertAnthropicToOpenAIMessages(input)
|
||||
|
||||
// Convert Anthropic tools to internal Functions format
|
||||
funcs, shouldUseFn := convertAnthropicTools(input, cfg)
|
||||
|
||||
// Create an OpenAI-compatible request for internal processing
|
||||
openAIReq := &schema.OpenAIRequest{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
BasicModelRequest: schema.BasicModelRequest{Model: input.Model},
|
||||
Temperature: input.Temperature,
|
||||
TopK: input.TopK,
|
||||
TopP: input.TopP,
|
||||
Maxtokens: &input.MaxTokens,
|
||||
},
|
||||
Messages: openAIMessages,
|
||||
Stream: input.Stream,
|
||||
Context: input.Context,
|
||||
Cancel: input.Cancel,
|
||||
}
|
||||
|
||||
// Set stop sequences
|
||||
if len(input.StopSequences) > 0 {
|
||||
openAIReq.Stop = input.StopSequences
|
||||
}
|
||||
|
||||
// Merge config settings
|
||||
if input.Temperature != nil {
|
||||
cfg.Temperature = input.Temperature
|
||||
}
|
||||
if input.TopK != nil {
|
||||
cfg.TopK = input.TopK
|
||||
}
|
||||
if input.TopP != nil {
|
||||
cfg.TopP = input.TopP
|
||||
}
|
||||
cfg.Maxtokens = &input.MaxTokens
|
||||
if len(input.StopSequences) > 0 {
|
||||
cfg.StopWords = append(cfg.StopWords, input.StopSequences...)
|
||||
}
|
||||
|
||||
// Template the prompt with tools if available
|
||||
predInput := evaluator.TemplateMessages(*openAIReq, openAIReq.Messages, cfg, funcs, shouldUseFn)
|
||||
xlog.Debug("Anthropic Messages - Prompt (after templating)", "prompt", predInput)
|
||||
|
||||
if input.Stream {
|
||||
return handleAnthropicStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
|
||||
return handleAnthropicNonStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
}
|
||||
|
||||
func handleAnthropicNonStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
|
||||
images := []string{}
|
||||
for _, m := range openAIReq.Messages {
|
||||
images = append(images, m.StringImages...)
|
||||
}
|
||||
|
||||
predFunc, err := backend.ModelInference(
|
||||
input.Context, predInput, openAIReq.Messages, images, nil, nil, ml, cfg, nil, nil, nil, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic model inference failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
|
||||
}
|
||||
|
||||
prediction, err := predFunc()
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic prediction failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
|
||||
}
|
||||
|
||||
result := backend.Finetune(*cfg, predInput, prediction.Response)
|
||||
|
||||
// Check if the result contains tool calls
|
||||
toolCalls := functions.ParseFunctionCall(result, cfg.FunctionsConfig)
|
||||
|
||||
var contentBlocks []schema.AnthropicContentBlock
|
||||
var stopReason string
|
||||
|
||||
if shouldUseFn && len(toolCalls) > 0 {
|
||||
// Model wants to use tools
|
||||
stopReason = "tool_use"
|
||||
for _, tc := range toolCalls {
|
||||
// Parse arguments as JSON
|
||||
var inputArgs map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(tc.Arguments), &inputArgs); err != nil {
|
||||
xlog.Warn("Failed to parse tool call arguments as JSON", "error", err, "args", tc.Arguments)
|
||||
inputArgs = map[string]interface{}{"raw": tc.Arguments}
|
||||
}
|
||||
|
||||
contentBlocks = append(contentBlocks, schema.AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: fmt.Sprintf("toolu_%s_%d", id, len(contentBlocks)),
|
||||
Name: tc.Name,
|
||||
Input: inputArgs,
|
||||
})
|
||||
}
|
||||
|
||||
// Add any text content before the tool calls
|
||||
textContent := functions.ParseTextContent(result, cfg.FunctionsConfig)
|
||||
if textContent != "" {
|
||||
// Prepend text block
|
||||
contentBlocks = append([]schema.AnthropicContentBlock{{Type: "text", Text: textContent}}, contentBlocks...)
|
||||
}
|
||||
} else {
|
||||
// Normal text response
|
||||
stopReason = "end_turn"
|
||||
contentBlocks = []schema.AnthropicContentBlock{
|
||||
{Type: "text", Text: result},
|
||||
}
|
||||
}
|
||||
|
||||
resp := &schema.AnthropicResponse{
|
||||
ID: fmt.Sprintf("msg_%s", id),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: input.Model,
|
||||
StopReason: &stopReason,
|
||||
Content: contentBlocks,
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: prediction.Usage.Prompt,
|
||||
OutputTokens: prediction.Usage.Completion,
|
||||
},
|
||||
}
|
||||
|
||||
if respData, err := json.Marshal(resp); err == nil {
|
||||
xlog.Debug("Anthropic Response", "response", string(respData))
|
||||
}
|
||||
|
||||
return c.JSON(200, resp)
|
||||
}
|
||||
|
||||
func handleAnthropicStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
|
||||
c.Response().Header().Set("Content-Type", "text/event-stream")
|
||||
c.Response().Header().Set("Cache-Control", "no-cache")
|
||||
c.Response().Header().Set("Connection", "keep-alive")
|
||||
|
||||
// Create OpenAI messages for inference
|
||||
openAIMessages := openAIReq.Messages
|
||||
|
||||
images := []string{}
|
||||
for _, m := range openAIMessages {
|
||||
images = append(images, m.StringImages...)
|
||||
}
|
||||
|
||||
// Send message_start event
|
||||
messageStart := schema.AnthropicStreamEvent{
|
||||
Type: "message_start",
|
||||
Message: &schema.AnthropicStreamMessage{
|
||||
ID: fmt.Sprintf("msg_%s", id),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Content: []schema.AnthropicContentBlock{},
|
||||
Model: input.Model,
|
||||
Usage: schema.AnthropicUsage{InputTokens: 0, OutputTokens: 0},
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, messageStart)
|
||||
|
||||
// Track accumulated content for tool call detection
|
||||
accumulatedContent := ""
|
||||
currentBlockIndex := 0
|
||||
inToolCall := false
|
||||
toolCallsEmitted := 0
|
||||
|
||||
// Send initial content_block_start event
|
||||
contentBlockStart := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_start",
|
||||
Index: currentBlockIndex,
|
||||
ContentBlock: &schema.AnthropicContentBlock{Type: "text", Text: ""},
|
||||
}
|
||||
sendAnthropicSSE(c, contentBlockStart)
|
||||
|
||||
// Stream content deltas
|
||||
tokenCallback := func(token string, usage backend.TokenUsage) bool {
|
||||
accumulatedContent += token
|
||||
|
||||
// If we're using functions, try to detect tool calls incrementally
|
||||
if shouldUseFn {
|
||||
cleanedResult := functions.CleanupLLMResult(accumulatedContent, cfg.FunctionsConfig)
|
||||
|
||||
// Try parsing for tool calls
|
||||
toolCalls := functions.ParseFunctionCall(cleanedResult, cfg.FunctionsConfig)
|
||||
|
||||
// If we detected new tool calls and haven't emitted them yet
|
||||
if len(toolCalls) > toolCallsEmitted {
|
||||
// Stop the current text block if we were in one
|
||||
if !inToolCall && currentBlockIndex == 0 {
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: currentBlockIndex,
|
||||
})
|
||||
currentBlockIndex++
|
||||
inToolCall = true
|
||||
}
|
||||
|
||||
// Emit new tool calls
|
||||
for i := toolCallsEmitted; i < len(toolCalls); i++ {
|
||||
tc := toolCalls[i]
|
||||
|
||||
// Send content_block_start for tool_use
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_start",
|
||||
Index: currentBlockIndex,
|
||||
ContentBlock: &schema.AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: fmt.Sprintf("toolu_%s_%d", id, i),
|
||||
Name: tc.Name,
|
||||
},
|
||||
})
|
||||
|
||||
// Send input_json_delta with the arguments
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_delta",
|
||||
Index: currentBlockIndex,
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
Type: "input_json_delta",
|
||||
PartialJSON: tc.Arguments,
|
||||
},
|
||||
})
|
||||
|
||||
// Send content_block_stop
|
||||
sendAnthropicSSE(c, schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: currentBlockIndex,
|
||||
})
|
||||
|
||||
currentBlockIndex++
|
||||
}
|
||||
toolCallsEmitted = len(toolCalls)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Send regular text delta if not in tool call mode
|
||||
if !inToolCall {
|
||||
delta := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_delta",
|
||||
Index: 0,
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
Type: "text_delta",
|
||||
Text: token,
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, delta)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
predFunc, err := backend.ModelInference(
|
||||
input.Context, predInput, openAIMessages, images, nil, nil, ml, cfg, nil, nil, tokenCallback, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic stream model inference failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
|
||||
}
|
||||
|
||||
prediction, err := predFunc()
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic stream prediction failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
|
||||
}
|
||||
|
||||
// Send content_block_stop event for last block if we didn't close it yet
|
||||
if !inToolCall {
|
||||
contentBlockStop := schema.AnthropicStreamEvent{
|
||||
Type: "content_block_stop",
|
||||
Index: 0,
|
||||
}
|
||||
sendAnthropicSSE(c, contentBlockStop)
|
||||
}
|
||||
|
||||
// Determine stop reason
|
||||
stopReason := "end_turn"
|
||||
if toolCallsEmitted > 0 {
|
||||
stopReason = "tool_use"
|
||||
}
|
||||
|
||||
// Send message_delta event with stop_reason
|
||||
messageDelta := schema.AnthropicStreamEvent{
|
||||
Type: "message_delta",
|
||||
Delta: &schema.AnthropicStreamDelta{
|
||||
StopReason: &stopReason,
|
||||
},
|
||||
Usage: &schema.AnthropicUsage{
|
||||
OutputTokens: prediction.Usage.Completion,
|
||||
},
|
||||
}
|
||||
sendAnthropicSSE(c, messageDelta)
|
||||
|
||||
// Send message_stop event
|
||||
messageStop := schema.AnthropicStreamEvent{
|
||||
Type: "message_stop",
|
||||
}
|
||||
sendAnthropicSSE(c, messageStop)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sendAnthropicSSE(c echo.Context, event schema.AnthropicStreamEvent) {
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
xlog.Error("Failed to marshal SSE event", "error", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(c.Response().Writer, "event: %s\ndata: %s\n\n", event.Type, string(data))
|
||||
c.Response().Flush()
|
||||
}
|
||||
|
||||
func sendAnthropicError(c echo.Context, statusCode int, errorType, message string) error {
|
||||
resp := schema.AnthropicErrorResponse{
|
||||
Type: "error",
|
||||
Error: schema.AnthropicError{
|
||||
Type: errorType,
|
||||
Message: message,
|
||||
},
|
||||
}
|
||||
return c.JSON(statusCode, resp)
|
||||
}
|
||||
|
||||
func convertAnthropicToOpenAIMessages(input *schema.AnthropicRequest) []schema.Message {
|
||||
var messages []schema.Message
|
||||
|
||||
// Add system message if present
|
||||
if input.System != "" {
|
||||
messages = append(messages, schema.Message{
|
||||
Role: "system",
|
||||
StringContent: input.System,
|
||||
Content: input.System,
|
||||
})
|
||||
}
|
||||
|
||||
// Convert Anthropic messages to OpenAI format
|
||||
for _, msg := range input.Messages {
|
||||
openAIMsg := schema.Message{
|
||||
Role: msg.Role,
|
||||
}
|
||||
|
||||
// Handle content (can be string or array of content blocks)
|
||||
switch content := msg.Content.(type) {
|
||||
case string:
|
||||
openAIMsg.StringContent = content
|
||||
openAIMsg.Content = content
|
||||
case []interface{}:
|
||||
// Handle array of content blocks
|
||||
var textContent string
|
||||
var stringImages []string
|
||||
var toolCalls []schema.ToolCall
|
||||
toolCallIndex := 0
|
||||
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
blockType, _ := blockMap["type"].(string)
|
||||
switch blockType {
|
||||
case "text":
|
||||
if text, ok := blockMap["text"].(string); ok {
|
||||
textContent += text
|
||||
}
|
||||
case "image":
|
||||
// Handle image content
|
||||
if source, ok := blockMap["source"].(map[string]interface{}); ok {
|
||||
if sourceType, ok := source["type"].(string); ok && sourceType == "base64" {
|
||||
if data, ok := source["data"].(string); ok {
|
||||
mediaType, _ := source["media_type"].(string)
|
||||
// Format as data URI
|
||||
dataURI := fmt.Sprintf("data:%s;base64,%s", mediaType, data)
|
||||
stringImages = append(stringImages, dataURI)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "tool_use":
|
||||
// Convert tool_use to ToolCall format
|
||||
toolID, _ := blockMap["id"].(string)
|
||||
toolName, _ := blockMap["name"].(string)
|
||||
toolInput := blockMap["input"]
|
||||
|
||||
// Serialize input to JSON string
|
||||
inputJSON, err := json.Marshal(toolInput)
|
||||
if err != nil {
|
||||
xlog.Warn("Failed to marshal tool input", "error", err)
|
||||
inputJSON = []byte("{}")
|
||||
}
|
||||
|
||||
toolCalls = append(toolCalls, schema.ToolCall{
|
||||
Index: toolCallIndex,
|
||||
ID: toolID,
|
||||
Type: "function",
|
||||
FunctionCall: schema.FunctionCall{
|
||||
Name: toolName,
|
||||
Arguments: string(inputJSON),
|
||||
},
|
||||
})
|
||||
toolCallIndex++
|
||||
case "tool_result":
|
||||
// Convert tool_result to a message with role "tool"
|
||||
// This is handled by creating a separate message after this block
|
||||
// For now, we'll add it as text content
|
||||
toolUseID, _ := blockMap["tool_use_id"].(string)
|
||||
isError := false
|
||||
if isErrorPtr, ok := blockMap["is_error"].(*bool); ok && isErrorPtr != nil {
|
||||
isError = *isErrorPtr
|
||||
}
|
||||
|
||||
var resultText string
|
||||
if resultContent, ok := blockMap["content"]; ok {
|
||||
switch rc := resultContent.(type) {
|
||||
case string:
|
||||
resultText = rc
|
||||
case []interface{}:
|
||||
// Array of content blocks
|
||||
for _, cb := range rc {
|
||||
if cbMap, ok := cb.(map[string]interface{}); ok {
|
||||
if cbMap["type"] == "text" {
|
||||
if text, ok := cbMap["text"].(string); ok {
|
||||
resultText += text
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add tool result as a tool role message
|
||||
// We need to handle this differently - create a new message
|
||||
if msg.Role == "user" {
|
||||
// Store tool result info for creating separate message
|
||||
prefix := ""
|
||||
if isError {
|
||||
prefix = "Error: "
|
||||
}
|
||||
textContent += fmt.Sprintf("\n[Tool Result for %s]: %s%s", toolUseID, prefix, resultText)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
openAIMsg.StringContent = textContent
|
||||
openAIMsg.Content = textContent
|
||||
openAIMsg.StringImages = stringImages
|
||||
|
||||
// Add tool calls if present
|
||||
if len(toolCalls) > 0 {
|
||||
openAIMsg.ToolCalls = toolCalls
|
||||
}
|
||||
}
|
||||
|
||||
messages = append(messages, openAIMsg)
|
||||
}
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
// convertAnthropicTools converts Anthropic tools to internal Functions format
|
||||
func convertAnthropicTools(input *schema.AnthropicRequest, cfg *config.ModelConfig) (functions.Functions, bool) {
|
||||
if len(input.Tools) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var funcs functions.Functions
|
||||
for _, tool := range input.Tools {
|
||||
f := functions.Function{
|
||||
Name: tool.Name,
|
||||
Description: tool.Description,
|
||||
Parameters: tool.InputSchema,
|
||||
}
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
|
||||
// Handle tool_choice
|
||||
if input.ToolChoice != nil {
|
||||
switch tc := input.ToolChoice.(type) {
|
||||
case string:
|
||||
// "auto", "any", or "none"
|
||||
if tc == "any" {
|
||||
// Force the model to use one of the tools
|
||||
cfg.SetFunctionCallString("required")
|
||||
} else if tc == "none" {
|
||||
// Don't use tools
|
||||
return nil, false
|
||||
}
|
||||
// "auto" is the default - let model decide
|
||||
case map[string]interface{}:
|
||||
// Specific tool selection: {"type": "tool", "name": "tool_name"}
|
||||
if tcType, ok := tc["type"].(string); ok && tcType == "tool" {
|
||||
if name, ok := tc["name"].(string); ok {
|
||||
// Force specific tool
|
||||
cfg.SetFunctionCallString(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return funcs, len(funcs) > 0 && cfg.ShouldUseFunctions()
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package openai
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -34,11 +35,54 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Role: "assistant"}, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
}
|
||||
responses <- initialMessage
|
||||
|
||||
// Track accumulated content for reasoning extraction
|
||||
accumulatedContent := ""
|
||||
lastEmittedReasoning := ""
|
||||
lastEmittedCleanedContent := ""
|
||||
|
||||
_, _, err := ComputeChoices(req, s, config, cl, startupOptions, loader, func(s string, c *[]schema.Choice) {}, func(s string, tokenUsage backend.TokenUsage) bool {
|
||||
accumulatedContent += s
|
||||
// Extract reasoning from accumulated content
|
||||
currentReasoning, cleanedContent := functions.ExtractReasoning(accumulatedContent)
|
||||
|
||||
// Calculate new reasoning delta (what we haven't emitted yet)
|
||||
var reasoningDelta *string
|
||||
if currentReasoning != lastEmittedReasoning {
|
||||
// Extract only the new part
|
||||
if len(currentReasoning) > len(lastEmittedReasoning) && strings.HasPrefix(currentReasoning, lastEmittedReasoning) {
|
||||
newReasoning := currentReasoning[len(lastEmittedReasoning):]
|
||||
reasoningDelta = &newReasoning
|
||||
lastEmittedReasoning = currentReasoning
|
||||
} else if currentReasoning != "" {
|
||||
// If reasoning changed in a non-append way, emit the full current reasoning
|
||||
reasoningDelta = ¤tReasoning
|
||||
lastEmittedReasoning = currentReasoning
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate content delta from cleaned content
|
||||
var deltaContent string
|
||||
if len(cleanedContent) > len(lastEmittedCleanedContent) && strings.HasPrefix(cleanedContent, lastEmittedCleanedContent) {
|
||||
deltaContent = cleanedContent[len(lastEmittedCleanedContent):]
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
} else if cleanedContent != lastEmittedCleanedContent {
|
||||
// If cleaned content changed but not in a simple append, extract delta from cleaned content
|
||||
// This handles cases where thinking tags are removed mid-stream
|
||||
if lastEmittedCleanedContent == "" {
|
||||
deltaContent = cleanedContent
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
} else {
|
||||
// Content changed in non-append way, use the new cleaned content
|
||||
deltaContent = cleanedContent
|
||||
lastEmittedCleanedContent = cleanedContent
|
||||
}
|
||||
}
|
||||
// Only emit content if there's actual content (not just thinking tags)
|
||||
// If deltaContent is empty, we still emit the response but with empty content
|
||||
|
||||
usage := schema.OpenAIUsage{
|
||||
PromptTokens: tokenUsage.Prompt,
|
||||
CompletionTokens: tokenUsage.Completion,
|
||||
@@ -49,11 +93,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing
|
||||
}
|
||||
|
||||
delta := &schema.Message{}
|
||||
// Only include content if there's actual content (not just thinking tags)
|
||||
if deltaContent != "" {
|
||||
delta.Content = &deltaContent
|
||||
}
|
||||
if reasoningDelta != nil && *reasoningDelta != "" {
|
||||
delta.Reasoning = reasoningDelta
|
||||
}
|
||||
|
||||
resp := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Content: &s}, Index: 0, FinishReason: nil}},
|
||||
Choices: []schema.Choice{{Delta: delta, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
Usage: usage,
|
||||
}
|
||||
@@ -176,6 +229,10 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Extract reasoning before processing tool calls
|
||||
reasoning, cleanedResult := functions.ExtractReasoning(result)
|
||||
result = cleanedResult
|
||||
|
||||
textContentToReturn = functions.ParseTextContent(result, config.FunctionsConfig)
|
||||
result = functions.CleanupLLMResult(result, config.FunctionsConfig)
|
||||
functionResults := functions.ParseFunctionCall(result, config.FunctionsConfig)
|
||||
@@ -208,11 +265,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
usage.TimingPromptProcessing = tokenUsage.TimingPromptProcessing
|
||||
}
|
||||
|
||||
var deltaReasoning *string
|
||||
if reasoning != "" {
|
||||
deltaReasoning = &reasoning
|
||||
}
|
||||
delta := &schema.Message{Content: &result}
|
||||
if deltaReasoning != nil {
|
||||
delta.Reasoning = deltaReasoning
|
||||
}
|
||||
|
||||
resp := schema.OpenAIResponse{
|
||||
ID: id,
|
||||
Created: created,
|
||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
||||
Choices: []schema.Choice{{Delta: &schema.Message{Content: &result}, Index: 0, FinishReason: nil}},
|
||||
Choices: []schema.Choice{{Delta: delta, Index: 0, FinishReason: nil}},
|
||||
Object: "chat.completion.chunk",
|
||||
Usage: usage,
|
||||
}
|
||||
@@ -553,10 +619,18 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
default:
|
||||
|
||||
tokenCallback := func(s string, c *[]schema.Choice) {
|
||||
// Extract reasoning from the response
|
||||
reasoning, cleanedS := functions.ExtractReasoning(s)
|
||||
s = cleanedS
|
||||
|
||||
if !shouldUseFn {
|
||||
// no function is called, just reply and use stop as finish reason
|
||||
stopReason := FinishReasonStop
|
||||
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: &schema.Message{Role: "assistant", Content: &s}})
|
||||
message := &schema.Message{Role: "assistant", Content: &s}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{FinishReason: &stopReason, Index: 0, Message: message})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -575,9 +649,13 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
}
|
||||
|
||||
stopReason := FinishReasonStop
|
||||
message := &schema.Message{Role: "assistant", Content: &result}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{
|
||||
FinishReason: &stopReason,
|
||||
Message: &schema.Message{Role: "assistant", Content: &result}})
|
||||
Message: message})
|
||||
default:
|
||||
toolCallsReason := FinishReasonToolCalls
|
||||
toolChoice := schema.Choice{
|
||||
@@ -586,6 +664,9 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
Role: "assistant",
|
||||
},
|
||||
}
|
||||
if reasoning != "" {
|
||||
toolChoice.Message.Reasoning = &reasoning
|
||||
}
|
||||
|
||||
for _, ss := range results {
|
||||
name, args := ss.Name, ss.Arguments
|
||||
@@ -606,16 +687,20 @@ func ChatEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator
|
||||
} else {
|
||||
// otherwise we return more choices directly (deprecated)
|
||||
functionCallReason := FinishReasonFunctionCall
|
||||
message := &schema.Message{
|
||||
Role: "assistant",
|
||||
Content: &textContentToReturn,
|
||||
FunctionCall: map[string]interface{}{
|
||||
"name": name,
|
||||
"arguments": args,
|
||||
},
|
||||
}
|
||||
if reasoning != "" {
|
||||
message.Reasoning = &reasoning
|
||||
}
|
||||
*c = append(*c, schema.Choice{
|
||||
FinishReason: &functionCallReason,
|
||||
Message: &schema.Message{
|
||||
Role: "assistant",
|
||||
Content: &textContentToReturn,
|
||||
FunctionCall: map[string]interface{}{
|
||||
"name": name,
|
||||
"arguments": args,
|
||||
},
|
||||
},
|
||||
Message: message,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
108
core/http/routes/anthropic.go
Normal file
108
core/http/routes/anthropic.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package routes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/anthropic"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
func RegisterAnthropicRoutes(app *echo.Echo,
|
||||
re *middleware.RequestExtractor,
|
||||
application *application.Application) {
|
||||
|
||||
// Anthropic Messages API endpoint
|
||||
messagesHandler := anthropic.MessagesEndpoint(
|
||||
application.ModelConfigLoader(),
|
||||
application.ModelLoader(),
|
||||
application.TemplatesEvaluator(),
|
||||
application.ApplicationConfig(),
|
||||
)
|
||||
|
||||
messagesMiddleware := []echo.MiddlewareFunc{
|
||||
middleware.TraceMiddleware(application),
|
||||
re.BuildFilteredFirstAvailableDefaultModel(config.BuildUsecaseFilterFn(config.FLAG_CHAT)),
|
||||
re.SetModelAndConfig(func() schema.LocalAIRequest { return new(schema.AnthropicRequest) }),
|
||||
setAnthropicRequestContext(application.ApplicationConfig()),
|
||||
}
|
||||
|
||||
// Main Anthropic endpoint
|
||||
app.POST("/v1/messages", messagesHandler, messagesMiddleware...)
|
||||
|
||||
// Also support without version prefix for compatibility
|
||||
app.POST("/messages", messagesHandler, messagesMiddleware...)
|
||||
}
|
||||
|
||||
// setAnthropicRequestContext sets up the context and cancel function for Anthropic requests
|
||||
func setAnthropicRequestContext(appConfig *config.ApplicationConfig) echo.MiddlewareFunc {
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.AnthropicRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "model is required")
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, "model configuration not found")
|
||||
}
|
||||
|
||||
// Extract or generate the correlation ID
|
||||
// Anthropic uses x-request-id header
|
||||
correlationID := c.Request().Header.Get("x-request-id")
|
||||
if correlationID == "" {
|
||||
correlationID = uuid.New().String()
|
||||
}
|
||||
c.Response().Header().Set("x-request-id", correlationID)
|
||||
|
||||
// Set up context with cancellation
|
||||
reqCtx := c.Request().Context()
|
||||
c1, cancel := context.WithCancel(appConfig.Context)
|
||||
|
||||
// Cancel when request context is cancelled (client disconnects)
|
||||
go func() {
|
||||
select {
|
||||
case <-reqCtx.Done():
|
||||
cancel()
|
||||
case <-c1.Done():
|
||||
// Already cancelled
|
||||
}
|
||||
}()
|
||||
|
||||
// Add the correlation ID to the new context
|
||||
ctxWithCorrelationID := context.WithValue(c1, middleware.CorrelationIDKey, correlationID)
|
||||
|
||||
input.Context = ctxWithCorrelationID
|
||||
input.Cancel = cancel
|
||||
|
||||
if cfg.Model == "" {
|
||||
xlog.Debug("replacing empty cfg.Model with input value", "input.Model", input.Model)
|
||||
cfg.Model = input.Model
|
||||
}
|
||||
|
||||
c.Set(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST, input)
|
||||
c.Set(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG, cfg)
|
||||
|
||||
// Log the Anthropic API version if provided
|
||||
anthropicVersion := c.Request().Header.Get("anthropic-version")
|
||||
if anthropicVersion != "" {
|
||||
xlog.Debug("Anthropic API version", "version", anthropicVersion)
|
||||
}
|
||||
|
||||
// Validate max_tokens is provided
|
||||
if input.MaxTokens <= 0 {
|
||||
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("max_tokens is required and must be greater than 0"))
|
||||
}
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1368,6 +1368,7 @@ async function promptGPT(systemPrompt, input) {
|
||||
let lastAssistantMessageIndex = -1;
|
||||
let lastThinkingMessageIndex = -1;
|
||||
let lastThinkingScrollTime = 0;
|
||||
let hasReasoningFromAPI = false; // Track if we're receiving reasoning from API (skip tag-based detection)
|
||||
const THINKING_SCROLL_THROTTLE = 200; // Throttle scrolling to every 200ms
|
||||
|
||||
try {
|
||||
@@ -1401,19 +1402,24 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Handle different event types
|
||||
switch (eventData.type) {
|
||||
case "reasoning":
|
||||
hasReasoningFromAPI = true; // Mark that we're receiving reasoning from API
|
||||
if (eventData.content) {
|
||||
// Insert reasoning before assistant message if it exists
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) break; // Chat was deleted
|
||||
const isMCPMode = currentChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
|
||||
// Insert thinking before assistant message if it exists (always use "thinking" role)
|
||||
if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
|
||||
targetHistory.splice(lastAssistantMessageIndex, 0, {
|
||||
role: "reasoning",
|
||||
role: "thinking",
|
||||
content: eventData.content,
|
||||
html: DOMPurify.sanitize(marked.parse(eventData.content)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: false // Reasoning is always collapsed
|
||||
expanded: shouldExpand
|
||||
});
|
||||
lastAssistantMessageIndex++; // Adjust index since we inserted
|
||||
// Scroll smoothly after adding reasoning
|
||||
// Scroll smoothly after adding thinking
|
||||
setTimeout(() => {
|
||||
const chatContainer = document.getElementById('chat');
|
||||
if (chatContainer) {
|
||||
@@ -1425,7 +1431,7 @@ async function promptGPT(systemPrompt, input) {
|
||||
}, 100);
|
||||
} else {
|
||||
// No assistant message yet, just add normally
|
||||
chatStore.add("reasoning", eventData.content, null, null, chatId);
|
||||
chatStore.add("thinking", eventData.content, null, null, chatId);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -1491,14 +1497,17 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Only update display if this is the active chat (interval will handle it)
|
||||
// Don't call updateTokensPerSecond here to avoid unnecessary updates
|
||||
|
||||
// Check for thinking tags in the chunk (incremental detection)
|
||||
if (contentChunk.includes("<thinking>") || contentChunk.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
}
|
||||
|
||||
if (contentChunk.includes("</thinking>") || contentChunk.includes("</think>")) {
|
||||
// Only check for thinking tags if we're NOT receiving reasoning from API
|
||||
// This prevents duplicate thinking/reasoning messages
|
||||
if (!hasReasoningFromAPI) {
|
||||
// Check for thinking tags in the chunk (incremental detection)
|
||||
if (contentChunk.includes("<thinking>") || contentChunk.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
}
|
||||
|
||||
if (contentChunk.includes("</thinking>") || contentChunk.includes("</think>")) {
|
||||
isThinking = false;
|
||||
// When closing tag is detected, process the accumulated thinking content
|
||||
if (thinkingContent.trim()) {
|
||||
@@ -1552,10 +1561,11 @@ async function promptGPT(systemPrompt, input) {
|
||||
}
|
||||
thinkingContent = "";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
// Handle content based on thinking state (only if not receiving reasoning from API)
|
||||
if (!hasReasoningFromAPI && isThinking) {
|
||||
thinkingContent += contentChunk;
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) break; // Chat was deleted
|
||||
@@ -1637,7 +1647,10 @@ async function promptGPT(systemPrompt, input) {
|
||||
|
||||
// Process any thinking tags that might be in the accumulated content
|
||||
// This handles cases where tags are split across chunks
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = hasReasoningFromAPI
|
||||
? { regularContent: regularContent, thinkingContent: "" }
|
||||
: processThinkingTags(regularContent);
|
||||
|
||||
// Update or create assistant message with processed regular content
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
@@ -1645,10 +1658,10 @@ async function promptGPT(systemPrompt, input) {
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
if (processedRegular && processedRegular.trim()) {
|
||||
chatStore.add("assistant", processedRegular, null, null, chatId, requestModel);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
}
|
||||
// Create assistant message if we have any content (even if empty string after processing)
|
||||
// This ensures the message is created and can be updated with more content later
|
||||
chatStore.add("assistant", processedRegular || "", null, null, chatId, requestModel);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
} else {
|
||||
const lastMessage = targetHistory[lastAssistantMessageIndex];
|
||||
if (lastMessage && lastMessage.role === "assistant") {
|
||||
@@ -1686,7 +1699,10 @@ async function promptGPT(systemPrompt, input) {
|
||||
if (assistantContentBuffer.length > 0) {
|
||||
const regularContent = assistantContentBuffer.join("");
|
||||
// Process any remaining thinking tags that might be in the buffer
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const { regularContent: processedRegular, thinkingContent: processedThinking } = hasReasoningFromAPI
|
||||
? { regularContent: regularContent, thinkingContent: "" }
|
||||
: processThinkingTags(regularContent);
|
||||
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) {
|
||||
@@ -1719,23 +1735,26 @@ async function promptGPT(systemPrompt, input) {
|
||||
}
|
||||
|
||||
// Then update or create assistant message
|
||||
// Always create/update assistant message if we have any content
|
||||
if (lastAssistantMessageIndex !== -1) {
|
||||
const lastMessage = targetHistory[lastAssistantMessageIndex];
|
||||
if (lastMessage && lastMessage.role === "assistant") {
|
||||
lastMessage.content = (lastMessage.content || "") + (processedRegular || "");
|
||||
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
|
||||
}
|
||||
} else if (processedRegular && processedRegular.trim()) {
|
||||
} else {
|
||||
// Create assistant message (even if empty, so it can be updated with more content)
|
||||
const request = activeRequests.get(chatId);
|
||||
const requestModel = request?.model || null;
|
||||
chatStore.add("assistant", processedRegular, null, null, chatId, requestModel);
|
||||
chatStore.add("assistant", processedRegular || "", null, null, chatId, requestModel);
|
||||
lastAssistantMessageIndex = targetHistory.length - 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Final thinking content flush if any data remains (from incremental detection)
|
||||
// Only process if we're NOT receiving reasoning from API (to avoid duplicates)
|
||||
const finalChat = chatStore.getChat(chatId);
|
||||
if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
if (finalChat && !hasReasoningFromAPI && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
const finalHistory = finalChat.history;
|
||||
// Extract thinking content if tags are present
|
||||
const thinkingMatch = thinkingContent.match(/<(?:thinking|redacted_reasoning)>(.*?)<\/(?:thinking|redacted_reasoning)>/s);
|
||||
@@ -1891,9 +1910,13 @@ async function promptGPT(systemPrompt, input) {
|
||||
let buffer = "";
|
||||
let contentBuffer = [];
|
||||
let thinkingContent = "";
|
||||
let reasoningContent = ""; // Track reasoning from API reasoning field
|
||||
let isThinking = false;
|
||||
let lastThinkingMessageIndex = -1;
|
||||
let lastReasoningMessageIndex = -1; // Track reasoning message separately
|
||||
let lastAssistantMessageIndex = -1; // Track assistant message for reasoning placement
|
||||
let lastThinkingScrollTime = 0;
|
||||
let hasReasoningFromAPI = false; // Track if we're receiving reasoning from API (skip tag-based detection)
|
||||
const THINKING_SCROLL_THROTTLE = 200; // Throttle scrolling to every 200ms
|
||||
|
||||
try {
|
||||
@@ -1929,30 +1952,100 @@ async function promptGPT(systemPrompt, input) {
|
||||
chatStore.updateTokenUsage(jsonData.usage, chatId);
|
||||
}
|
||||
|
||||
const token = jsonData.choices[0].delta.content;
|
||||
const token = jsonData.choices?.[0]?.delta?.content;
|
||||
const reasoningDelta = jsonData.choices?.[0]?.delta?.reasoning;
|
||||
|
||||
if (token) {
|
||||
// Check for thinking tags
|
||||
if (token.includes("<thinking>") || token.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
// Handle reasoning from API reasoning field - always use "thinking" role
|
||||
if (reasoningDelta && reasoningDelta.trim() !== "") {
|
||||
hasReasoningFromAPI = true; // Mark that we're receiving reasoning from API
|
||||
reasoningContent += reasoningDelta;
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (!currentChat) {
|
||||
// Chat was deleted, skip this line
|
||||
return;
|
||||
}
|
||||
if (token.includes("</thinking>") || token.includes("</think>")) {
|
||||
isThinking = false;
|
||||
if (thinkingContent.trim()) {
|
||||
// Only add the final thinking message if we don't already have one
|
||||
if (lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
const isMCPMode = currentChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
|
||||
|
||||
// Only create/update thinking message if we have actual content
|
||||
if (reasoningContent.trim() !== "") {
|
||||
// Update or create thinking message (always use "thinking" role, not "reasoning")
|
||||
if (lastReasoningMessageIndex === -1) {
|
||||
// Find the last assistant message index to insert thinking before it
|
||||
const targetHistory = currentChat.history;
|
||||
const assistantIndex = targetHistory.length - 1;
|
||||
if (assistantIndex >= 0 && targetHistory[assistantIndex]?.role === "assistant") {
|
||||
// Insert thinking before assistant message
|
||||
targetHistory.splice(assistantIndex, 0, {
|
||||
role: "thinking",
|
||||
content: reasoningContent,
|
||||
html: DOMPurify.sanitize(marked.parse(reasoningContent)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: shouldExpand
|
||||
});
|
||||
lastReasoningMessageIndex = assistantIndex;
|
||||
lastAssistantMessageIndex = assistantIndex + 1; // Adjust for inserted thinking
|
||||
} else {
|
||||
// No assistant message yet, just add normally
|
||||
chatStore.add("thinking", reasoningContent, null, null, chatId);
|
||||
lastReasoningMessageIndex = currentChat.history.length - 1;
|
||||
}
|
||||
} else {
|
||||
// Update existing thinking message
|
||||
const targetHistory = currentChat.history;
|
||||
if (lastReasoningMessageIndex >= 0 && lastReasoningMessageIndex < targetHistory.length) {
|
||||
const thinkingMessage = targetHistory[lastReasoningMessageIndex];
|
||||
if (thinkingMessage && thinkingMessage.role === "thinking") {
|
||||
thinkingMessage.content = reasoningContent;
|
||||
thinkingMessage.html = DOMPurify.sanitize(marked.parse(reasoningContent));
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Scroll when reasoning is updated (throttled)
|
||||
const now = Date.now();
|
||||
if (now - lastThinkingScrollTime > THINKING_SCROLL_THROTTLE) {
|
||||
lastThinkingScrollTime = now;
|
||||
setTimeout(() => {
|
||||
const chatContainer = document.getElementById('chat');
|
||||
if (chatContainer) {
|
||||
chatContainer.scrollTo({
|
||||
top: chatContainer.scrollHeight,
|
||||
behavior: 'smooth'
|
||||
});
|
||||
}
|
||||
scrollThinkingBoxToBottom();
|
||||
}, 100);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
thinkingContent += token;
|
||||
if (token && token.trim() !== "") {
|
||||
// Only check for thinking tags if we're NOT receiving reasoning from API
|
||||
// This prevents duplicate thinking/reasoning messages
|
||||
if (!hasReasoningFromAPI) {
|
||||
// Check for thinking tags (legacy support - models that output tags directly)
|
||||
if (token.includes("<thinking>") || token.includes("<think>")) {
|
||||
isThinking = true;
|
||||
thinkingContent = "";
|
||||
lastThinkingMessageIndex = -1;
|
||||
return;
|
||||
}
|
||||
if (token.includes("</thinking>") || token.includes("</think>")) {
|
||||
isThinking = false;
|
||||
if (thinkingContent.trim()) {
|
||||
// Only add the final thinking message if we don't already have one
|
||||
if (lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle content based on thinking state
|
||||
if (isThinking) {
|
||||
thinkingContent += token;
|
||||
// Count tokens for rate calculation (per chat)
|
||||
const request = activeRequests.get(chatId);
|
||||
if (request) {
|
||||
@@ -1995,7 +2088,42 @@ async function promptGPT(systemPrompt, input) {
|
||||
}, 100);
|
||||
}
|
||||
} else {
|
||||
// Not in thinking state, add to content buffer
|
||||
contentBuffer.push(token);
|
||||
// Track assistant message index for reasoning placement
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
// Find or create assistant message index
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// If no assistant message yet, it will be created when we flush contentBuffer
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Receiving reasoning from API, just add token to content buffer
|
||||
contentBuffer.push(token);
|
||||
// Track assistant message index for reasoning placement
|
||||
if (lastAssistantMessageIndex === -1) {
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
// Find or create assistant message index
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// If no assistant message yet, it will be created when we flush contentBuffer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
@@ -2007,6 +2135,17 @@ async function promptGPT(systemPrompt, input) {
|
||||
// Efficiently update the chat in batch
|
||||
if (contentBuffer.length > 0) {
|
||||
addToChat(contentBuffer.join(""));
|
||||
// Update assistant message index after adding content
|
||||
const currentChat = chatStore.getChat(chatId);
|
||||
if (currentChat) {
|
||||
const targetHistory = currentChat.history;
|
||||
for (let i = targetHistory.length - 1; i >= 0; i--) {
|
||||
if (targetHistory[i].role === "assistant") {
|
||||
lastAssistantMessageIndex = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
contentBuffer = [];
|
||||
// Scroll when assistant content is updated (this will also show thinking messages above)
|
||||
setTimeout(() => {
|
||||
@@ -2025,7 +2164,30 @@ async function promptGPT(systemPrompt, input) {
|
||||
if (contentBuffer.length > 0) {
|
||||
addToChat(contentBuffer.join(""));
|
||||
}
|
||||
|
||||
// Final reasoning flush if any data remains - always use "thinking" role
|
||||
const finalChat = chatStore.getChat(chatId);
|
||||
if (finalChat && reasoningContent.trim() && lastReasoningMessageIndex === -1) {
|
||||
const isMCPMode = finalChat.mcpMode || false;
|
||||
const shouldExpand = !isMCPMode;
|
||||
const targetHistory = finalChat.history;
|
||||
// Find assistant message to insert before
|
||||
const assistantIndex = targetHistory.length - 1;
|
||||
if (assistantIndex >= 0 && targetHistory[assistantIndex]?.role === "assistant") {
|
||||
targetHistory.splice(assistantIndex, 0, {
|
||||
role: "thinking",
|
||||
content: reasoningContent,
|
||||
html: DOMPurify.sanitize(marked.parse(reasoningContent)),
|
||||
image: [],
|
||||
audio: [],
|
||||
expanded: shouldExpand
|
||||
});
|
||||
} else {
|
||||
chatStore.add("thinking", reasoningContent, null, null, chatId);
|
||||
}
|
||||
}
|
||||
|
||||
// Final thinking content flush (legacy tag-based thinking)
|
||||
if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
|
||||
chatStore.add("thinking", thinkingContent, null, null, chatId);
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ SOFTWARE.
|
||||
__chatContextSize = {{ .ContextSize }};
|
||||
{{ end }}
|
||||
|
||||
// Store gallery configs for header icon display
|
||||
// Store gallery configs for header icon display and model info modal
|
||||
window.__galleryConfigs = {};
|
||||
{{ $allGalleryConfigs:=.GalleryConfig }}
|
||||
{{ range $modelName, $galleryConfig := $allGalleryConfigs }}
|
||||
@@ -49,6 +49,16 @@ SOFTWARE.
|
||||
{{ if $galleryConfig.Icon }}
|
||||
window.__galleryConfigs["{{$modelName}}"].Icon = "{{$galleryConfig.Icon}}";
|
||||
{{ end }}
|
||||
{{ if $galleryConfig.Description }}
|
||||
window.__galleryConfigs["{{$modelName}}"].Description = {{ printf "%q" $galleryConfig.Description }};
|
||||
{{ end }}
|
||||
{{ if $galleryConfig.URLs }}
|
||||
window.__galleryConfigs["{{$modelName}}"].URLs = [
|
||||
{{ range $idx, $url := $galleryConfig.URLs }}
|
||||
{{ if $idx }},{{ end }}{{ printf "%q" $url }}
|
||||
{{ end }}
|
||||
];
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
// Function to initialize store
|
||||
@@ -326,10 +336,10 @@ SOFTWARE.
|
||||
c += DOMPurify.sanitize(marked.parse(line));
|
||||
});
|
||||
}
|
||||
// Set expanded state: thinking is expanded by default in non-MCP mode, collapsed in MCP mode
|
||||
// Reasoning, tool_call, and tool_result are always collapsed by default
|
||||
// Set expanded state: thinking and reasoning are expanded by default in non-MCP mode, collapsed in MCP mode
|
||||
// tool_call and tool_result are always collapsed by default
|
||||
const isMCPMode = chat.mcpMode || false;
|
||||
const shouldExpand = (role === "thinking" && !isMCPMode) || false;
|
||||
const shouldExpand = ((role === "thinking" || role === "reasoning") && !isMCPMode) || false;
|
||||
chat.history.push({ role, content, html: c, image, audio, expanded: shouldExpand, model: messageModel });
|
||||
|
||||
// Auto-name chat from first user message
|
||||
@@ -497,6 +507,11 @@ SOFTWARE.
|
||||
activeChat.model = modelName;
|
||||
activeChat.updatedAt = Date.now();
|
||||
|
||||
// Update model info modal with new model
|
||||
if (window.updateModelInfoModal) {
|
||||
window.updateModelInfoModal(modelName);
|
||||
}
|
||||
|
||||
// Get context size from data attribute
|
||||
let contextSize = null;
|
||||
if (selectedOption.dataset.contextSize) {
|
||||
@@ -536,18 +551,23 @@ SOFTWARE.
|
||||
}
|
||||
|
||||
// Update model selector to reflect the change (ensure it stays in sync)
|
||||
// Note: We don't dispatch a change event here to avoid infinite loop
|
||||
// The selector is already updated via user interaction or programmatic change
|
||||
const modelSelector = document.getElementById('modelSelector');
|
||||
if (modelSelector) {
|
||||
// Find and select the option matching the model
|
||||
const optionValue = 'chat/' + modelName;
|
||||
for (let i = 0; i < modelSelector.options.length; i++) {
|
||||
if (modelSelector.options[i].value === optionValue) {
|
||||
modelSelector.selectedIndex = i;
|
||||
// Only update if it's different to avoid unnecessary updates
|
||||
if (modelSelector.selectedIndex !== i) {
|
||||
modelSelector.selectedIndex = i;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Trigger Alpine reactivity by dispatching change event
|
||||
modelSelector.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
// Don't dispatch change event here - it would cause infinite recursion
|
||||
// The selector is already in sync with the model
|
||||
}
|
||||
|
||||
// Trigger MCP availability check in Alpine component
|
||||
@@ -603,27 +623,52 @@ SOFTWARE.
|
||||
<div class="flex items-center justify-between gap-2">
|
||||
<label class="text-xs font-medium text-[var(--color-text-secondary)] uppercase tracking-wide flex-shrink-0">Model</label>
|
||||
<div class="flex items-center gap-1 flex-shrink-0">
|
||||
{{ if $model }}
|
||||
{{ $galleryConfig:= index $allGalleryConfigs $model}}
|
||||
{{ if $galleryConfig }}
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ if $model }}
|
||||
<a href="/models/edit/{{$model}}"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
{{ end }}
|
||||
<!-- Info button - reactive to active chat model -->
|
||||
<template x-if="$store.chat.activeChat() && $store.chat.activeChat().model && window.__galleryConfigs && window.__galleryConfigs[$store.chat.activeChat().model]">
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
:data-model-name="$store.chat.activeChat().model"
|
||||
@click="if (window.updateModelInfoModal) { window.updateModelInfoModal($store.chat.activeChat().model, true); }"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
</template>
|
||||
<!-- Fallback info button for initial model from server -->
|
||||
<template x-if="(!$store.chat.activeChat() || !$store.chat.activeChat().model) && window.__galleryConfigs && window.__galleryConfigs['{{$model}}']">
|
||||
<button
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-primary)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
data-modal-target="model-info-modal"
|
||||
data-modal-toggle="model-info-modal"
|
||||
data-model-name="{{$model}}"
|
||||
@click="if (window.updateModelInfoModal) { window.updateModelInfoModal('{{$model}}', true); }"
|
||||
title="Model Information">
|
||||
<i class="fas fa-info-circle"></i>
|
||||
</button>
|
||||
</template>
|
||||
<!-- Edit button - reactive to active chat model -->
|
||||
<template x-if="$store.chat.activeChat() && $store.chat.activeChat().model">
|
||||
<a :href="'/models/edit/' + $store.chat.activeChat().model"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
</template>
|
||||
<!-- Fallback edit button for initial model from server -->
|
||||
<template x-if="!$store.chat.activeChat() || !$store.chat.activeChat().model">
|
||||
{{ if $model }}
|
||||
<a href="/models/edit/{{$model}}"
|
||||
class="text-[var(--color-text-secondary)] hover:text-[var(--color-warning)] transition-colors text-xs p-1 rounded hover:bg-[var(--color-bg-primary)]"
|
||||
title="Edit Model Configuration">
|
||||
<i class="fas fa-edit"></i>
|
||||
</a>
|
||||
{{ end }}
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
<select
|
||||
@@ -1488,17 +1533,14 @@ SOFTWARE.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Modal moved outside of sidebar to appear in center of page -->
|
||||
{{ if $model }}
|
||||
{{ $galleryConfig:= index $allGalleryConfigs $model}}
|
||||
{{ if $galleryConfig }}
|
||||
<div id="model-info-modal" tabindex="-1" aria-hidden="true" class="hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 flex justify-center items-center w-full md:inset-0 h-[calc(100%-1rem)] max-h-full">
|
||||
<!-- Modal moved outside of sidebar to appear in center of page - Always available, content updated dynamically -->
|
||||
<div id="model-info-modal" tabindex="-1" aria-hidden="true" class="hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 flex justify-center items-center w-full h-full md:inset-0 max-h-full" style="padding: 1rem;">
|
||||
<div class="relative p-4 w-full max-w-2xl max-h-full">
|
||||
<div class="relative p-4 w-full max-w-2xl max-h-full bg-white rounded-lg shadow dark:bg-gray-700">
|
||||
<!-- Header -->
|
||||
<div class="flex items-center justify-between p-4 md:p-5 border-b rounded-t dark:border-gray-600">
|
||||
<h3 class="text-xl font-semibold text-gray-900 dark:text-white">{{ $model }}</h3>
|
||||
<button class="text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white" data-modal-hide="model-info-modal">
|
||||
<h3 id="model-info-modal-title" class="text-xl font-semibold text-gray-900 dark:text-white">{{ if $model }}{{ $model }}{{ end }}</h3>
|
||||
<button class="text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white" data-modal-hide="model-info-modal" @click="if (window.closeModelInfoModal) { window.closeModelInfoModal(); }">
|
||||
<svg class="w-3 h-3" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 14 14">
|
||||
<path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m1 1 6 6m0 0 6 6M7 7l6-6M7 7l-6 6"/>
|
||||
</svg>
|
||||
@@ -1509,29 +1551,24 @@ SOFTWARE.
|
||||
<!-- Body -->
|
||||
<div class="p-4 md:p-5 space-y-4">
|
||||
<div class="flex justify-center items-center">
|
||||
{{ if $galleryConfig.Icon }}<img class="lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3 entered loaded" src="{{$galleryConfig.Icon}}" loading="lazy"/>{{end}}
|
||||
<img id="model-info-modal-icon" class="lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3 entered loaded" style="display: none;" loading="lazy"/>
|
||||
</div>
|
||||
<div id="model-info-description" class="text-base leading-relaxed text-gray-500 dark:text-gray-400 break-words max-w-full">{{ $galleryConfig.Description }}</div>
|
||||
<div id="model-info-description" class="text-base leading-relaxed text-gray-500 dark:text-gray-400 break-words max-w-full"></div>
|
||||
<hr>
|
||||
<p class="text-sm font-semibold text-gray-900 dark:text-white">Links</p>
|
||||
<ul>
|
||||
{{range $galleryConfig.URLs}}
|
||||
<li><a href="{{ . }}" target="_blank">{{ . }}</a></li>
|
||||
{{end}}
|
||||
<ul id="model-info-links">
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<!-- Footer -->
|
||||
<div class="flex items-center p-4 md:p-5 border-t border-gray-200 rounded-b dark:border-gray-600">
|
||||
<button data-modal-hide="model-info-modal" class="py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700">
|
||||
<button data-modal-hide="model-info-modal" class="py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700" @click="if (window.closeModelInfoModal) { window.closeModelInfoModal(); }">
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
<!-- Alpine store initialization and utilities -->
|
||||
<script>
|
||||
@@ -1742,10 +1779,20 @@ SOFTWARE.
|
||||
});
|
||||
|
||||
// Also listen for click events on modal toggle buttons
|
||||
document.querySelectorAll('[data-modal-toggle="model-info-modal"]').forEach(button => {
|
||||
button.addEventListener('click', () => {
|
||||
// Use event delegation to handle dynamically created buttons
|
||||
document.addEventListener('click', (e) => {
|
||||
const button = e.target.closest('[data-modal-toggle="model-info-modal"]');
|
||||
if (button) {
|
||||
// Update modal with current model before showing
|
||||
if (window.Alpine && window.Alpine.store("chat")) {
|
||||
const activeChat = window.Alpine.store("chat").activeChat();
|
||||
const modelName = activeChat ? activeChat.model : (button.dataset.modelName || (document.getElementById("chat-model") ? document.getElementById("chat-model").value : null));
|
||||
if (modelName && window.updateModelInfoModal) {
|
||||
window.updateModelInfoModal(modelName, true);
|
||||
}
|
||||
}
|
||||
setTimeout(processMarkdown, 300);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Process on initial load if libraries are ready
|
||||
@@ -1786,12 +1833,176 @@ SOFTWARE.
|
||||
syncModelSelectorOnLoad();
|
||||
}
|
||||
|
||||
// Function to update model info modal with current model
|
||||
// Set openModal to true to actually open the modal, false to just update content
|
||||
window.updateModelInfoModal = function(modelName, openModal = false) {
|
||||
if (!modelName) {
|
||||
return;
|
||||
}
|
||||
if (!window.__galleryConfigs) {
|
||||
return;
|
||||
}
|
||||
|
||||
const galleryConfig = window.__galleryConfigs[modelName];
|
||||
// Check if galleryConfig exists and has at least one property
|
||||
if (!galleryConfig || Object.keys(galleryConfig).length === 0) {
|
||||
// Still update the modal title even if no config, so user can see which model they clicked
|
||||
const titleEl = document.getElementById('model-info-modal-title');
|
||||
if (titleEl) {
|
||||
titleEl.textContent = modelName;
|
||||
}
|
||||
// Show message that no info is available
|
||||
const descEl = document.getElementById('model-info-description');
|
||||
if (descEl) {
|
||||
descEl.textContent = 'No additional information available for this model.';
|
||||
}
|
||||
const linksEl = document.getElementById('model-info-links');
|
||||
if (linksEl) {
|
||||
linksEl.innerHTML = '';
|
||||
}
|
||||
const iconEl = document.getElementById('model-info-modal-icon');
|
||||
if (iconEl) {
|
||||
iconEl.style.display = 'none';
|
||||
}
|
||||
// Only open the modal if explicitly requested
|
||||
if (openModal) {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
modalElement.classList.remove('hidden');
|
||||
modalElement.setAttribute('aria-hidden', 'false');
|
||||
// Add backdrop
|
||||
let backdrop = document.querySelector('.modal-backdrop');
|
||||
if (!backdrop) {
|
||||
backdrop = document.createElement('div');
|
||||
backdrop.className = 'modal-backdrop fixed inset-0 bg-gray-900 bg-opacity-50 dark:bg-opacity-80 z-40';
|
||||
document.body.appendChild(backdrop);
|
||||
backdrop.addEventListener('click', () => {
|
||||
closeModelInfoModal();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Update modal title
|
||||
const titleEl = document.getElementById('model-info-modal-title');
|
||||
if (titleEl) {
|
||||
titleEl.textContent = modelName;
|
||||
}
|
||||
|
||||
// Update icon
|
||||
const iconEl = document.getElementById('model-info-modal-icon');
|
||||
if (iconEl) {
|
||||
if (galleryConfig.Icon) {
|
||||
iconEl.src = galleryConfig.Icon;
|
||||
iconEl.style.display = 'block';
|
||||
} else {
|
||||
iconEl.style.display = 'none';
|
||||
}
|
||||
}
|
||||
|
||||
// Update description
|
||||
const descEl = document.getElementById('model-info-description');
|
||||
if (descEl) {
|
||||
descEl.textContent = galleryConfig.Description || 'No description available.';
|
||||
}
|
||||
|
||||
// Update links
|
||||
const linksEl = document.getElementById('model-info-links');
|
||||
if (linksEl && galleryConfig.URLs && Array.isArray(galleryConfig.URLs) && galleryConfig.URLs.length > 0) {
|
||||
linksEl.innerHTML = '';
|
||||
galleryConfig.URLs.forEach(url => {
|
||||
const li = document.createElement('li');
|
||||
const a = document.createElement('a');
|
||||
a.href = url;
|
||||
a.target = '_blank';
|
||||
a.textContent = url;
|
||||
li.appendChild(a);
|
||||
linksEl.appendChild(li);
|
||||
});
|
||||
} else if (linksEl) {
|
||||
linksEl.innerHTML = '<li>No links available</li>';
|
||||
}
|
||||
|
||||
// Only open the modal if explicitly requested
|
||||
if (openModal) {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
// Ensure positioning classes are present (they might have been removed)
|
||||
if (!modalElement.classList.contains('flex')) {
|
||||
modalElement.classList.add('flex');
|
||||
}
|
||||
if (!modalElement.classList.contains('justify-center')) {
|
||||
modalElement.classList.add('justify-center');
|
||||
}
|
||||
if (!modalElement.classList.contains('items-center')) {
|
||||
modalElement.classList.add('items-center');
|
||||
}
|
||||
// Ensure fixed positioning
|
||||
if (!modalElement.classList.contains('fixed')) {
|
||||
modalElement.classList.add('fixed');
|
||||
}
|
||||
// Ensure full width and height
|
||||
if (!modalElement.classList.contains('w-full')) {
|
||||
modalElement.classList.add('w-full');
|
||||
}
|
||||
if (!modalElement.classList.contains('h-full')) {
|
||||
modalElement.classList.add('h-full');
|
||||
}
|
||||
// Ensure padding is set
|
||||
if (!modalElement.style.padding) {
|
||||
modalElement.style.padding = '1rem';
|
||||
}
|
||||
// Remove hidden class if present
|
||||
modalElement.classList.remove('hidden');
|
||||
// Set aria-hidden to false
|
||||
modalElement.setAttribute('aria-hidden', 'false');
|
||||
// Add backdrop if needed
|
||||
let backdrop = document.querySelector('.modal-backdrop');
|
||||
if (!backdrop) {
|
||||
backdrop = document.createElement('div');
|
||||
backdrop.className = 'modal-backdrop fixed inset-0 bg-gray-900 bg-opacity-50 dark:bg-opacity-80 z-40';
|
||||
document.body.appendChild(backdrop);
|
||||
backdrop.addEventListener('click', () => {
|
||||
window.closeModelInfoModal();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Function to close the model info modal
|
||||
window.closeModelInfoModal = function() {
|
||||
const modalElement = document.getElementById('model-info-modal');
|
||||
if (modalElement) {
|
||||
modalElement.classList.add('hidden');
|
||||
modalElement.setAttribute('aria-hidden', 'true');
|
||||
}
|
||||
const backdrop = document.querySelector('.modal-backdrop');
|
||||
if (backdrop) {
|
||||
backdrop.remove();
|
||||
}
|
||||
};
|
||||
|
||||
// Also sync after Alpine initializes (in case it runs after DOMContentLoaded)
|
||||
function initializeModelInfo() {
|
||||
syncModelSelectorOnLoad();
|
||||
// Initialize model info modal content with current model (but don't open it)
|
||||
if (window.updateModelInfoModal && window.Alpine && window.Alpine.store("chat")) {
|
||||
const activeChat = window.Alpine.store("chat").activeChat();
|
||||
const modelName = activeChat ? activeChat.model : (document.getElementById("chat-model") ? document.getElementById("chat-model").value : null);
|
||||
if (modelName) {
|
||||
window.updateModelInfoModal(modelName, false); // false = don't open, just update content
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (window.Alpine) {
|
||||
Alpine.nextTick(syncModelSelectorOnLoad);
|
||||
Alpine.nextTick(initializeModelInfo);
|
||||
} else {
|
||||
document.addEventListener('alpine:init', () => {
|
||||
Alpine.nextTick(syncModelSelectorOnLoad);
|
||||
Alpine.nextTick(initializeModelInfo);
|
||||
});
|
||||
}
|
||||
</script>
|
||||
|
||||
core/schema/anthropic.go (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// AnthropicRequest represents a request to the Anthropic Messages API
|
||||
// https://docs.anthropic.com/claude/reference/messages_post
|
||||
type AnthropicRequest struct {
|
||||
Model string `json:"model"`
|
||||
Messages []AnthropicMessage `json:"messages"`
|
||||
MaxTokens int `json:"max_tokens"`
|
||||
Metadata map[string]string `json:"metadata,omitempty"`
|
||||
StopSequences []string `json:"stop_sequences,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
Temperature *float64 `json:"temperature,omitempty"`
|
||||
TopK *int `json:"top_k,omitempty"`
|
||||
TopP *float64 `json:"top_p,omitempty"`
|
||||
Tools []AnthropicTool `json:"tools,omitempty"`
|
||||
ToolChoice interface{} `json:"tool_choice,omitempty"`
|
||||
|
||||
// Internal fields for request handling
|
||||
Context context.Context `json:"-"`
|
||||
Cancel context.CancelFunc `json:"-"`
|
||||
}
|
||||
|
||||
// ModelName implements the LocalAIRequest interface
|
||||
func (ar *AnthropicRequest) ModelName(s *string) string {
|
||||
if s != nil {
|
||||
ar.Model = *s
|
||||
}
|
||||
return ar.Model
|
||||
}
|
||||
|
||||
// AnthropicTool represents a tool definition in the Anthropic format
|
||||
type AnthropicTool struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
InputSchema map[string]interface{} `json:"input_schema"`
|
||||
}
|
||||
|
||||
// AnthropicMessage represents a message in the Anthropic format
|
||||
type AnthropicMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content interface{} `json:"content"`
|
||||
}
|
||||
|
||||
// AnthropicContentBlock represents a content block in an Anthropic message
|
||||
type AnthropicContentBlock struct {
|
||||
Type string `json:"type"`
|
||||
Text string `json:"text,omitempty"`
|
||||
Source *AnthropicImageSource `json:"source,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Input map[string]interface{} `json:"input,omitempty"`
|
||||
ToolUseID string `json:"tool_use_id,omitempty"`
|
||||
Content interface{} `json:"content,omitempty"`
|
||||
IsError *bool `json:"is_error,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicImageSource represents an image source in Anthropic format
|
||||
type AnthropicImageSource struct {
|
||||
Type string `json:"type"`
|
||||
MediaType string `json:"media_type"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
// AnthropicResponse represents a response from the Anthropic Messages API
|
||||
type AnthropicResponse struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Role string `json:"role"`
|
||||
Content []AnthropicContentBlock `json:"content"`
|
||||
Model string `json:"model"`
|
||||
StopReason *string `json:"stop_reason"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
Usage AnthropicUsage `json:"usage"`
|
||||
}
|
||||
|
||||
// AnthropicUsage represents token usage in Anthropic format
|
||||
type AnthropicUsage struct {
|
||||
InputTokens int `json:"input_tokens"`
|
||||
OutputTokens int `json:"output_tokens"`
|
||||
}
|
||||
|
||||
// AnthropicStreamEvent represents a streaming event from the Anthropic API
|
||||
type AnthropicStreamEvent struct {
|
||||
Type string `json:"type"`
|
||||
Index int `json:"index,omitempty"`
|
||||
ContentBlock *AnthropicContentBlock `json:"content_block,omitempty"`
|
||||
Delta *AnthropicStreamDelta `json:"delta,omitempty"`
|
||||
Message *AnthropicStreamMessage `json:"message,omitempty"`
|
||||
Usage *AnthropicUsage `json:"usage,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicStreamDelta represents the delta in a streaming response
|
||||
type AnthropicStreamDelta struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
PartialJSON string `json:"partial_json,omitempty"`
|
||||
StopReason *string `json:"stop_reason,omitempty"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
}
|
||||
|
||||
// AnthropicStreamMessage represents the message object in streaming events
|
||||
type AnthropicStreamMessage struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Role string `json:"role"`
|
||||
Content []AnthropicContentBlock `json:"content"`
|
||||
Model string `json:"model"`
|
||||
StopReason *string `json:"stop_reason"`
|
||||
StopSequence *string `json:"stop_sequence,omitempty"`
|
||||
Usage AnthropicUsage `json:"usage"`
|
||||
}
|
||||
|
||||
// AnthropicErrorResponse represents an error response from the Anthropic API
|
||||
type AnthropicErrorResponse struct {
|
||||
Type string `json:"type"`
|
||||
Error AnthropicError `json:"error"`
|
||||
}
|
||||
|
||||
// AnthropicError represents an error in the Anthropic format
|
||||
type AnthropicError struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// GetStringContent extracts the string content from an AnthropicMessage
|
||||
// Content can be either a string or an array of content blocks
|
||||
func (m *AnthropicMessage) GetStringContent() string {
|
||||
switch content := m.Content.(type) {
|
||||
case string:
|
||||
return content
|
||||
case []interface{}:
|
||||
var result string
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
if blockMap["type"] == "text" {
|
||||
if text, ok := blockMap["text"].(string); ok {
|
||||
result += text
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetContentBlocks extracts content blocks from an AnthropicMessage
|
||||
func (m *AnthropicMessage) GetContentBlocks() []AnthropicContentBlock {
|
||||
switch content := m.Content.(type) {
|
||||
case string:
|
||||
return []AnthropicContentBlock{{Type: "text", Text: content}}
|
||||
case []interface{}:
|
||||
var blocks []AnthropicContentBlock
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
cb := AnthropicContentBlock{}
|
||||
data, err := json.Marshal(blockMap)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if err := json.Unmarshal(data, &cb); err != nil {
|
||||
continue
|
||||
}
|
||||
blocks = append(blocks, cb)
|
||||
}
|
||||
}
|
||||
return blocks
|
||||
}
|
||||
return nil
|
||||
}
|
||||
core/schema/anthropic_test.go (new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
package schema_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Anthropic Schema", func() {
|
||||
Describe("AnthropicRequest", func() {
|
||||
It("should unmarshal a valid request", func() {
|
||||
jsonData := `{
|
||||
"model": "claude-3-sonnet-20240229",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "Hello, world!"}
|
||||
],
|
||||
"system": "You are a helpful assistant.",
|
||||
"temperature": 0.7
|
||||
}`
|
||||
|
||||
var req schema.AnthropicRequest
|
||||
err := json.Unmarshal([]byte(jsonData), &req)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(req.Model).To(Equal("claude-3-sonnet-20240229"))
|
||||
Expect(req.MaxTokens).To(Equal(1024))
|
||||
Expect(len(req.Messages)).To(Equal(1))
|
||||
Expect(req.System).To(Equal("You are a helpful assistant."))
|
||||
Expect(*req.Temperature).To(Equal(0.7))
|
||||
})
|
||||
|
||||
It("should unmarshal a request with tools", func() {
|
||||
jsonData := `{
|
||||
"model": "claude-3-sonnet-20240229",
|
||||
"max_tokens": 1024,
|
||||
"messages": [
|
||||
{"role": "user", "content": "What's the weather?"}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get the current weather",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {"type": "string"}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"tool_choice": {"type": "tool", "name": "get_weather"}
|
||||
}`
|
||||
|
||||
var req schema.AnthropicRequest
|
||||
err := json.Unmarshal([]byte(jsonData), &req)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(req.Tools)).To(Equal(1))
|
||||
Expect(req.Tools[0].Name).To(Equal("get_weather"))
|
||||
Expect(req.Tools[0].Description).To(Equal("Get the current weather"))
|
||||
Expect(req.ToolChoice).ToNot(BeNil())
|
||||
})
|
||||
|
||||
It("should implement LocalAIRequest interface", func() {
|
||||
req := &schema.AnthropicRequest{Model: "test-model"}
|
||||
Expect(req.ModelName(nil)).To(Equal("test-model"))
|
||||
|
||||
newModel := "new-model"
|
||||
Expect(req.ModelName(&newModel)).To(Equal("new-model"))
|
||||
Expect(req.Model).To(Equal("new-model"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicMessage", func() {
|
||||
It("should get string content from string content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: "Hello, world!",
|
||||
}
|
||||
Expect(msg.GetStringContent()).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get string content from array content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: []interface{}{
|
||||
map[string]interface{}{"type": "text", "text": "Hello, "},
|
||||
map[string]interface{}{"type": "text", "text": "world!"},
|
||||
},
|
||||
}
|
||||
Expect(msg.GetStringContent()).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get content blocks from string content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: "Hello, world!",
|
||||
}
|
||||
blocks := msg.GetContentBlocks()
|
||||
Expect(len(blocks)).To(Equal(1))
|
||||
Expect(blocks[0].Type).To(Equal("text"))
|
||||
Expect(blocks[0].Text).To(Equal("Hello, world!"))
|
||||
})
|
||||
|
||||
It("should get content blocks from array content", func() {
|
||||
msg := schema.AnthropicMessage{
|
||||
Role: "user",
|
||||
Content: []interface{}{
|
||||
map[string]interface{}{"type": "text", "text": "Hello"},
|
||||
map[string]interface{}{"type": "image", "source": map[string]interface{}{"type": "base64", "data": "abc123"}},
|
||||
},
|
||||
}
|
||||
blocks := msg.GetContentBlocks()
|
||||
Expect(len(blocks)).To(Equal(2))
|
||||
Expect(blocks[0].Type).To(Equal("text"))
|
||||
Expect(blocks[0].Text).To(Equal("Hello"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicResponse", func() {
|
||||
It("should marshal a valid response", func() {
|
||||
stopReason := "end_turn"
|
||||
resp := schema.AnthropicResponse{
|
||||
ID: "msg_123",
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: "claude-3-sonnet-20240229",
|
||||
StopReason: &stopReason,
|
||||
Content: []schema.AnthropicContentBlock{
|
||||
{Type: "text", Text: "Hello!"},
|
||||
},
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["id"]).To(Equal("msg_123"))
|
||||
Expect(result["type"]).To(Equal("message"))
|
||||
Expect(result["role"]).To(Equal("assistant"))
|
||||
Expect(result["stop_reason"]).To(Equal("end_turn"))
|
||||
})
|
||||
|
||||
It("should marshal a response with tool use", func() {
|
||||
stopReason := "tool_use"
|
||||
resp := schema.AnthropicResponse{
|
||||
ID: "msg_123",
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: "claude-3-sonnet-20240229",
|
||||
StopReason: &stopReason,
|
||||
Content: []schema.AnthropicContentBlock{
|
||||
{
|
||||
Type: "tool_use",
|
||||
ID: "toolu_123",
|
||||
Name: "get_weather",
|
||||
Input: map[string]interface{}{
|
||||
"location": "San Francisco",
|
||||
},
|
||||
},
|
||||
},
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: 10,
|
||||
OutputTokens: 5,
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["stop_reason"]).To(Equal("tool_use"))
|
||||
content := result["content"].([]interface{})
|
||||
Expect(len(content)).To(Equal(1))
|
||||
toolUse := content[0].(map[string]interface{})
|
||||
Expect(toolUse["type"]).To(Equal("tool_use"))
|
||||
Expect(toolUse["id"]).To(Equal("toolu_123"))
|
||||
Expect(toolUse["name"]).To(Equal("get_weather"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("AnthropicErrorResponse", func() {
|
||||
It("should marshal an error response", func() {
|
||||
resp := schema.AnthropicErrorResponse{
|
||||
Type: "error",
|
||||
Error: schema.AnthropicError{
|
||||
Type: "invalid_request_error",
|
||||
Message: "max_tokens is required",
|
||||
},
|
||||
}
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
var result map[string]interface{}
|
||||
err = json.Unmarshal(data, &result)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(result["type"]).To(Equal("error"))
|
||||
errorObj := result["error"].(map[string]interface{})
|
||||
Expect(errorObj["type"]).To(Equal("invalid_request_error"))
|
||||
Expect(errorObj["message"]).To(Equal("max_tokens is required"))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -27,6 +27,9 @@ type Message struct {
|
||||
FunctionCall interface{} `json:"function_call,omitempty" yaml:"function_call,omitempty"`
|
||||
|
||||
ToolCalls []ToolCall `json:"tool_calls,omitempty" yaml:"tool_call,omitempty"`
|
||||
|
||||
// Reasoning content extracted from <thinking>...</thinking> tags
|
||||
Reasoning *string `json:"reasoning,omitempty" yaml:"reasoning,omitempty"`
|
||||
}
|
||||
|
||||
type ToolCall struct {
|
||||
@@ -78,8 +81,8 @@ func (messages Messages) ToProto() []*proto.Message {
|
||||
}
|
||||
}
|
||||
|
||||
// Note: tool_call_id and reasoning_content are not in schema.Message yet
|
||||
// They may need to be added to schema.Message if needed in the future
|
||||
// Note: tool_call_id is not in schema.Message yet
|
||||
// Reasoning field is now available in schema.Message but not yet in proto.Message
|
||||
}
|
||||
return protoMessages
|
||||
}
|
||||
|
||||
@@ -29,3 +29,79 @@ helm show values go-skynet/local-ai > values.yaml

helm install local-ai go-skynet/local-ai -f values.yaml
```

## Security Context Requirements

LocalAI spawns child processes to run model backends (e.g., llama.cpp, diffusers, whisper). To properly stop these processes and free resources like VRAM, LocalAI needs permission to send signals to its child processes.

If you're using restrictive security contexts, ensure the `CAP_KILL` capability is available:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: local-ai
spec:
  containers:
    - name: local-ai
      image: quay.io/go-skynet/local-ai:latest
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
          add:
            - KILL # Required for LocalAI to stop backend processes
        seccompProfile:
          type: RuntimeDefault
        runAsNonRoot: true
        runAsUser: 1000
```

Without the `KILL` capability, LocalAI cannot terminate backend processes when models are stopped, leading to:
- VRAM and memory not being freed
- Orphaned backend processes holding GPU resources
- Error messages like `error while deleting process error=permission denied`

## Troubleshooting

### Issue: VRAM is not freed when stopping models

**Symptoms:**
- Models appear to stop but GPU memory remains allocated
- Logs show `(deleteProcess) error while deleting process error=permission denied`
- Backend processes remain running after model unload

**Common Causes:**
- All capabilities are dropped without adding back `CAP_KILL`
- Using user namespacing (`hostUsers: false`) with certain configurations
- Overly restrictive seccomp profiles that block signal-related syscalls
- Pod Security Policies or Pod Security Standards blocking required capabilities

**Solution:**

1. Add the `KILL` capability to your container's security context as shown in the example above.

2. If you're using a Helm chart, configure the security context in your `values.yaml`:

```yaml
securityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
    add:
      - KILL
  seccompProfile:
    type: RuntimeDefault
```

3. Verify the capability is present in the running pod (see the decoding sketch after this list):

```bash
kubectl exec -it <pod-name> -- grep CapEff /proc/1/status
```

4. If running in privileged mode works but the above doesn't, check your cluster's Pod Security Policies or Pod Security Standards. You may need to adjust cluster-level policies to allow the `KILL` capability.

5. Ensure your seccomp profile (if custom) allows the `kill` syscall. The `RuntimeDefault` profile typically includes this.
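
To interpret the `CapEff` bitmask from step 3, you can decode it with `capsh` from the libcap utilities. This is a minimal sketch; the hex value below is a placeholder, substitute the value your pod reports:

```bash
# Decode the effective capability bitmask reported in /proc/1/status.
# Replace the hex value with the CapEff value from your own pod.
capsh --decode=00000000000000a0
# The decoded list should include cap_kill when the KILL capability is granted.
```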
@@ -1,4 +1,56 @@
|
||||
---
|
||||
- name: "qwen3-vl-reranker-8b"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
- https://huggingface.co/mradermacher/Qwen3-VL-Reranker-8B-GGUF
|
||||
description: |
|
||||
**Model Name:** Qwen3-VL-Reranker-8B
|
||||
**Base Model:** Qwen/Qwen3-VL-Reranker-8B
|
||||
|
||||
**Description:**
|
||||
A high-performance multimodal reranking model for state-of-the-art cross-modal search. It supports 30+ languages and handles text, images, screenshots, videos, and mixed modalities. With 8B parameters and a 32K context length, it refines retrieval results by combining embedding vectors with precise relevance scores. Optimized for efficiency, it supports quantized versions (e.g., Q8_0, Q4_K_M) and is ideal for applications requiring accurate multimodal content matching.
|
||||
|
||||
**Key Features:**
|
||||
- **Multimodal**: Text, images, videos, and mixed content.
|
||||
- **Language Support**: 30+ languages.
|
||||
- **Quantization**: Available in Q8_0 (best quality), Q4_K_M (fast, recommended), and lower-precision options.
|
||||
- **Performance**: Outperforms base models in retrieval tasks (e.g., JinaVDR, ViDoRe v3).
|
||||
- **Use Case**: Enhances search pipelines by refining embeddings with precise relevance scores.
|
||||
|
||||
**Downloads:**
|
||||
- [GGUF Files](https://huggingface.co/mradermacher/Qwen3-VL-Reranker-8B-GGUF) (e.g., `Qwen3-VL-Reranker-8B.Q8_0.gguf`).
|
||||
|
||||
**Usage:**
|
||||
- Requires `transformers`, `qwen-vl-utils`, and `torch`.
|
||||
- Example: `from scripts.qwen3_vl_reranker import Qwen3VLReranker; model = Qwen3VLReranker(...)`
|
||||
|
||||
**Citation:**
|
||||
@article{qwen3vlembedding, ...}
|
||||
|
||||
This description emphasizes its capabilities, efficiency, and versatility for multimodal search tasks.
|
||||
overrides:
|
||||
parameters:
|
||||
model: llama-cpp/models/Qwen3-VL-Reranker-8B.Q4_K_M.gguf
|
||||
name: Qwen3-VL-Reranker-8B-GGUF
|
||||
backend: llama-cpp
|
||||
template:
|
||||
use_tokenizer_template: true
|
||||
known_usecases:
|
||||
- chat
|
||||
function:
|
||||
grammar:
|
||||
disable: true
|
||||
mmproj: llama-cpp/mmproj/Qwen3-VL-Reranker-8B.mmproj-f16.gguf
|
||||
description: Imported from https://huggingface.co/mradermacher/Qwen3-VL-Reranker-8B-GGUF
|
||||
options:
|
||||
- use_jinja:true
|
||||
files:
|
||||
- filename: llama-cpp/models/Qwen3-VL-Reranker-8B.Q4_K_M.gguf
|
||||
sha256: f73e62ea68abf741c3e713af823cfb4d2fd2ca35c8b68277b87b4b3d8570b66d
|
||||
uri: https://huggingface.co/mradermacher/Qwen3-VL-Reranker-8B-GGUF/resolve/main/Qwen3-VL-Reranker-8B.Q4_K_M.gguf
|
||||
- filename: llama-cpp/mmproj/Qwen3-VL-Reranker-8B.mmproj-f16.gguf
|
||||
sha256: 15cd9bd4882dae771344f0ac204fce07de91b47c1438ada3861dfc817403c31e
|
||||
uri: https://huggingface.co/mradermacher/Qwen3-VL-Reranker-8B-GGUF/resolve/main/Qwen3-VL-Reranker-8B.mmproj-f16.gguf
|
||||
- name: "liquidai.lfm2-2.6b-transcript"
|
||||
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
|
||||
urls:
|
||||
|
||||
go.mod (7 lines changed)
@@ -9,6 +9,7 @@ require (
|
||||
fyne.io/fyne/v2 v2.7.1
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/alecthomas/kong v1.13.0
|
||||
github.com/anthropics/anthropic-sdk-go v1.19.0
|
||||
github.com/charmbracelet/glamour v0.10.0
|
||||
github.com/containerd/containerd v1.7.30
|
||||
github.com/ebitengine/purego v0.9.1
|
||||
@@ -58,6 +59,7 @@ require (
|
||||
go.opentelemetry.io/otel/metric v1.39.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0
|
||||
google.golang.org/grpc v1.78.0
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
oras.land/oras-go/v2 v2.6.0
|
||||
@@ -67,8 +69,11 @@ require (
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/labstack/gommon v0.4.2 // indirect
|
||||
github.com/swaggo/files/v2 v2.0.2 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
go.sum (4 lines changed)
@@ -44,6 +44,8 @@ github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
|
||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/anthropics/anthropic-sdk-go v1.19.0 h1:mO6E+ffSzLRvR/YUH9KJC0uGw0uV8GjISIuzem//3KE=
|
||||
github.com/anthropics/anthropic-sdk-go v1.19.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
@@ -762,10 +764,12 @@ github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
|
||||
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
|
||||
pkg/functions/reasoning.go (new file, 114 lines)
@@ -0,0 +1,114 @@
|
||||
package functions
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExtractReasoning extracts reasoning content from thinking tags and returns
|
||||
// both the extracted reasoning and the cleaned content (with tags removed).
|
||||
// It handles <thinking>...</thinking> and <think>...</think> tags.
|
||||
// Multiple reasoning blocks are concatenated with newlines.
|
||||
func ExtractReasoning(content string) (reasoning string, cleanedContent string) {
|
||||
if content == "" {
|
||||
return "", content
|
||||
}
|
||||
|
||||
var reasoningParts []string
|
||||
var cleanedParts []string
|
||||
remaining := content
|
||||
|
||||
// Define tag pairs to look for
|
||||
tagPairs := []struct {
|
||||
start string
|
||||
end string
|
||||
}{
|
||||
{"<thinking>", "</thinking>"},
|
||||
{"<think>", "</think>"},
|
||||
}
|
||||
|
||||
// Track the last position we've processed
|
||||
lastPos := 0
|
||||
|
||||
for {
|
||||
// Find the earliest tag start
|
||||
earliestStart := -1
|
||||
earliestEnd := -1
|
||||
isUnclosed := false
|
||||
var matchedTag struct {
|
||||
start string
|
||||
end string
|
||||
}
|
||||
|
||||
for _, tagPair := range tagPairs {
|
||||
startIdx := strings.Index(remaining[lastPos:], tagPair.start)
|
||||
if startIdx == -1 {
|
||||
continue
|
||||
}
|
||||
startIdx += lastPos
|
||||
|
||||
// Find the corresponding end tag
|
||||
endIdx := strings.Index(remaining[startIdx+len(tagPair.start):], tagPair.end)
|
||||
if endIdx == -1 {
|
||||
// Unclosed tag - extract what we have
|
||||
if earliestStart == -1 || startIdx < earliestStart {
|
||||
earliestStart = startIdx
|
||||
earliestEnd = len(remaining)
|
||||
isUnclosed = true
|
||||
matchedTag = tagPair
|
||||
}
|
||||
continue
|
||||
}
|
||||
endIdx += startIdx + len(tagPair.start)
|
||||
|
||||
// Found a complete tag pair
|
||||
if earliestStart == -1 || startIdx < earliestStart {
|
||||
earliestStart = startIdx
|
||||
earliestEnd = endIdx + len(tagPair.end)
|
||||
isUnclosed = false
|
||||
matchedTag = tagPair
|
||||
}
|
||||
}
|
||||
|
||||
if earliestStart == -1 {
|
||||
// No more tags found, add remaining content
|
||||
if lastPos < len(remaining) {
|
||||
cleanedParts = append(cleanedParts, remaining[lastPos:])
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Add content before the tag
|
||||
if earliestStart > lastPos {
|
||||
cleanedParts = append(cleanedParts, remaining[lastPos:earliestStart])
|
||||
}
|
||||
|
||||
// Extract reasoning content
|
||||
reasoningStart := earliestStart + len(matchedTag.start)
|
||||
// For unclosed tags, earliestEnd is already at the end of the string
|
||||
// For closed tags, earliestEnd points to after the closing tag, so we subtract the end tag length
|
||||
var reasoningEnd int
|
||||
if isUnclosed {
|
||||
// Unclosed tag - extract everything to the end
|
||||
reasoningEnd = len(remaining)
|
||||
} else {
|
||||
// Closed tag - exclude the end tag
|
||||
reasoningEnd = earliestEnd - len(matchedTag.end)
|
||||
}
|
||||
if reasoningEnd > reasoningStart {
|
||||
reasoningContent := strings.TrimSpace(remaining[reasoningStart:reasoningEnd])
|
||||
if reasoningContent != "" {
|
||||
reasoningParts = append(reasoningParts, reasoningContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Move past this tag
|
||||
lastPos = earliestEnd
|
||||
}
|
||||
|
||||
// Combine reasoning parts
|
||||
reasoning = strings.Join(reasoningParts, "\n\n")
|
||||
// Combine cleaned content parts
|
||||
cleanedContent = strings.Join(cleanedParts, "")
|
||||
|
||||
return reasoning, cleanedContent
|
||||
}
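
For reference, a minimal standalone sketch of how the new helper behaves, based only on the signature and tag handling defined above (the input string is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mudler/LocalAI/pkg/functions"
)

func main() {
	raw := "<think>2+2 equals 4, keep the reply short</think>The answer is 4."

	// ExtractReasoning strips <think>/<thinking> blocks and returns them separately,
	// leaving the user-visible text untouched.
	reasoning, visible := functions.ExtractReasoning(raw)

	fmt.Printf("reasoning: %q\n", reasoning) // "2+2 equals 4, keep the reply short"
	fmt.Printf("content:   %q\n", visible)   // "The answer is 4."
}
```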
|
||||
pkg/functions/reasoning_test.go (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
package functions_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
. "github.com/mudler/LocalAI/pkg/functions"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("ExtractReasoning", func() {
|
||||
Context("when content has no reasoning tags", func() {
|
||||
It("should return empty reasoning and original content", func() {
|
||||
content := "This is regular content without any tags."
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(content))
|
||||
})
|
||||
|
||||
It("should handle empty string", func() {
|
||||
content := ""
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should handle content with only whitespace", func() {
|
||||
content := " \n\t "
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(content))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has <thinking> tags", func() {
|
||||
It("should extract reasoning from single thinking block", func() {
|
||||
content := "Some text <thinking>This is my reasoning</thinking> More text"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("This is my reasoning"))
|
||||
Expect(cleaned).To(Equal("Some text More text"))
|
||||
})
|
||||
|
||||
It("should extract reasoning and preserve surrounding content", func() {
|
||||
content := "Before <thinking>Reasoning here</thinking> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning here"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle thinking block at the start", func() {
|
||||
content := "<thinking>Start reasoning</thinking> Regular content"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Start reasoning"))
|
||||
Expect(cleaned).To(Equal(" Regular content"))
|
||||
})
|
||||
|
||||
It("should handle thinking block at the end", func() {
|
||||
content := "Regular content <thinking>End reasoning</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("End reasoning"))
|
||||
Expect(cleaned).To(Equal("Regular content "))
|
||||
})
|
||||
|
||||
It("should handle only thinking block", func() {
|
||||
content := "<thinking>Only reasoning</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Only reasoning"))
|
||||
Expect(cleaned).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("should trim whitespace from reasoning content", func() {
|
||||
content := "Text <thinking> \n Reasoning with spaces \n </thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with spaces"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has <think> tags", func() {
|
||||
It("should extract reasoning from redacted_reasoning block", func() {
|
||||
content := "Text <think>Redacted reasoning</think> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Redacted reasoning"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle redacted_reasoning with multiline content", func() {
|
||||
content := "Before <think>Line 1\nLine 2\nLine 3</think> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Line 1\nLine 2\nLine 3"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle redacted_reasoning with complex content", func() {
|
||||
content := "Start <think>Complex reasoning\nwith\nmultiple\nlines</think> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Complex reasoning\nwith\nmultiple\nlines"))
|
||||
Expect(cleaned).To(Equal("Start End"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has multiple reasoning blocks", func() {
|
||||
It("should concatenate multiple thinking blocks with newlines", func() {
|
||||
content := "Text <thinking>First</thinking> Middle <thinking>Second</thinking> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("First\n\nSecond"))
|
||||
Expect(cleaned).To(Equal("Text Middle End"))
|
||||
})
|
||||
|
||||
It("should handle multiple different tag types", func() {
|
||||
content := "A <thinking>One</thinking> B <think>Two</think> C <think>Three</think> D"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(ContainSubstring("One"))
|
||||
Expect(reasoning).To(ContainSubstring("Two"))
|
||||
Expect(reasoning).To(ContainSubstring("Three"))
|
||||
Expect(cleaned).To(Equal("A B C D"))
|
||||
})
|
||||
|
||||
It("should handle nested tags correctly (extracts first match)", func() {
|
||||
content := "Text <thinking>Outer <think>Inner</think></thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// Should extract the outer thinking block
|
||||
Expect(reasoning).To(ContainSubstring("Outer"))
|
||||
Expect(reasoning).To(ContainSubstring("Inner"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has unclosed reasoning tags", func() {
|
||||
It("should extract unclosed thinking block", func() {
|
||||
content := "Text <thinking>Unclosed reasoning"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Unclosed reasoning"))
|
||||
Expect(cleaned).To(Equal("Text "))
|
||||
})
|
||||
|
||||
It("should extract unclosed think block", func() {
|
||||
content := "Before <think>Incomplete"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Incomplete"))
|
||||
Expect(cleaned).To(Equal("Before "))
|
||||
})
|
||||
|
||||
It("should extract unclosed redacted_reasoning block", func() {
|
||||
content := "Start <think>Partial reasoning content"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Partial reasoning content"))
|
||||
Expect(cleaned).To(Equal("Start "))
|
||||
})
|
||||
|
||||
It("should handle unclosed tag at the end", func() {
|
||||
content := "Regular content <thinking>Unclosed at end"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Unclosed at end"))
|
||||
Expect(cleaned).To(Equal("Regular content "))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has empty reasoning blocks", func() {
|
||||
It("should ignore empty thinking block", func() {
|
||||
content := "Text <thinking></thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should ignore thinking block with only whitespace", func() {
|
||||
content := "Text <thinking> \n\t </thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has reasoning tags with special characters", func() {
|
||||
It("should handle reasoning with newlines", func() {
|
||||
content := "Before <thinking>Line 1\nLine 2\nLine 3</thinking> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Line 1\nLine 2\nLine 3"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with code blocks", func() {
|
||||
content := "Text <thinking>Reasoning with ```code``` blocks</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with ```code``` blocks"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with JSON", func() {
|
||||
content := "Before <think>{\"key\": \"value\"}</think> After"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("{\"key\": \"value\"}"))
|
||||
Expect(cleaned).To(Equal("Before After"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with HTML-like content", func() {
|
||||
content := "Text <thinking>Reasoning with <tags> inside</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with <tags> inside"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when content has reasoning mixed with regular content", func() {
|
||||
It("should preserve content order correctly", func() {
|
||||
content := "Start <thinking>Reasoning</thinking> Middle <think>More reasoning</think> End"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(ContainSubstring("Reasoning"))
|
||||
Expect(reasoning).To(ContainSubstring("More reasoning"))
|
||||
Expect(cleaned).To(Equal("Start Middle End"))
|
||||
})
|
||||
|
||||
It("should handle reasoning in the middle of a sentence", func() {
|
||||
content := "This is a <thinking>reasoning</thinking> sentence."
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("reasoning"))
|
||||
Expect(cleaned).To(Equal("This is a sentence."))
|
||||
})
|
||||
})
|
||||
|
||||
Context("edge cases", func() {
|
||||
It("should handle content with only opening tag", func() {
|
||||
content := "<thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal(""))
|
||||
})
|
||||
|
||||
It("should handle content with only closing tag", func() {
|
||||
content := "</thinking>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(BeEmpty())
|
||||
Expect(cleaned).To(Equal("</thinking>"))
|
||||
})
|
||||
|
||||
It("should handle mismatched tags", func() {
|
||||
content := "<thinking>Content</think>"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// Should extract unclosed thinking block
|
||||
Expect(reasoning).To(ContainSubstring("Content"))
|
||||
Expect(cleaned).To(Equal(""))
|
||||
})
|
||||
|
||||
It("should handle very long reasoning content", func() {
|
||||
longReasoning := strings.Repeat("This is reasoning content. ", 100)
|
||||
content := "Text <thinking>" + longReasoning + "</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
// TrimSpace is applied, so we need to account for that
|
||||
Expect(reasoning).To(Equal(strings.TrimSpace(longReasoning)))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
|
||||
It("should handle reasoning with unicode characters", func() {
|
||||
content := "Text <thinking>Reasoning with 中文 and emoji 🧠</thinking> More"
|
||||
reasoning, cleaned := ExtractReasoning(content)
|
||||
Expect(reasoning).To(Equal("Reasoning with 中文 and emoji 🧠"))
|
||||
Expect(cleaned).To(Equal("Text More"))
|
||||
})
|
||||
})
|
||||
})
|
||||
tests/e2e/e2e_anthropic_test.go (new file, 375 lines)
@@ -0,0 +1,375 @@
|
||||
package e2e_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/anthropics/anthropic-sdk-go"
|
||||
"github.com/anthropics/anthropic-sdk-go/option"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Anthropic API E2E test", func() {
|
||||
var client anthropic.Client
|
||||
|
||||
Context("API with Anthropic SDK", func() {
|
||||
BeforeEach(func() {
|
||||
// Create Anthropic client pointing to LocalAI
|
||||
client = anthropic.NewClient(
|
||||
option.WithBaseURL(localAIURL),
|
||||
option.WithAPIKey("test-api-key"), // LocalAI doesn't require a real API key
|
||||
)
|
||||
|
||||
// Wait for API to be ready by attempting a simple request
|
||||
Eventually(func() error {
|
||||
_, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 10,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Hi")),
|
||||
},
|
||||
})
|
||||
return err
|
||||
}, "2m").ShouldNot(HaveOccurred())
|
||||
})
|
||||
|
||||
Context("Non-streaming responses", func() {
|
||||
It("generates a response for a simple message", func() {
|
||||
message, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("How much is 2+2? Reply with just the number.")),
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
// Role is a constant type that defaults to "assistant"
|
||||
Expect(string(message.Role)).To(Equal("assistant"))
|
||||
Expect(message.StopReason).To(Equal(anthropic.MessageStopReasonEndTurn))
|
||||
Expect(string(message.Type)).To(Equal("message"))
|
||||
|
||||
// Check that content contains text block with expected answer
|
||||
Expect(len(message.Content)).To(BeNumerically(">=", 1))
|
||||
textBlock := message.Content[0]
|
||||
Expect(string(textBlock.Type)).To(Equal("text"))
|
||||
Expect(textBlock.Text).To(Or(ContainSubstring("4"), ContainSubstring("four")))
|
||||
})
|
||||
|
||||
It("handles system prompts", func() {
|
||||
message, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
System: []anthropic.TextBlockParam{
|
||||
{Text: "You are a helpful assistant. Always respond in uppercase letters."},
|
||||
},
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Say hello")),
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
Expect(len(message.Content)).To(BeNumerically(">=", 1))
|
||||
})
|
||||
|
||||
It("returns usage information", func() {
|
||||
message, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 100,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello")),
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(message.Usage.InputTokens).To(BeNumerically(">", 0))
|
||||
Expect(message.Usage.OutputTokens).To(BeNumerically(">", 0))
|
||||
})
|
||||
})
|
||||
|
||||
Context("Streaming responses", func() {
|
||||
It("streams tokens for a simple message", func() {
|
||||
stream := client.Messages.NewStreaming(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Count from 1 to 5")),
|
||||
},
|
||||
})
|
||||
|
||||
message := anthropic.Message{}
|
||||
eventCount := 0
|
||||
hasContentDelta := false
|
||||
|
||||
for stream.Next() {
|
||||
event := stream.Current()
|
||||
err := message.Accumulate(event)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
eventCount++
|
||||
|
||||
// Check for content block delta events
|
||||
switch event.AsAny().(type) {
|
||||
case anthropic.ContentBlockDeltaEvent:
|
||||
hasContentDelta = true
|
||||
}
|
||||
}
|
||||
|
||||
Expect(stream.Err()).ToNot(HaveOccurred())
|
||||
Expect(eventCount).To(BeNumerically(">", 0))
|
||||
Expect(hasContentDelta).To(BeTrue())
|
||||
|
||||
// Check accumulated message
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
// Role is a constant type that defaults to "assistant"
|
||||
Expect(string(message.Role)).To(Equal("assistant"))
|
||||
})
|
||||
|
||||
It("streams with system prompt", func() {
|
||||
stream := client.Messages.NewStreaming(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
System: []anthropic.TextBlockParam{
|
||||
{Text: "You are a helpful assistant."},
|
||||
},
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Say hello")),
|
||||
},
|
||||
})
|
||||
|
||||
message := anthropic.Message{}
|
||||
for stream.Next() {
|
||||
event := stream.Current()
|
||||
err := message.Accumulate(event)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(stream.Err()).ToNot(HaveOccurred())
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Tool calling", func() {
|
||||
It("handles tool calls in non-streaming mode", func() {
|
||||
message, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather like in San Francisco?")),
|
||||
},
|
||||
Tools: []anthropic.ToolParam{
|
||||
{
|
||||
Name: "get_weather",
|
||||
Description: anthropic.F("Get the current weather in a given location"),
|
||||
InputSchema: anthropic.F(map[string]interface{}{
|
||||
"type": "object",
|
||||
"properties": map[string]interface{}{
|
||||
"location": map[string]interface{}{
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g. San Francisco, CA",
|
||||
},
|
||||
},
|
||||
"required": []string{"location"},
|
||||
}),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
|
||||
// The model must use tools - find the tool use in the response
|
||||
hasToolUse := false
|
||||
for _, block := range message.Content {
|
||||
if block.Type == anthropic.ContentBlockTypeToolUse {
|
||||
hasToolUse = true
|
||||
Expect(block.Name).To(Equal("get_weather"))
|
||||
Expect(block.ID).ToNot(BeEmpty())
|
||||
// Verify that input contains location
|
||||
inputMap, ok := block.Input.(map[string]interface{})
|
||||
Expect(ok).To(BeTrue())
|
||||
_, hasLocation := inputMap["location"]
|
||||
Expect(hasLocation).To(BeTrue())
|
||||
}
|
||||
}
|
||||
|
||||
// Model must have called the tool
|
||||
Expect(hasToolUse).To(BeTrue(), "Model should have called the get_weather tool")
|
||||
Expect(message.StopReason).To(Equal(anthropic.MessageStopReasonToolUse))
|
||||
})
|
||||
|
||||
It("handles tool_choice parameter", func() {
|
||||
message, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Tell me about the weather")),
|
||||
},
|
||||
Tools: []anthropic.ToolParam{
|
||||
{
|
||||
Name: "get_weather",
|
||||
Description: anthropic.F("Get the current weather"),
|
||||
InputSchema: anthropic.F(map[string]interface{}{
|
||||
"type": "object",
|
||||
"properties": map[string]interface{}{
|
||||
"location": map[string]interface{}{
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
ToolChoice: anthropic.F[anthropic.ToolChoiceUnionParam](
|
||||
anthropic.ToolChoiceAutoParam{
|
||||
Type: anthropic.F(anthropic.ToolChoiceAutoTypeAuto),
|
||||
},
|
||||
),
|
||||
})
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
})
|
||||
|
||||
It("handles tool results in messages", func() {
|
||||
// First, make a request that should trigger a tool call
|
||||
firstMessage, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather in SF?")),
|
||||
},
|
||||
Tools: []anthropic.ToolParam{
|
||||
{
|
||||
Name: "get_weather",
|
||||
Description: anthropic.F("Get weather"),
|
||||
InputSchema: anthropic.F(map[string]interface{}{
|
||||
"type": "object",
|
||||
"properties": map[string]interface{}{
|
||||
"location": map[string]interface{}{"type": "string"},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Find the tool use block - model must call the tool
|
||||
var toolUseID string
|
||||
var toolUseName string
|
||||
for _, block := range firstMessage.Content {
|
||||
if block.Type == anthropic.ContentBlockTypeToolUse {
|
||||
toolUseID = block.ID
|
||||
toolUseName = block.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Model must have called the tool
|
||||
Expect(toolUseID).ToNot(BeEmpty(), "Model should have called the get_weather tool")
|
||||
|
||||
// Send back a tool result and verify it's handled correctly
|
||||
secondMessage, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather in SF?")),
|
||||
anthropic.NewAssistantMessage(firstMessage.Content...),
|
||||
anthropic.NewUserMessage(
|
||||
anthropic.NewToolResultBlock(toolUseID, "Sunny, 72°F", false),
|
||||
),
|
||||
},
|
||||
Tools: []anthropic.ToolParam{
|
||||
{
|
||||
Name: toolUseName,
|
||||
Description: anthropic.F("Get weather"),
|
||||
InputSchema: anthropic.F(map[string]interface{}{
|
||||
"type": "object",
|
||||
"properties": map[string]interface{}{
|
||||
"location": map[string]interface{}{"type": "string"},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(secondMessage.Content).ToNot(BeEmpty())
|
||||
})
|
||||
|
||||
It("handles tool calls in streaming mode", func() {
|
||||
stream := client.Messages.NewStreaming(context.TODO(), anthropic.MessageNewParams{
|
||||
Model: "gpt-4",
|
||||
MaxTokens: 1024,
|
||||
Messages: []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather like in San Francisco?")),
|
||||
},
|
||||
Tools: []anthropic.ToolParam{
|
||||
{
|
||||
Name: "get_weather",
|
||||
Description: anthropic.F("Get the current weather in a given location"),
|
||||
InputSchema: anthropic.F(map[string]interface{}{
|
||||
"type": "object",
|
||||
"properties": map[string]interface{}{
|
||||
"location": map[string]interface{}{
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g. San Francisco, CA",
|
||||
},
|
||||
},
|
||||
"required": []string{"location"},
|
||||
}),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
message := anthropic.Message{}
|
||||
eventCount := 0
|
||||
hasToolUseBlock := false
|
||||
hasContentBlockStart := false
|
||||
hasContentBlockDelta := false
|
||||
hasContentBlockStop := false
|
||||
|
||||
for stream.Next() {
|
||||
event := stream.Current()
|
||||
err := message.Accumulate(event)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
eventCount++
|
||||
|
||||
// Check for different event types related to tool use
|
||||
switch e := event.AsAny().(type) {
|
||||
case anthropic.ContentBlockStartEvent:
|
||||
hasContentBlockStart = true
|
||||
if e.ContentBlock.Type == anthropic.ContentBlockTypeToolUse {
|
||||
hasToolUseBlock = true
|
||||
}
|
||||
case anthropic.ContentBlockDeltaEvent:
|
||||
hasContentBlockDelta = true
|
||||
case anthropic.ContentBlockStopEvent:
|
||||
hasContentBlockStop = true
|
||||
}
|
||||
}
|
||||
|
||||
Expect(stream.Err()).ToNot(HaveOccurred())
|
||||
Expect(eventCount).To(BeNumerically(">", 0))
|
||||
|
||||
// Verify streaming events were emitted
|
||||
Expect(hasContentBlockStart).To(BeTrue(), "Should have content_block_start event")
|
||||
Expect(hasContentBlockDelta).To(BeTrue(), "Should have content_block_delta event")
|
||||
Expect(hasContentBlockStop).To(BeTrue(), "Should have content_block_stop event")
|
||||
|
||||
// Check accumulated message has tool use
|
||||
Expect(message.Content).ToNot(BeEmpty())
|
||||
|
||||
// Model must have called the tool
|
||||
foundToolUse := false
|
||||
for _, block := range message.Content {
|
||||
if block.Type == anthropic.ContentBlockTypeToolUse {
|
||||
foundToolUse = true
|
||||
Expect(block.Name).To(Equal("get_weather"))
|
||||
Expect(block.ID).ToNot(BeEmpty())
|
||||
}
|
||||
}
|
||||
Expect(foundToolUse).To(BeTrue(), "Model should have called the get_weather tool in streaming mode")
|
||||
Expect(message.StopReason).To(Equal(anthropic.MessageStopReasonToolUse))
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
Reference in New Issue
Block a user