Compare commits: parth/decr ... v0.14.3-rc (13 commits)

Commits:

- aad3f03890
- 55d0b6e8b9
- 38eac40d56
- 80f3f1bc25
- b1a0db547b
- 75d7b5f926
- 349d814814
- c8743031e0
- 4adb9cf4bb
- 74f475e735
- 875cecba74
- 7d411a4686
- 02a2401596
@@ -190,7 +190,7 @@ if(MLX_ENGINE)
 install(TARGETS mlx mlxc
 RUNTIME_DEPENDENCIES
 DIRECTORIES ${CUDAToolkit_BIN_DIR} ${CUDAToolkit_BIN_DIR}/x64 ${CUDAToolkit_LIBRARY_DIR}
-PRE_INCLUDE_REGEXES cublas cublasLt cudart nvrtc cudnn nccl
+PRE_INCLUDE_REGEXES cublas cublasLt cudart nvrtc nvrtc-builtins cudnn nccl openblas gfortran
 PRE_EXCLUDE_REGEXES ".*"
 RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT MLX
 LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT MLX
README.md (41 changed lines)
@@ -48,7 +48,7 @@ ollama run gemma3

 ## Model library

-Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library')
+Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library "ollama model library")

 Here are some example models that can be downloaded:

@@ -79,7 +79,7 @@ Here are some example models that can be downloaded:
 | Code Llama | 7B | 3.8GB | `ollama run codellama` |
 | Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
 | LLaVA | 7B | 4.5GB | `ollama run llava` |
 | Granite-3.3 | 8B | 4.9GB | `ollama run granite3.3` |

 > [!NOTE]
 > You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
@@ -260,6 +260,38 @@ Finally, in a separate shell, run a model:
 ./ollama run llama3.2
 ```

+## Building with MLX (experimental)
+
+First build the MLX libraries:
+
+```shell
+cmake --preset MLX
+cmake --build --preset MLX --parallel
+cmake --install build --component MLX
+```
+
+Next, build the `ollama-mlx` binary, which is a separate build of the Ollama runtime with MLX support enabled (needs to be in the same directory as `ollama`):
+
+```shell
+go build -tags mlx -o ollama-mlx .
+```
+
+Finally, start the server:
+
+```
+./ollama serve
+```
+
+### Building MLX with CUDA
+
+When building with CUDA, use the preset "MLX CUDA 13" or "MLX CUDA 12" to enable CUDA with default architectures:
+
+```shell
+cmake --preset 'MLX CUDA 13'
+cmake --build --preset 'MLX CUDA 13' --parallel
+cmake --install build --component MLX
+```
+
 ## REST API

 Ollama has a REST API for running and managing models.
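The MLX section added above splits the build across CMake and Go; a consolidated sketch of the flow on a development machine, assuming the repository root as the working directory and that the main `ollama` binary is built alongside (the model name is only an example):

```shell
cmake --preset MLX
cmake --build --preset MLX --parallel
cmake --install build --component MLX

go build -tags mlx -o ollama-mlx .   # MLX-enabled runtime
go build -o ollama .                 # main binary, same directory

./ollama serve &
./ollama run llama3.2
```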
@@ -290,6 +322,7 @@ See the [API documentation](./docs/api.md) for all endpoints.

 ### Web & Desktop

+- [Onyx](https://github.com/onyx-dot-app/onyx)
 - [Open WebUI](https://github.com/open-webui/open-webui)
 - [SwiftChat (macOS with ReactNative)](https://github.com/aws-samples/swift-chat)
 - [Enchanted (macOS native)](https://github.com/AugustDev/enchanted)
@@ -493,7 +526,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 ### Database

 - [pgai](https://github.com/timescale/pgai) - PostgreSQL as a vector database (Create and search embeddings from Ollama models using pgvector)
 - [Get started guide](https://github.com/timescale/pgai/blob/main/docs/vectorizer-quick-start.md)
 - [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md) (Connects Ollama models with nearly 200 data platforms and apps)
 - [chromem-go](https://github.com/philippgille/chromem-go/blob/v0.5.0/embed_ollama.go) with [example](https://github.com/philippgille/chromem-go/tree/v0.5.0/examples/rag-wikipedia-ollama)
 - [Kangaroo](https://github.com/dbkangaroo/kangaroo) (AI-powered SQL client and admin tool for popular databases)
@@ -636,6 +669,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [llama.cpp](https://github.com/ggml-org/llama.cpp) project founded by Georgi Gerganov.

 ### Observability

 - [Opik](https://www.comet.com/docs/opik/cookbook/ollama) is an open-source platform to debug, evaluate, and monitor your LLM applications, RAG systems, and agentic workflows with comprehensive tracing, automated evaluations, and production-ready dashboards. Opik supports native integration to Ollama.
 - [Lunary](https://lunary.ai/docs/integrations/ollama) is the leading open-source LLM observability platform. It provides a variety of enterprise-grade features such as real-time analytics, prompt templates management, PII masking, and comprehensive agent tracing.
 - [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama Applications & GPUs using traces and metrics.
@@ -644,4 +678,5 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html#automatic-tracing) is an open source LLM observability tool with a convenient API to log and visualize traces, making it easy to debug and evaluate GenAI applications.

 ### Security

 - [Ollama Fortress](https://github.com/ParisNeo/ollama_proxy_server)
@@ -14,6 +14,7 @@ extern NSString *SystemWidePath;
 @interface AppDelegate () <NSWindowDelegate, WKNavigationDelegate, WKUIDelegate>
 @property(strong, nonatomic) NSStatusItem *statusItem;
 @property(assign, nonatomic) BOOL updateAvailable;
+@property(assign, nonatomic) BOOL systemShutdownInProgress;
 @end

 @implementation AppDelegate
@@ -40,6 +41,13 @@ bool firstTimeRun,startHidden; // Set in run before initialization
 }

 - (void)applicationDidFinishLaunching:(NSNotification *)aNotification {
+  // Register for system shutdown/restart notification so we can allow termination
+  [[[NSWorkspace sharedWorkspace] notificationCenter]
+      addObserver:self
+         selector:@selector(systemWillPowerOff:)
+             name:NSWorkspaceWillPowerOffNotification
+           object:nil];
+
   // if we're in development mode, set the app icon
   NSString *bundlePath = [[NSBundle mainBundle] bundlePath];
   if (![bundlePath hasSuffix:@".app"]) {
@@ -278,7 +286,18 @@ bool firstTimeRun,startHidden; // Set in run before initialization
   [NSApp activateIgnoringOtherApps:YES];
 }

+- (void)systemWillPowerOff:(NSNotification *)notification {
+  // Set flag so applicationShouldTerminate: knows to allow termination.
+  // The system will call applicationShouldTerminate: after posting this notification.
+  self.systemShutdownInProgress = YES;
+}
+
 - (NSApplicationTerminateReply)applicationShouldTerminate:(NSApplication *)sender {
+  // Allow termination if the system is shutting down or restarting
+  if (self.systemShutdownInProgress) {
+    return NSTerminateNow;
+  }
+  // Otherwise just hide the app (for Cmd+Q, close button, etc.)
   [NSApp hide:nil];
   [NSApp setActivationPolicy:NSApplicationActivationPolicyAccessory];
   return NSTerminateCancel;
@@ -116,7 +116,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
   Prompt: ">>> ",
   AltPrompt: "... ",
   Placeholder: "Send a message (/? for help)",
-  AltPlaceholder: `Use """ to end multi-line input`,
+  AltPlaceholder: "Press Enter to send",
 })
 if err != nil {
   return err
@@ -21,6 +21,7 @@ ollama pull glm-4.7:cloud
 To use Ollama with tools that expect the Anthropic API (like Claude Code), set these environment variables:

 ```shell
+export ANTHROPIC_AUTH_TOKEN=ollama # required but ignored
 export ANTHROPIC_BASE_URL=http://localhost:11434
 export ANTHROPIC_API_KEY=ollama # required but ignored
 ```
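The hunk below references the Anthropic-compatible `/v1/messages` endpoint that these variables point tools at; a minimal request sketch against it, assuming a locally pulled model such as `qwen3-coder` (the headers and body follow the Anthropic Messages format, and the key value is ignored by Ollama):

```shell
curl -X POST http://localhost:11434/v1/messages \
  -H "content-type: application/json" \
  -H "x-api-key: ollama" \
  -H "anthropic-version: 2023-06-01" \
  -d '{
    "model": "qwen3-coder",
    "max_tokens": 256,
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```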
@@ -247,12 +248,13 @@ curl -X POST http://localhost:11434/v1/messages \
 [Claude Code](https://code.claude.com/docs/en/overview) can be configured to use Ollama as its backend:

 ```shell
-ANTHROPIC_BASE_URL=http://localhost:11434 ANTHROPIC_API_KEY=ollama claude --model qwen3-coder
+ANTHROPIC_AUTH_TOKEN=ollama ANTHROPIC_BASE_URL=http://localhost:11434 ANTHROPIC_API_KEY=ollama claude --model qwen3-coder
 ```

 Or set the environment variables in your shell profile:

 ```shell
+export ANTHROPIC_AUTH_TOKEN=ollama
 export ANTHROPIC_BASE_URL=http://localhost:11434
 export ANTHROPIC_API_KEY=ollama
 ```
@@ -110,7 +110,7 @@ More Ollama [Python example](https://github.com/ollama/ollama-python/blob/main/e
 import { Ollama } from "ollama";

 const client = new Ollama();
-const results = await client.webSearch({ query: "what is ollama?" });
+const results = await client.webSearch("what is ollama?");
 console.log(JSON.stringify(results, null, 2));
 ```

@@ -213,7 +213,7 @@ models](https://ollama.com/models)\n\nAvailable for macOS, Windows, and Linux',
 import { Ollama } from "ollama";

 const client = new Ollama();
-const fetchResult = await client.webFetch({ url: "https://ollama.com" });
+const fetchResult = await client.webFetch("https://ollama.com");
 console.log(JSON.stringify(fetchResult, null, 2));
 ```

@@ -111,7 +111,9 @@
 "/integrations/zed",
 "/integrations/roo-code",
 "/integrations/n8n",
-"/integrations/xcode"
+"/integrations/xcode",
+"/integrations/onyx",
+"/integrations/marimo"
 ]
 },
 {
@@ -22,7 +22,7 @@ Please refer to the [GPU docs](./gpu).

 ## How can I specify the context window size?

-By default, Ollama uses a context window size of 2048 tokens.
+By default, Ollama uses a context window size of 4096 tokens.

 This can be overridden with the `OLLAMA_CONTEXT_LENGTH` environment variable. For example, to set the default context window to 8K, use:

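The example command itself falls outside this hunk; a minimal sketch of what the sentence above describes, assuming the server is started from a shell:

```shell
OLLAMA_CONTEXT_LENGTH=8192 ollama serve
```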
New binary files:

- docs/images/marimo-add-model.png (174 KiB)
- docs/images/marimo-chat.png (80 KiB)
- docs/images/marimo-code-completion.png (230 KiB)
- docs/images/marimo-models.png (178 KiB)
- docs/images/marimo-settings.png (186 KiB)
- docs/images/onyx-login.png (100 KiB)
- docs/images/onyx-ollama-form.png (306 KiB)
- docs/images/onyx-ollama-llm.png (300 KiB)
- docs/images/onyx-query.png (211 KiB)
@@ -25,6 +25,7 @@ Claude Code connects to Ollama using the Anthropic-compatible API.
 1. Set the environment variables:

 ```shell
+export ANTHROPIC_AUTH_TOKEN=ollama
 export ANTHROPIC_BASE_URL=http://localhost:11434
 export ANTHROPIC_API_KEY=ollama
 ```
@@ -38,7 +39,7 @@ claude --model qwen3-coder
 Or run with environment variables inline:

 ```shell
-ANTHROPIC_BASE_URL=http://localhost:11434 ANTHROPIC_API_KEY=ollama claude --model qwen3-coder
+ANTHROPIC_AUTH_TOKEN=ollama ANTHROPIC_BASE_URL=http://localhost:11434 ANTHROPIC_API_KEY=ollama claude --model qwen3-coder
 ```

 ## Connecting to ollama.com
docs/integrations/marimo.mdx (new file, 73 lines)

---
title: marimo
---

## Install

Install [marimo](https://marimo.io). You can use `pip` or `uv` for this. You
can also use `uv` to create a sandboxed environment for marimo by running:

```
uvx marimo edit --sandbox notebook.py
```

## Usage with Ollama

1. In marimo, open the user settings and go to the AI tab. From here
   you can find and configure Ollama as an AI provider. For local use you
   would typically point the base url to `http://localhost:11434/v1`.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/marimo-settings.png"
    alt="Ollama settings in marimo"
    width="50%"
  />
</div>

2. Once the AI provider is set up, you can turn on/off specific AI models you'd like to access.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/marimo-models.png"
    alt="Selecting an Ollama model"
    width="50%"
  />
</div>

3. You can also add a model to the list of available models by scrolling to the bottom and using the UI there.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/marimo-add-model.png"
    alt="Adding a new Ollama model"
    width="50%"
  />
</div>

4. Once configured, you can now use Ollama for AI chats in marimo.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/marimo-chat.png"
    alt="Configure code completion"
    width="50%"
  />
</div>

5. Alternatively, you can use Ollama for **inline code completion** in marimo. This can be configured in the "AI Features" tab.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/marimo-code-completion.png"
    alt="Configure code completion"
    width="50%"
  />
</div>

## Connecting to ollama.com

1. Sign in to Ollama Cloud via `ollama signin`
2. In the Ollama model settings add a model that Ollama hosts, like `gpt-oss:120b`.
3. You can now refer to this model in marimo!
docs/integrations/onyx.mdx (new file, 63 lines)

---
title: Onyx
---

## Overview

[Onyx](http://onyx.app/) is a self-hostable Chat UI that integrates with all Ollama models. Features include:

- Creating custom Agents
- Web search
- Deep Research
- RAG over uploaded documents and connected apps
- Connectors to applications like Google Drive, Email, Slack, etc.
- MCP and OpenAPI Actions support
- Image generation
- User/Groups management, RBAC, SSO, etc.

Onyx can be deployed for single users or large organizations.

## Install Onyx

Deploy Onyx with the [quickstart guide](https://docs.onyx.app/deployment/getting_started/quickstart).

<Info>
  Resourcing/scaling docs [here](https://docs.onyx.app/deployment/getting_started/resourcing).
</Info>

## Usage with Ollama

1. Log in to your Onyx deployment (create an account first).

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/onyx-login.png"
    alt="Onyx Login Page"
    width="75%"
  />
</div>

2. In the set-up process select `Ollama` as the LLM provider.

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/onyx-ollama-llm.png"
    alt="Onyx Set Up Form"
    width="75%"
  />
</div>

3. Provide your **Ollama API URL** and select your models.

<Note>If you're running Onyx in Docker, to access your computer's local network use `http://host.docker.internal` instead of `http://127.0.0.1`.</Note>

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/onyx-ollama-form.png"
    alt="Selecting Ollama Models"
    width="75%"
  />
</div>

You can also connect Onyx Cloud using the `Ollama Cloud` tab of the setup.

## Send your first query

<div style={{ display: 'flex', justifyContent: 'center' }}>
  <img
    src="/images/onyx-query.png"
    alt="Onyx Query Example"
    width="75%"
  />
</div>
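For the Docker setup mentioned in step 3, the Ollama API URL entered on that form typically looks like the following (the port is Ollama's default and is an assumption here):

```
http://host.docker.internal:11434
```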
@@ -1,5 +1,5 @@
 ---
-title: "Linux"
+title: Linux
 ---

 ## Install
@@ -13,14 +13,15 @@ curl -fsSL https://ollama.com/install.sh | sh
 ## Manual install

 <Note>
-If you are upgrading from a prior version, you should remove the old libraries with `sudo rm -rf /usr/lib/ollama` first.
+If you are upgrading from a prior version, you should remove the old libraries
+with `sudo rm -rf /usr/lib/ollama` first.
 </Note>

 Download and extract the package:

 ```shell
-curl -fsSL https://ollama.com/download/ollama-linux-amd64.tgz \
-| sudo tar zx -C /usr
+curl -fsSL https://ollama.com/download/ollama-linux-amd64.tar.zst \
+| sudo tar x -C /usr
 ```

 Start Ollama:
@@ -40,8 +41,8 @@ ollama -v
 If you have an AMD GPU, also download and extract the additional ROCm package:

 ```shell
-curl -fsSL https://ollama.com/download/ollama-linux-amd64-rocm.tgz \
-| sudo tar zx -C /usr
+curl -fsSL https://ollama.com/download/ollama-linux-amd64-rocm.tar.zst \
+| sudo tar x -C /usr
 ```

 ### ARM64 install
@@ -49,8 +50,8 @@ curl -fsSL https://ollama.com/download/ollama-linux-amd64-rocm.tgz \
 Download and extract the ARM64-specific package:

 ```shell
-curl -fsSL https://ollama.com/download/ollama-linux-arm64.tgz \
-| sudo tar zx -C /usr
+curl -fsSL https://ollama.com/download/ollama-linux-arm64.tar.zst \
+| sudo tar x -C /usr
 ```

 ### Adding Ollama as a startup service (recommended)
@@ -112,7 +113,11 @@ sudo systemctl status ollama
 ```

 <Note>
-While AMD has contributed the `amdgpu` driver upstream to the official linux kernel source, the version is older and may not support all ROCm features. We recommend you install the latest driver from https://www.amd.com/en/support/linux-drivers for best support of your Radeon GPU.
+While AMD has contributed the `amdgpu` driver upstream to the official linux
+kernel source, the version is older and may not support all ROCm features. We
+recommend you install the latest driver from
+https://www.amd.com/en/support/linux-drivers for best support of your Radeon
+GPU.
 </Note>

 ## Customizing
@@ -141,8 +146,8 @@ curl -fsSL https://ollama.com/install.sh | sh
 Or by re-downloading Ollama:

 ```shell
-curl -fsSL https://ollama.com/download/ollama-linux-amd64.tgz \
-| sudo tar zx -C /usr
+curl -fsSL https://ollama.com/download/ollama-linux-amd64.tar.zst \
+| sudo tar x -C /usr
 ```

 ## Installing specific versions
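The replacement archives are zstd-compressed, and the new `tar x` invocations rely on tar auto-detecting the compression; if the installed tar does not do that, the compression can be named explicitly (a sketch, assuming GNU tar 1.31+ built with zstd support):

```shell
curl -fsSL https://ollama.com/download/ollama-linux-amd64.tar.zst \
  | sudo tar --zstd -x -C /usr
```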
@@ -131,7 +131,7 @@ func TestAPIToolCalling(t *testing.T) {
 t.Errorf("unexpected tool called: got %q want %q", lastToolCall.Function.Name, "get_weather")
 }

-if _, ok := lastToolCall.Function.Arguments["location"]; !ok {
+if _, ok := lastToolCall.Function.Arguments.Get("location"); !ok {
 t.Errorf("expected tool arguments to include 'location', got: %s", lastToolCall.Function.Arguments.String())
 }
 case <-ctx.Done():
@@ -8,6 +8,7 @@ import (
 "math/rand"
 "net/http"
 "strings"
+"time"

 "github.com/gin-gonic/gin"

@@ -441,6 +442,7 @@ type ResponsesWriter struct {
 stream bool
 responseID string
 itemID string
+request openai.ResponsesRequest
 }

 func (w *ResponsesWriter) writeEvent(eventType string, data any) error {
@@ -478,7 +480,9 @@ func (w *ResponsesWriter) writeResponse(data []byte) (int, error) {

 // Non-streaming response
 w.ResponseWriter.Header().Set("Content-Type", "application/json")
-response := openai.ToResponse(w.model, w.responseID, w.itemID, chatResponse)
+response := openai.ToResponse(w.model, w.responseID, w.itemID, chatResponse, w.request)
+completedAt := time.Now().Unix()
+response.CompletedAt = &completedAt
 return len(data), json.NewEncoder(w.ResponseWriter).Encode(response)
 }

@@ -523,11 +527,12 @@ func ResponsesMiddleware() gin.HandlerFunc {

 w := &ResponsesWriter{
 BaseWriter: BaseWriter{ResponseWriter: c.Writer},
-converter: openai.NewResponsesStreamConverter(responseID, itemID, req.Model),
+converter: openai.NewResponsesStreamConverter(responseID, itemID, req.Model, req),
 model: req.Model,
 stream: streamRequested,
 responseID: responseID,
 itemID: itemID,
+request: req,
 }

 // Set headers based on streaming mode
@@ -630,6 +630,10 @@ func nameFromToolCallID(messages []Message, toolCallID string) string {

 // decodeImageURL decodes a base64 data URI into raw image bytes.
 func decodeImageURL(url string) (api.ImageData, error) {
+if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
+return nil, errors.New("image URLs are not currently supported, please use base64 encoded data instead")
+}
+
 types := []string{"jpeg", "jpg", "png", "webp"}

 // Support blank mime type to match /api/chat's behavior of taking just unadorned base64
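With this change, callers of the OpenAI-compatible endpoints must supply images as base64 data URIs rather than remote URLs; a minimal sketch of producing one (the helper name is illustrative, not part of the codebase):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// pngDataURI builds the kind of base64 data URI that decodeImageURL accepts
// (remote http/https URLs are rejected by the new check above).
func pngDataURI(raw []byte) string {
	return "data:image/png;base64," + base64.StdEncoding.EncodeToString(raw)
}

func main() {
	fmt.Println(pngDataURI([]byte{0x89, 'P', 'N', 'G'}))
}
```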
@@ -4,6 +4,7 @@ import (
 "encoding/json"
 "fmt"
 "math/rand"
+"time"

 "github.com/ollama/ollama/api"
 )
@@ -265,9 +266,9 @@ type ResponsesText struct {
 type ResponsesTool struct {
 Type string `json:"type"` // "function"
 Name string `json:"name"`
-Description string `json:"description,omitempty"`
-Strict bool `json:"strict,omitempty"`
-Parameters map[string]any `json:"parameters,omitempty"`
+Description *string `json:"description"` // nullable but required
+Strict *bool `json:"strict"` // nullable but required
+Parameters map[string]any `json:"parameters"` // nullable but required
 }

 type ResponsesRequest struct {
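The "nullable but required" comments describe a serialization choice: dropping `omitempty` and (for scalars) switching to pointer types makes the fields always appear in the JSON, as an explicit `null` when unset. A small standalone sketch of the difference, with illustrative type names:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmitempty struct {
	Description string `json:"description,omitempty"`
}

type nullableRequired struct {
	Description *string `json:"description"` // nullable but required
}

func main() {
	a, _ := json.Marshal(withOmitempty{})    // field is dropped entirely
	b, _ := json.Marshal(nullableRequired{}) // field is emitted as null
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"description":null}
}
```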
@@ -475,11 +476,16 @@ func convertTool(t ResponsesTool) (api.Tool, error) {
 }
 }

+var description string
+if t.Description != nil {
+description = *t.Description
+}
+
 return api.Tool{
 Type: t.Type,
 Function: api.ToolFunction{
 Name: t.Name,
-Description: t.Description,
+Description: description,
 Parameters: params,
 },
 }, nil
@@ -516,17 +522,60 @@ func convertInputMessage(m ResponsesInputMessage) (api.Message, error) {

 // Response types for the Responses API

+// ResponsesTextField represents the text output configuration in the response.
+type ResponsesTextField struct {
+Format ResponsesTextFormat `json:"format"`
+}
+
+// ResponsesReasoningOutput represents reasoning configuration in the response.
+type ResponsesReasoningOutput struct {
+Effort *string `json:"effort,omitempty"`
+Summary *string `json:"summary,omitempty"`
+}
+
+// ResponsesError represents an error in the response.
+type ResponsesError struct {
+Code string `json:"code"`
+Message string `json:"message"`
+}
+
+// ResponsesIncompleteDetails represents details about why a response was incomplete.
+type ResponsesIncompleteDetails struct {
+Reason string `json:"reason"`
+}
+
 type ResponsesResponse struct {
 ID string `json:"id"`
 Object string `json:"object"`
 CreatedAt int64 `json:"created_at"`
-Status string `json:"status"`
-Model string `json:"model"`
-Output []ResponsesOutputItem `json:"output"`
-Usage *ResponsesUsage `json:"usage,omitempty"`
-// TODO(drifkin): add `temperature` and `top_p` to the response, but this
-// requires additional plumbing to find the effective values since the
-// defaults can come from the model or the request
+CompletedAt *int64 `json:"completed_at"`
+Status string `json:"status"`
+IncompleteDetails *ResponsesIncompleteDetails `json:"incomplete_details"`
+Model string `json:"model"`
+PreviousResponseID *string `json:"previous_response_id"`
+Instructions *string `json:"instructions"`
+Output []ResponsesOutputItem `json:"output"`
+Error *ResponsesError `json:"error"`
+Tools []ResponsesTool `json:"tools"`
+ToolChoice any `json:"tool_choice"`
+Truncation string `json:"truncation"`
+ParallelToolCalls bool `json:"parallel_tool_calls"`
+Text ResponsesTextField `json:"text"`
+TopP float64 `json:"top_p"`
+PresencePenalty float64 `json:"presence_penalty"`
+FrequencyPenalty float64 `json:"frequency_penalty"`
+TopLogprobs int `json:"top_logprobs"`
+Temperature float64 `json:"temperature"`
+Reasoning *ResponsesReasoningOutput `json:"reasoning"`
+Usage *ResponsesUsage `json:"usage"`
+MaxOutputTokens *int `json:"max_output_tokens"`
+MaxToolCalls *int `json:"max_tool_calls"`
+Store bool `json:"store"`
+Background bool `json:"background"`
+ServiceTier string `json:"service_tier"`
+Metadata map[string]any `json:"metadata"`
+SafetyIdentifier *string `json:"safety_identifier"`
+PromptCacheKey *string `json:"prompt_cache_key"`
 }

 type ResponsesOutputItem struct {
@@ -550,18 +599,39 @@ type ResponsesReasoningSummary struct {
 }

 type ResponsesOutputContent struct {
 Type string `json:"type"` // "output_text"
 Text string `json:"text"`
+Annotations []any `json:"annotations"`
+Logprobs []any `json:"logprobs"`
+}
+
+type ResponsesInputTokensDetails struct {
+CachedTokens int `json:"cached_tokens"`
+}
+
+type ResponsesOutputTokensDetails struct {
+ReasoningTokens int `json:"reasoning_tokens"`
 }

 type ResponsesUsage struct {
 InputTokens int `json:"input_tokens"`
 OutputTokens int `json:"output_tokens"`
 TotalTokens int `json:"total_tokens"`
+InputTokensDetails ResponsesInputTokensDetails `json:"input_tokens_details"`
+OutputTokensDetails ResponsesOutputTokensDetails `json:"output_tokens_details"`
 }

-// ToResponse converts an api.ChatResponse to a Responses API response
-func ToResponse(model, responseID, itemID string, chatResponse api.ChatResponse) ResponsesResponse {
+// derefFloat64 returns the value of a float64 pointer, or a default if nil.
+func derefFloat64(p *float64, def float64) float64 {
+if p != nil {
+return *p
+}
+return def
+}
+
+// ToResponse converts an api.ChatResponse to a Responses API response.
+// The request is used to echo back request parameters in the response.
+func ToResponse(model, responseID, itemID string, chatResponse api.ChatResponse, request ResponsesRequest) ResponsesResponse {
 var output []ResponsesOutputItem

 // Add reasoning item if thinking is present
@@ -585,6 +655,7 @@ func ToResponse(model, responseID, itemID string, chatResponse api.ChatResponse)
 output = append(output, ResponsesOutputItem{
 ID: fmt.Sprintf("fc_%s_%d", responseID, i),
 Type: "function_call",
+Status: "completed",
 CallID: tc.ID,
 Name: tc.Function.Name,
 Arguments: tc.Function.Arguments,
@@ -598,25 +669,90 @@ func ToResponse(model, responseID, itemID string, chatResponse api.ChatResponse)
 Role: "assistant",
 Content: []ResponsesOutputContent{
 {
 Type: "output_text",
 Text: chatResponse.Message.Content,
+Annotations: []any{},
+Logprobs: []any{},
 },
 },
 })
 }

+var instructions *string
+if request.Instructions != "" {
+instructions = &request.Instructions
+}
+
+// Build truncation with default
+truncation := "disabled"
+if request.Truncation != nil {
+truncation = *request.Truncation
+}
+
+tools := request.Tools
+if tools == nil {
+tools = []ResponsesTool{}
+}
+
+text := ResponsesTextField{
+Format: ResponsesTextFormat{Type: "text"},
+}
+if request.Text != nil && request.Text.Format != nil {
+text.Format = *request.Text.Format
+}
+
+// Build reasoning output from request
+var reasoning *ResponsesReasoningOutput
+if request.Reasoning.Effort != "" || request.Reasoning.Summary != "" {
+reasoning = &ResponsesReasoningOutput{}
+if request.Reasoning.Effort != "" {
+reasoning.Effort = &request.Reasoning.Effort
+}
+if request.Reasoning.Summary != "" {
+reasoning.Summary = &request.Reasoning.Summary
+}
+}
+
 return ResponsesResponse{
 ID: responseID,
 Object: "response",
 CreatedAt: chatResponse.CreatedAt.Unix(),
-Status: "completed",
-Model: model,
-Output: output,
+CompletedAt: nil, // Set by middleware when writing final response
+Status: "completed",
+IncompleteDetails: nil, // Only populated if response incomplete
+Model: model,
+PreviousResponseID: nil, // Not supported
+Instructions: instructions,
+Output: output,
+Error: nil, // Only populated on failure
+Tools: tools,
+ToolChoice: "auto", // Default value
+Truncation: truncation,
+ParallelToolCalls: true, // Default value
+Text: text,
+TopP: derefFloat64(request.TopP, 1.0),
+PresencePenalty: 0, // Default value
+FrequencyPenalty: 0, // Default value
+TopLogprobs: 0, // Default value
+Temperature: derefFloat64(request.Temperature, 1.0),
+Reasoning: reasoning,
 Usage: &ResponsesUsage{
 InputTokens: chatResponse.PromptEvalCount,
 OutputTokens: chatResponse.EvalCount,
 TotalTokens: chatResponse.PromptEvalCount + chatResponse.EvalCount,
+// TODO(drifkin): wire through the actual values
+InputTokensDetails: ResponsesInputTokensDetails{CachedTokens: 0},
+// TODO(drifkin): wire through the actual values
+OutputTokensDetails: ResponsesOutputTokensDetails{ReasoningTokens: 0},
 },
+MaxOutputTokens: request.MaxOutputTokens,
+MaxToolCalls: nil, // Not supported
+Store: false, // We don't store responses
+Background: request.Background,
+ServiceTier: "default", // Default value
+Metadata: map[string]any{},
+SafetyIdentifier: nil, // Not supported
+PromptCacheKey: nil, // Not supported
 }
 }

@@ -636,6 +772,7 @@ type ResponsesStreamConverter struct {
 responseID string
 itemID string
 model string
+request ResponsesRequest

 // State tracking (mutated across Process calls)
 firstWrite bool
@@ -668,11 +805,12 @@ func (c *ResponsesStreamConverter) newEvent(eventType string, data map[string]an
 }

 // NewResponsesStreamConverter creates a new converter with the given configuration.
-func NewResponsesStreamConverter(responseID, itemID, model string) *ResponsesStreamConverter {
+func NewResponsesStreamConverter(responseID, itemID, model string, request ResponsesRequest) *ResponsesStreamConverter {
 return &ResponsesStreamConverter{
 responseID: responseID,
 itemID: itemID,
 model: model,
+request: request,
 firstWrite: true,
 }
 }
@@ -717,25 +855,120 @@ func (c *ResponsesStreamConverter) Process(r api.ChatResponse) []ResponsesStream
 return events
 }

+// buildResponseObject creates a full response object with all required fields for streaming events.
+func (c *ResponsesStreamConverter) buildResponseObject(status string, output []any, usage map[string]any) map[string]any {
+var instructions any = nil
+if c.request.Instructions != "" {
+instructions = c.request.Instructions
+}
+
+truncation := "disabled"
+if c.request.Truncation != nil {
+truncation = *c.request.Truncation
+}
+
+var tools []any
+if c.request.Tools != nil {
+for _, t := range c.request.Tools {
+tools = append(tools, map[string]any{
+"type": t.Type,
+"name": t.Name,
+"description": t.Description,
+"strict": t.Strict,
+"parameters": t.Parameters,
+})
+}
+}
+if tools == nil {
+tools = []any{}
+}
+
+textFormat := map[string]any{"type": "text"}
+if c.request.Text != nil && c.request.Text.Format != nil {
+textFormat = map[string]any{
+"type": c.request.Text.Format.Type,
+}
+if c.request.Text.Format.Name != "" {
+textFormat["name"] = c.request.Text.Format.Name
+}
+if c.request.Text.Format.Schema != nil {
+textFormat["schema"] = c.request.Text.Format.Schema
+}
+if c.request.Text.Format.Strict != nil {
+textFormat["strict"] = *c.request.Text.Format.Strict
+}
+}
+
+var reasoning any = nil
+if c.request.Reasoning.Effort != "" || c.request.Reasoning.Summary != "" {
+r := map[string]any{}
+if c.request.Reasoning.Effort != "" {
+r["effort"] = c.request.Reasoning.Effort
+} else {
+r["effort"] = nil
+}
+if c.request.Reasoning.Summary != "" {
+r["summary"] = c.request.Reasoning.Summary
+} else {
+r["summary"] = nil
+}
+reasoning = r
+}
+
+// Build top_p and temperature with defaults
+topP := 1.0
+if c.request.TopP != nil {
+topP = *c.request.TopP
+}
+temperature := 1.0
+if c.request.Temperature != nil {
+temperature = *c.request.Temperature
+}
+
+return map[string]any{
+"id": c.responseID,
+"object": "response",
+"created_at": time.Now().Unix(),
+"completed_at": nil,
+"status": status,
+"incomplete_details": nil,
+"model": c.model,
+"previous_response_id": nil,
+"instructions": instructions,
+"output": output,
+"error": nil,
+"tools": tools,
+"tool_choice": "auto",
+"truncation": truncation,
+"parallel_tool_calls": true,
+"text": map[string]any{"format": textFormat},
+"top_p": topP,
+"presence_penalty": 0,
+"frequency_penalty": 0,
+"top_logprobs": 0,
+"temperature": temperature,
+"reasoning": reasoning,
+"usage": usage,
+"max_output_tokens": c.request.MaxOutputTokens,
+"max_tool_calls": nil,
+"store": false,
+"background": c.request.Background,
+"service_tier": "default",
+"metadata": map[string]any{},
+"safety_identifier": nil,
+"prompt_cache_key": nil,
+}
+}
+
 func (c *ResponsesStreamConverter) createResponseCreatedEvent() ResponsesStreamEvent {
 return c.newEvent("response.created", map[string]any{
-"response": map[string]any{
-"id": c.responseID,
-"object": "response",
-"status": "in_progress",
-"output": []any{},
-},
+"response": c.buildResponseObject("in_progress", []any{}, nil),
 })
 }

 func (c *ResponsesStreamConverter) createResponseInProgressEvent() ResponsesStreamEvent {
 return c.newEvent("response.in_progress", map[string]any{
-"response": map[string]any{
-"id": c.responseID,
-"object": "response",
-"status": "in_progress",
-"output": []any{},
-},
+"response": c.buildResponseObject("in_progress", []any{}, nil),
 })
 }

@@ -762,9 +995,10 @@ func (c *ResponsesStreamConverter) processThinking(thinking string) []ResponsesS

 // Emit delta
 events = append(events, c.newEvent("response.reasoning_summary_text.delta", map[string]any{
 "item_id": c.reasoningItemID,
 "output_index": c.outputIndex,
-"delta": thinking,
+"summary_index": 0,
+"delta": thinking,
 }))

 // TODO(drifkin): consider adding
@@ -783,9 +1017,10 @@ func (c *ResponsesStreamConverter) finishReasoning() []ResponsesStreamEvent {

 events := []ResponsesStreamEvent{
 c.newEvent("response.reasoning_summary_text.done", map[string]any{
 "item_id": c.reasoningItemID,
 "output_index": c.outputIndex,
-"text": c.accumulatedThinking,
+"summary_index": 0,
+"text": c.accumulatedThinking,
 }),
 c.newEvent("response.output_item.done", map[string]any{
 "output_index": c.outputIndex,
@@ -898,8 +1133,10 @@ func (c *ResponsesStreamConverter) processTextContent(content string) []Response
 "output_index": c.outputIndex,
 "content_index": c.contentIndex,
 "part": map[string]any{
 "type": "output_text",
 "text": "",
+"annotations": []any{},
+"logprobs": []any{},
 },
 }))
 }
@@ -913,6 +1150,7 @@ func (c *ResponsesStreamConverter) processTextContent(content string) []Response
 "output_index": c.outputIndex,
 "content_index": 0,
 "delta": content,
+"logprobs": []any{},
 }))

 return events
@@ -944,8 +1182,10 @@ func (c *ResponsesStreamConverter) buildFinalOutput() []any {
 "status": "completed",
 "role": "assistant",
 "content": []map[string]any{{
 "type": "output_text",
 "text": c.accumulatedText,
+"annotations": []any{},
+"logprobs": []any{},
 }},
 })
 }
@@ -967,6 +1207,7 @@ func (c *ResponsesStreamConverter) processCompletion(r api.ChatResponse) []Respo
 "output_index": c.outputIndex,
 "content_index": 0,
 "text": c.accumulatedText,
+"logprobs": []any{},
 }))

 // response.content_part.done
@@ -975,8 +1216,10 @@ func (c *ResponsesStreamConverter) processCompletion(r api.ChatResponse) []Respo
 "output_index": c.outputIndex,
 "content_index": 0,
 "part": map[string]any{
 "type": "output_text",
 "text": c.accumulatedText,
+"annotations": []any{},
+"logprobs": []any{},
 },
 }))

@@ -989,26 +1232,31 @@ func (c *ResponsesStreamConverter) processCompletion(r api.ChatResponse) []Respo
 "status": "completed",
 "role": "assistant",
 "content": []map[string]any{{
 "type": "output_text",
 "text": c.accumulatedText,
+"annotations": []any{},
+"logprobs": []any{},
 }},
 },
 }))
 }

 // response.completed
-events = append(events, c.newEvent("response.completed", map[string]any{
-"response": map[string]any{
-"id": c.responseID,
-"object": "response",
-"status": "completed",
-"output": c.buildFinalOutput(),
-"usage": map[string]any{
-"input_tokens": r.PromptEvalCount,
-"output_tokens": r.EvalCount,
-"total_tokens": r.PromptEvalCount + r.EvalCount,
-},
+usage := map[string]any{
+"input_tokens": r.PromptEvalCount,
+"output_tokens": r.EvalCount,
+"total_tokens": r.PromptEvalCount + r.EvalCount,
+"input_tokens_details": map[string]any{
+"cached_tokens": 0,
 },
+"output_tokens_details": map[string]any{
+"reasoning_tokens": 0,
+},
+}
+response := c.buildResponseObject("completed", c.buildFinalOutput(), usage)
+response["completed_at"] = time.Now().Unix()
+events = append(events, c.newEvent("response.completed", map[string]any{
+"response": response,
 }))

 return events
@@ -850,7 +850,7 @@ func TestFromResponsesRequest_Images(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestResponsesStreamConverter_TextOnly(t *testing.T) {
|
func TestResponsesStreamConverter_TextOnly(t *testing.T) {
|
||||||
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
|
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})
|
||||||
|
|
||||||
// First chunk with content
|
// First chunk with content
|
||||||
events := converter.Process(api.ChatResponse{
|
events := converter.Process(api.ChatResponse{
|
||||||
@@ -916,7 +916,7 @@ func TestResponsesStreamConverter_TextOnly(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestResponsesStreamConverter_ToolCalls(t *testing.T) {
|
func TestResponsesStreamConverter_ToolCalls(t *testing.T) {
|
||||||
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
|
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})
|
||||||
|
|
||||||
events := converter.Process(api.ChatResponse{
|
events := converter.Process(api.ChatResponse{
|
||||||
Message: api.Message{
|
Message: api.Message{
|
||||||
@@ -952,7 +952,7 @@ func TestResponsesStreamConverter_ToolCalls(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestResponsesStreamConverter_Reasoning(t *testing.T) {
|
func TestResponsesStreamConverter_Reasoning(t *testing.T) {
|
||||||
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
|
converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})
|
||||||
|
|
||||||
// First chunk with thinking
|
// First chunk with thinking
|
||||||
events := converter.Process(api.ChatResponse{
|
events := converter.Process(api.ChatResponse{
|
||||||
@@ -1267,7 +1267,7 @@ func TestToResponse_WithReasoning(t *testing.T) {
|
|||||||
Content: "The answer is 42",
|
Content: "The answer is 42",
|
||||||
},
|
},
|
||||||
Done: true,
|
Done: true,
|
||||||
})
|
}, ResponsesRequest{})
|
||||||
|
|
||||||
// Should have 2 output items: reasoning + message
|
// Should have 2 output items: reasoning + message
|
||||||
if len(response.Output) != 2 {
|
if len(response.Output) != 2 {
|
||||||
@@ -1638,7 +1638,7 @@ func TestFromResponsesRequest_ShorthandFormats(t *testing.T) {

func TestResponsesStreamConverter_OutputIncludesContent(t *testing.T) {
	// Verify that response.output_item.done includes content field for messages
-	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
+	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})

	// First chunk
	converter.Process(api.ChatResponse{
@@ -1686,7 +1686,7 @@ func TestResponsesStreamConverter_OutputIncludesContent(t *testing.T) {

func TestResponsesStreamConverter_ResponseCompletedIncludesOutput(t *testing.T) {
	// Verify that response.completed includes the output array
-	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
+	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})

	// Process some content
	converter.Process(api.ChatResponse{
@@ -1730,7 +1730,7 @@ func TestResponsesStreamConverter_ResponseCompletedIncludesOutput(t *testing.T)

func TestResponsesStreamConverter_ResponseCreatedIncludesOutput(t *testing.T) {
	// Verify that response.created includes an empty output array
-	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
+	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})

	events := converter.Process(api.ChatResponse{
		Message: api.Message{Content: "Hi"},
@@ -1757,7 +1757,7 @@ func TestResponsesStreamConverter_ResponseCreatedIncludesOutput(t *testing.T) {

func TestResponsesStreamConverter_SequenceNumbers(t *testing.T) {
	// Verify that events include incrementing sequence numbers
-	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
+	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})

	events := converter.Process(api.ChatResponse{
		Message: api.Message{Content: "Hello"},
@@ -1791,7 +1791,7 @@ func TestResponsesStreamConverter_SequenceNumbers(t *testing.T) {

func TestResponsesStreamConverter_FunctionCallStatus(t *testing.T) {
	// Verify that function call items include status field
-	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b")
+	converter := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{})

	events := converter.Process(api.ChatResponse{
		Message: api.Message{
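Every converter test above now threads a `ResponsesRequest` through the constructor. A minimal, self-contained sketch of what that call shape implies; the struct fields and parameter names here are placeholders, and only the four-argument call form comes from the hunks.

```go
package main

import "fmt"

// Placeholder types for illustration only; the real ResponsesRequest and
// converter live in the openai package and carry many more fields.
type ResponsesRequest struct {
	Model string
}

type ResponsesStreamConverter struct {
	responseID, messageID, model string
	req                          ResponsesRequest
}

// Assumed constructor shape: the same three IDs as before, plus the
// originating request so later stream events can echo request-level fields.
func NewResponsesStreamConverter(responseID, messageID, model string, req ResponsesRequest) *ResponsesStreamConverter {
	return &ResponsesStreamConverter{responseID: responseID, messageID: messageID, model: model, req: req}
}

func main() {
	c := NewResponsesStreamConverter("resp_123", "msg_456", "gpt-oss:20b", ResponsesRequest{Model: "gpt-oss:20b"})
	fmt.Println(c.responseID, c.messageID, c.req.Model)
}
```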
@@ -5,6 +5,7 @@ import (
	"fmt"
	"io"
	"os"
+	"strings"
)

type Prompt struct {
@@ -36,10 +37,11 @@ type Terminal struct {
}

type Instance struct {
	Prompt *Prompt
	Terminal *Terminal
	History *History
	Pasting bool
+	pastedLines []string
}

func New(prompt Prompt) (*Instance, error) {
@@ -174,6 +176,8 @@ func (i *Instance) Readline() (string, error) {
		case CharEsc:
			esc = true
		case CharInterrupt:
+			i.pastedLines = nil
+			i.Prompt.UseAlt = false
			return "", ErrInterrupt
		case CharPrev:
			i.historyPrev(buf, &currentLineBuf)
@@ -188,7 +192,23 @@ func (i *Instance) Readline() (string, error) {
		case CharForward:
			buf.MoveRight()
		case CharBackspace, CharCtrlH:
-			buf.Remove()
+			if buf.IsEmpty() && len(i.pastedLines) > 0 {
+				lastIdx := len(i.pastedLines) - 1
+				prevLine := i.pastedLines[lastIdx]
+				i.pastedLines = i.pastedLines[:lastIdx]
+				fmt.Print(CursorBOL + ClearToEOL + CursorUp + CursorBOL + ClearToEOL)
+				if len(i.pastedLines) == 0 {
+					fmt.Print(i.Prompt.Prompt)
+					i.Prompt.UseAlt = false
+				} else {
+					fmt.Print(i.Prompt.AltPrompt)
+				}
+				for _, r := range prevLine {
+					buf.Add(r)
+				}
+			} else {
+				buf.Remove()
+			}
		case CharTab:
			// todo: convert back to real tabs
			for range 8 {
@@ -211,13 +231,28 @@ func (i *Instance) Readline() (string, error) {
		case CharCtrlZ:
			fd := os.Stdin.Fd()
			return handleCharCtrlZ(fd, i.Terminal.termios)
-		case CharEnter, CharCtrlJ:
+		case CharCtrlJ:
+			i.pastedLines = append(i.pastedLines, buf.String())
+			buf.Buf.Clear()
+			buf.Pos = 0
+			buf.DisplayPos = 0
+			buf.LineHasSpace.Clear()
+			fmt.Println()
+			fmt.Print(i.Prompt.AltPrompt)
+			i.Prompt.UseAlt = true
+			continue
+		case CharEnter:
			output := buf.String()
+			if len(i.pastedLines) > 0 {
+				output = strings.Join(i.pastedLines, "\n") + "\n" + output
+				i.pastedLines = nil
+			}
			if output != "" {
				i.History.Add(output)
			}
			buf.MoveToEnd()
			fmt.Println()
+			i.Prompt.UseAlt = false

			return output, nil
		default:
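To make the new `CharCtrlJ`/`CharEnter` split concrete: Ctrl+J (the newline byte seen during a paste) stashes the current buffer in `pastedLines`, and Enter joins everything back together. A tiny self-contained sketch of that join, with made-up input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// pastedLines stands in for what the CharCtrlJ case accumulates; the
	// final buffer is what the CharEnter case sees. Values are made up.
	pastedLines := []string{"first pasted line", "second pasted line"}
	buffer := "last line before Enter"

	output := buffer
	if len(pastedLines) > 0 {
		output = strings.Join(pastedLines, "\n") + "\n" + output
	}
	fmt.Println(output)
}
```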
@@ -179,7 +179,7 @@ _build_macapp() {
	fi

	rm -f dist/Ollama-darwin.zip
-	ditto -c -k --keepParent dist/Ollama.app dist/Ollama-darwin.zip
+	ditto -c -k --norsrc --keepParent dist/Ollama.app dist/Ollama-darwin.zip
	(cd dist/Ollama.app/Contents/Resources/; tar -cf - ollama ollama-mlx *.so *.dylib *.metallib 2>/dev/null) | gzip -9vc > dist/ollama-darwin.tgz

	# Notarize and Staple
@@ -187,7 +187,7 @@ _build_macapp() {
	$(xcrun -f notarytool) submit dist/Ollama-darwin.zip --wait --timeout 20m --apple-id "$APPLE_ID" --password "$APPLE_PASSWORD" --team-id "$APPLE_TEAM_ID"
	rm -f dist/Ollama-darwin.zip
	$(xcrun -f stapler) staple dist/Ollama.app
-	ditto -c -k --keepParent dist/Ollama.app dist/Ollama-darwin.zip
+	ditto -c -k --norsrc --keepParent dist/Ollama.app dist/Ollama-darwin.zip

	rm -f dist/Ollama.dmg

x/README.md (50 lines, file deleted)
@@ -1,50 +0,0 @@
# Experimental Features

## MLX Backend

We're working on a new experimental backend based on the [MLX project](https://github.com/ml-explore/mlx)

Support is currently limited to MacOS and Linux with CUDA GPUs. We're looking to add support for Windows CUDA soon, and other GPU vendors.

### Building ollama-mlx

The `ollama-mlx` binary is a separate build of Ollama with MLX support enabled. This enables experimental features like image generation.

#### macOS (Apple Silicon and Intel)

```bash
# Build MLX backend libraries
cmake --preset MLX
cmake --build --preset MLX --parallel
cmake --install build --component MLX

# Build ollama-mlx binary
go build -tags mlx -o ollama-mlx .
```

#### Linux (CUDA)

On Linux, use the preset "MLX CUDA 13" or "MLX CUDA 12" to enable CUDA with the default Ollama NVIDIA GPU architectures enabled:

```bash
# Build MLX backend libraries with CUDA support
cmake --preset 'MLX CUDA 13'
cmake --build --preset 'MLX CUDA 13' --parallel
cmake --install build --component MLX

# Build ollama-mlx binary
CGO_CFLAGS="-O3 -I$(pwd)/build/_deps/mlx-c-src" \
CGO_LDFLAGS="-L$(pwd)/build/lib/ollama -lmlxc -lmlx" \
go build -tags mlx -o ollama-mlx .
```

#### Using build scripts

The build scripts automatically create the `ollama-mlx` binary:

- **macOS**: `./scripts/build_darwin.sh` produces `dist/darwin/ollama-mlx`
- **Linux**: `./scripts/build_linux.sh` produces `ollama-mlx` in the output archives

## Image Generation

Image generation is built into the `ollama-mlx` binary. Run `ollama-mlx serve` to start the server with image generation support enabled.
x/cmd/run.go (67 lines changed)
@@ -25,14 +25,6 @@ import (
	"github.com/ollama/ollama/x/tools"
)

-// MultilineState tracks the state of multiline input
-type MultilineState int
-
-const (
-	MultilineNone MultilineState = iota
-	MultilineSystem
-)
-
// Tool output capping constants
const (
	// localModelTokenLimit is the token limit for local models (smaller context).
@@ -656,7 +648,7 @@ func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, op
		Prompt: ">>> ",
		AltPrompt: "... ",
		Placeholder: "Send a message (/? for help)",
-		AltPlaceholder: `Use """ to end multi-line input`,
+		AltPlaceholder: "Press Enter to send",
	})
	if err != nil {
		return err
@@ -707,7 +699,6 @@ func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, op
	var sb strings.Builder
	var format string
	var system string
-	var multiline MultilineState = MultilineNone

	for {
		line, err := scanner.Readline()
@@ -721,37 +712,12 @@ func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, op
			}
			scanner.Prompt.UseAlt = false
			sb.Reset()
-			multiline = MultilineNone
			continue
		case err != nil:
			return err
		}

		switch {
-		case multiline != MultilineNone:
-			// check if there's a multiline terminating string
-			before, ok := strings.CutSuffix(line, `"""`)
-			sb.WriteString(before)
-			if !ok {
-				fmt.Fprintln(&sb)
-				continue
-			}
-
-			switch multiline {
-			case MultilineSystem:
-				system = sb.String()
-				newMessage := api.Message{Role: "system", Content: system}
-				if len(messages) > 0 && messages[len(messages)-1].Role == "system" {
-					messages[len(messages)-1] = newMessage
-				} else {
-					messages = append(messages, newMessage)
-				}
-				fmt.Println("Set system message.")
-				sb.Reset()
-			}
-
-			multiline = MultilineNone
-			scanner.Prompt.UseAlt = false
		case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
			return nil
		case strings.HasPrefix(line, "/clear"):
@@ -860,41 +826,18 @@ func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, op
				options[args[2]] = fp[args[2]]
			case "system":
				if len(args) < 3 {
-					fmt.Println("Usage: /set system <message> or /set system \"\"\"<multi-line message>\"\"\"")
+					fmt.Println("Usage: /set system <message>")
					continue
				}

-				multiline = MultilineSystem
-
-				line := strings.Join(args[2:], " ")
-				line, ok := strings.CutPrefix(line, `"""`)
-				if !ok {
-					multiline = MultilineNone
-				} else {
-					// only cut suffix if the line is multiline
-					line, ok = strings.CutSuffix(line, `"""`)
-					if ok {
-						multiline = MultilineNone
-					}
-				}
-
-				sb.WriteString(line)
-				if multiline != MultilineNone {
-					scanner.Prompt.UseAlt = true
-					continue
-				}
-
-				system = sb.String()
-				newMessage := api.Message{Role: "system", Content: sb.String()}
-				// Check if the slice is not empty and the last message is from 'system'
+				system = strings.Join(args[2:], " ")
+				newMessage := api.Message{Role: "system", Content: system}
				if len(messages) > 0 && messages[len(messages)-1].Role == "system" {
-					// Replace the last message
					messages[len(messages)-1] = newMessage
				} else {
					messages = append(messages, newMessage)
				}
				fmt.Println("Set system message.")
-				sb.Reset()
				continue
			default:
				fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
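With the `"""` multiline path removed, `/set system` now takes the rest of the line verbatim. A small sketch of the simplified argument handling, assuming `args` holds the space-split REPL input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// args mimics the split input for: /set system You are a terse assistant.
	args := []string{"/set", "system", "You", "are", "a", "terse", "assistant."}
	if len(args) < 3 {
		fmt.Println("Usage: /set system <message>")
		return
	}
	system := strings.Join(args[2:], " ")
	fmt.Printf("Set system message: %q\n", system)
}
```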
@@ -1081,7 +1024,7 @@ func GenerateInteractive(cmd *cobra.Command, modelName string, wordWrap bool, op
			sb.WriteString(line)
		}

-		if sb.Len() > 0 && multiline == MultilineNone {
+		if sb.Len() > 0 {
			newMessage := api.Message{Role: "user", Content: sb.String()}
			messages = append(messages, newMessage)