Mirror of https://github.com/ollama/ollama.git
Compare commits: 3 commits on branch parth/samp
| Author | SHA1 | Date |
| --- | --- | --- |
|  | f257f1fd04 |  |
|  | 8b1ae03302 |  |
|  | db10a7da88 |  |
@@ -54,10 +54,6 @@ Here are some example models that can be downloaded:

| Model       | Parameters | Size  | Download                      |
| ----------- | ---------- | ----- | ----------------------------- |
| Gemma 3     | 1B         | 815MB | `ollama run gemma3:1b`        |
| Gemma 3     | 4B         | 3.3GB | `ollama run gemma3`           |
| Gemma 3     | 12B        | 8.1GB | `ollama run gemma3:12b`       |
| Gemma 3     | 27B        | 17GB  | `ollama run gemma3:27b`       |
| QwQ         | 32B        | 20GB  | `ollama run qwq`              |
| DeepSeek-R1 | 7B         | 4.7GB | `ollama run deepseek-r1`      |
| DeepSeek-R1 | 671B       | 404GB | `ollama run deepseek-r1:671b` |
@@ -70,6 +66,9 @@ Here are some example models that can be downloaded:

| Llama 3.1   | 405B | 231GB | `ollama run llama3.1:405b` |
| Phi 4       | 14B  | 9.1GB | `ollama run phi4`          |
| Phi 4 Mini  | 3.8B | 2.5GB | `ollama run phi4-mini`     |
| Gemma 2     | 2B   | 1.6GB | `ollama run gemma2:2b`     |
| Gemma 2     | 9B   | 5.5GB | `ollama run gemma2`        |
| Gemma 2     | 27B  | 16GB  | `ollama run gemma2:27b`    |
| Mistral     | 7B   | 4.1GB | `ollama run mistral`       |
| Moondream 2 | 1.4B | 829MB | `ollama run moondream`     |
| Neural Chat | 7B   | 4.1GB | `ollama run neural-chat`   |
@@ -195,10 +195,6 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {

opts.Messages = []api.Message{}
fmt.Printf("Loading model '%s'\n", opts.Model)
if err := loadOrUnloadModel(cmd, &opts); err != nil {
    if strings.Contains(err.Error(), "not found") {
        fmt.Printf("error: %v\n", err)
        continue
    }
    return err
}
continue
@@ -15,6 +15,7 @@ type TextOptions struct {

attnKeyLen, attnValLen        int
eps, ropeScale                float32
ropeLocalBase, ropeGlobalBase float32
finalLogitSoftcap             float32
largeModelScaling             bool
}
@@ -56,15 +57,16 @@ func newTextModel(c ml.Config) *TextModel {

),
Layers: make([]TextLayer, numBlocks),
TextOptions: &TextOptions{
    hiddenSize:        int(c.Uint("embedding_length")),
    numHeads:          int(c.Uint("attention.head_count")),
    numKVHeads:        int(c.Uint("attention.head_count_kv")),
    attnKeyLen:        int(c.Uint("attention.key_length", 256)),
    attnValLen:        int(c.Uint("attention.value_length", 256)),
    eps:               c.Float("attention.layer_norm_rms_epsilon", 1e-06),
    ropeLocalBase:     c.Float("rope.local.freq_base", 10000.0),
    ropeGlobalBase:    c.Float("rope.global.freq_base", 1000000.0),
    ropeScale:         c.Float("rope.freq_scale", 1.0),
    finalLogitSoftcap: c.Float("final_logit_softcapping", 30.0),
},
}
@@ -243,5 +245,10 @@ func (m *TextModel) Forward(ctx ml.Context, inputs, positions, outputs ml.Tensor

}

hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
return m.Output.Forward(ctx, hiddenState)
hiddenState = m.Output.Forward(ctx, hiddenState)

// final logit softcap
hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.TextOptions.finalLogitSoftcap))
hiddenState = hiddenState.Tanh(ctx)
return hiddenState.Scale(ctx, float64(m.TextOptions.finalLogitSoftcap))
}
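For context, the softcapping added above is just `cap * tanh(logit / cap)` expressed as a Scale/Tanh/Scale chain over the output tensor. Below is a minimal sketch of that math on plain `float32` slices rather than the `ml.Tensor` API; the helper name is hypothetical, and the cap value of 30 mirrors the `final_logit_softcapping` default read in the hunk above.

```go
package main

import (
	"fmt"
	"math"
)

// softcap squashes each logit into (-capValue, +capValue) while staying
// roughly linear for small values: capValue * tanh(logit / capValue).
// This mirrors the Scale(1/cap) -> Tanh -> Scale(cap) chain in the diff.
func softcap(logits []float32, capValue float32) []float32 {
	out := make([]float32, len(logits))
	for i, v := range logits {
		out[i] = capValue * float32(math.Tanh(float64(v/capValue)))
	}
	return out
}

func main() {
	// Small logits pass through almost unchanged; extreme ones are clamped near ±30.
	fmt.Println(softcap([]float32{1, 10, 100, -500}, 30))
}
```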
@@ -116,9 +116,19 @@ func (i *Instance) Readline() (string, error) {

switch r {
case KeyUp:
    i.historyPrev(buf, &currentLineBuf)
    if i.History.Pos > 0 {
        if i.History.Pos == i.History.Size() {
            currentLineBuf = []rune(buf.String())
        }
        buf.Replace([]rune(i.History.Prev()))
    }
case KeyDown:
    i.historyNext(buf, &currentLineBuf)
    if i.History.Pos < i.History.Size() {
        buf.Replace([]rune(i.History.Next()))
        if i.History.Pos == i.History.Size() {
            buf.Replace(currentLineBuf)
        }
    }
case KeyLeft:
    buf.MoveLeft()
case KeyRight:
@@ -175,10 +185,6 @@ func (i *Instance) Readline() (string, error) {

    esc = true
case CharInterrupt:
    return "", ErrInterrupt
case CharPrev:
    i.historyPrev(buf, &currentLineBuf)
case CharNext:
    i.historyNext(buf, &currentLineBuf)
case CharLineStart:
    buf.MoveToStart()
case CharLineEnd:
@@ -240,24 +246,6 @@ func (i *Instance) HistoryDisable() {

i.History.Enabled = false
}

func (i *Instance) historyPrev(buf *Buffer, currentLineBuf *[]rune) {
    if i.History.Pos > 0 {
        if i.History.Pos == i.History.Size() {
            *currentLineBuf = []rune(buf.String())
        }
        buf.Replace([]rune(i.History.Prev()))
    }
}

func (i *Instance) historyNext(buf *Buffer, currentLineBuf *[]rune) {
    if i.History.Pos < i.History.Size() {
        buf.Replace([]rune(i.History.Next()))
        if i.History.Pos == i.History.Size() {
            buf.Replace(*currentLineBuf)
        }
    }
}

func NewTerminal() (*Terminal, error) {
    fd := os.Stdin.Fd()
    termios, err := SetRawMode(fd)
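The inlined KeyUp/KeyDown handling and the removed historyPrev/historyNext helpers both implement the same pattern: stash the in-progress line when leaving the newest history position, and restore it when scrolling back past the end of history. Here is a self-contained sketch of that pattern using hypothetical types, not the readline package's actual Buffer/History API.

```go
package main

import "fmt"

type history struct {
	lines []string
	pos   int // len(lines) means "newest": the line being typed right now
}

func (h *history) prev(current, stash *string) {
	if h.pos > 0 {
		if h.pos == len(h.lines) {
			*stash = *current // save the unsubmitted line once
		}
		h.pos--
		*current = h.lines[h.pos]
	}
}

func (h *history) next(current, stash *string) {
	if h.pos < len(h.lines) {
		h.pos++
		if h.pos == len(h.lines) {
			*current = *stash // back to the line that was being typed
		} else {
			*current = h.lines[h.pos]
		}
	}
}

func main() {
	h := &history{lines: []string{"first", "second"}, pos: 2}
	cur, stash := "typing...", ""
	h.prev(&cur, &stash) // cur == "second"
	h.prev(&cur, &stash) // cur == "first"
	h.next(&cur, &stash) // cur == "second"
	h.next(&cur, &stash) // cur == "typing..." again
	fmt.Println(cur)
}
```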
@@ -1,10 +1,11 @@

package sample

import (
    "errors"
    "math"
    "math/rand"
    "math/rand/v2"
    "slices"
    "sync"
    "time"

    "github.com/ollama/ollama/llama"
)
@@ -86,53 +87,53 @@ func (s *Sampler) sample(tokens []token) (token, error) {

// topK also sorts the tokens in descending order of logits
tokens = topK(tokens, s.topK)

// token logit values are updated to probabilities
tokens = temperature(tokens, s.temperature)

tokens = topP(tokens, s.topP)
tokens = minP(tokens, s.minP)

// token logit values are updated to probabilities
temperature(tokens, s.temperature)
softmax(tokens)
return tokens[dist(tokens, s.rng.Int63())], nil
// TODO: this should fall back to greedy sampling
// or topP, topK values etc should be such that
// there are always tokens to sample from
if len(tokens) == 0 {
    return token{}, errors.New("no tokens to sample from")
}

// // TODO: this should fall back to greedy sampling
// // or topP, topK values etc should be such that
// // there are always tokens to sample from
// if len(tokens) == 0 {
// 	return token{}, errors.New("no tokens to sample from")
// }
var r float32
if s.rng != nil {
    r = s.rng.Float32()
} else {
    r = rand.Float32()
}

// var r float32
// if s.rng != nil {
// 	r = s.rng.Float32()
// } else {
// 	r = rand.Float32()
// }
// Calculate cumulative sum of probabilities
var sum float32
for i := range tokens {
    sum += tokens[i].value
    tokens[i].value = sum
}
r *= tokens[len(tokens)-1].value

// // Calculate cumulative sum of probabilities
// var sum float32
// for i := range tokens {
// 	sum += tokens[i].value
// 	tokens[i].value = sum
// }
// r *= tokens[len(tokens)-1].value
idx, _ := slices.BinarySearchFunc(tokens, r, func(token token, target float32) int {
    if token.value < target {
        return -1
    }
    return 1
})

// idx, _ := slices.BinarySearchFunc(tokens, r, func(token token, target float32) int {
// 	if token.value < target {
// 		return -1
// 	}
// 	return 1
// })

// return tokens[idx], nil
return tokens[idx], nil
}

// TODO(parthsareen): update sampler interface to use json unmarshal https://github.com/ollama/ollama/issues/9278
func NewSampler(temperature float32, topK int, topP float32, minP float32, seed int, grammar *Grammar) Sampler {
    var rng *rand.Rand
    if seed != -1 {
        rng = rand.New(rand.NewSource(int64(seed)))
    } else {
        rng = rand.New(rand.NewSource(time.Now().UnixNano()))
        // PCG requires two parameters: sequence and stream
        // Use original seed for sequence
        sequence := uint64(seed)
        // Use golden ratio hash to generate statistically independent seeds
        rng = rand.New(rand.NewPCG(sequence, sequence^0x9E3779B9))
    }
    if temperature < 0.0 {
        temperature = 0.0
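The sampling step in this hunk replaces the old seed-per-call `dist` helper with a cumulative sum over the already-transformed probabilities followed by `slices.BinarySearchFunc`, and seeds a `math/rand/v2` PCG generator from the seed plus a golden-ratio-derived stream. A rough standalone sketch of both ideas, assuming the input values are normalized probabilities; the helper name `sampleIndex` is hypothetical.

```go
package main

import (
	"fmt"
	"math/rand/v2"
	"slices"
)

// sampleIndex draws an index in proportion to probs by building the
// cumulative distribution and binary-searching for the random draw.
func sampleIndex(probs []float32, rng *rand.Rand) int {
	cdf := make([]float32, len(probs))
	var sum float32
	for i, p := range probs {
		sum += p
		cdf[i] = sum
	}
	// Scale the draw by the total mass so small normalization errors
	// cannot push r past the last bucket.
	r := rng.Float32() * sum
	idx, _ := slices.BinarySearchFunc(cdf, r, func(c, target float32) int {
		if c < target {
			return -1
		}
		return 1
	})
	if idx >= len(probs) {
		idx = len(probs) - 1
	}
	return idx
}

func main() {
	seed := uint64(42)
	// Same seeding idea as in the diff: PCG takes a sequence and a stream,
	// derived here from the seed and a golden-ratio hash of it.
	rng := rand.New(rand.NewPCG(seed, seed^0x9E3779B9))

	probs := []float32{0.1, 0.2, 0.3, 0.4}
	counts := make([]int, len(probs))
	for i := 0; i < 10000; i++ {
		counts[sampleIndex(probs, rng)]++
	}
	fmt.Println(counts) // roughly proportional to probs
}
```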
sample/testdata/logits.bin (vendored)
File diff suppressed because one or more lines are too long
@@ -3,7 +3,6 @@ package sample

import (
    "container/heap"
    "math"
    "math/rand"
    "slices"
)
@@ -26,6 +25,32 @@ func (h *tokenHeap) Pop() any {

    return x
}

// temperature applies scaling and softmax to the logits
func temperature(ts []token, temp float32) []token {
    // Find max logit for numerical stability
    maxLogit := float32(math.Inf(-1))
    for _, t := range ts {
        if t.value > maxLogit {
            maxLogit = t.value
        }
    }

    // Apply temperature and compute exp(x - max)
    temp = max(temp, 1e-7)
    var sum float32
    for i, v := range ts {
        ts[i].value = float32(math.Exp(float64((v.value - maxLogit) / temp)))
        sum += ts[i].value
    }

    // Normalize
    for i := range ts {
        ts[i].value /= sum
    }

    return ts
}

// topK limits the number of tokens considered to the k highest logits
func topK(ts []token, k int) []token {
    if k >= len(ts) || k <= 0 {
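The new `temperature` transform folds the old separate temperature and softmax passes into one: clamp the temperature, subtract the max logit, exponentiate, and normalize, i.e. p_i = exp((x_i - max)/T) / Σ_j exp((x_j - max)/T). A standalone sketch of the same math on a plain `[]float32`; the helper name is hypothetical and not part of the package.

```go
package main

import (
	"fmt"
	"math"
)

func softmaxWithTemperature(logits []float32, temp float32) []float32 {
	// Guard against division by zero; the diff clamps temp the same way.
	if temp < 1e-7 {
		temp = 1e-7
	}
	// Subtracting the max logit before exponentiating keeps exp() from
	// overflowing and leaves the normalized result unchanged.
	maxLogit := float32(math.Inf(-1))
	for _, v := range logits {
		if v > maxLogit {
			maxLogit = v
		}
	}
	out := make([]float32, len(logits))
	var sum float32
	for i, v := range logits {
		out[i] = float32(math.Exp(float64((v - maxLogit) / temp)))
		sum += out[i]
	}
	for i := range out {
		out[i] /= sum
	}
	return out
}

func main() {
	// Lower temperatures concentrate mass on the largest logit;
	// higher temperatures flatten the distribution.
	fmt.Println(softmaxWithTemperature([]float32{2, 1, 0.5}, 0.7))
}
```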
@@ -109,59 +134,3 @@ func minP(ts []token, p float32) []token {

    ts = validTokens
    return ts
}

func temperature(ts []token, temp float32) {
    for i := range ts {
        ts[i].value /= temp
    }
}

func softmax(ts []token) {
    if len(ts) == 0 {
        return
    }

    // Find max logit for numerical stability
    maxLogit := ts[0].value
    for _, t := range ts {
        if t.value > maxLogit {
            maxLogit = t.value
        }
    }

    // Compute exp(logit - maxLogit) and sum them
    var sumExp float32
    for i, t := range ts {
        expVal := float32(math.Exp(float64(t.value - maxLogit)))
        ts[i].value = expVal
        sumExp += expVal
    }

    // Normalize probabilities
    for i := range ts {
        ts[i].value /= sumExp
    }
}

// applyDist selects a token based on probabilities and seed
func dist(ts []token, seed int64) int {
    rng := rand.New(rand.NewSource(seed))

    cdf := make([]float32, len(ts))
    var cumSum float32
    for i, t := range ts {
        cumSum += t.value
        cdf[i] = cumSum
    }

    r := rng.Float32() * cumSum

    // Select token based on CDF
    for i, probSum := range cdf {
        if r < probSum {
            return i
        }
    }

    return len(ts) - 1
}
@@ -1,13 +1,8 @@

package sample

import (
    "encoding/binary"
    "errors"
    "math"
    "math/rand/v2"
    "os"
    "path/filepath"
    "runtime"
    "testing"
)
@@ -148,98 +143,6 @@ func TestSortLogits(t *testing.T) {

    compareLogits(t, "sortLogits", want, tokens)
}

// TestSortLogitsWithRealData tests sorting behavior using real model logit distributions
func TestSortLogitsWithRealData(t *testing.T) {
    // This will be populated from testdata/logits.bin
    // Format: 32-bit float array in binary format
    logits, err := loadTestLogits(t)
    if err != nil {
        t.Skipf("Skipping real logit test: %v", err)
        return
    }

    tokens := toTokens(logits)
    sortLogits(tokens)

    // Calculate n for verification
    n := int(math.Sqrt(float64(len(tokens)))) + 1
    if n > 1000 {
        n = 1000
    } else if n < 100 {
        n = 100
    }

    t.Logf("Testing with %d tokens, partial sorting top %d", len(tokens), n)

    // Only verify the top n elements are sorted (which is what we guarantee)
    // This is much faster than checking the entire array
    topN := tokens[:n]
    for i := 1; i < len(topN); i++ {
        if topN[i].value > topN[i-1].value {
            t.Fatalf("top %d tokens not properly sorted at index %d: %.15f > %.15f",
                n, i, topN[i].value, topN[i-1].value)
        }
    }

    // Verify we didn't lose any high value tokens by checking that
    // all tokens after position n are <= the nth token
    // Do this in chunks to avoid timeouts on large arrays
    nthValue := tokens[n-1].value
    const chunkSize = 1000

    for start := n; start < len(tokens); start += chunkSize {
        end := min(start+chunkSize, len(tokens))
        for i := start; i < end; i++ {
            if tokens[i].value > nthValue {
                t.Fatalf("found higher value token after position %d: tokens[%d].value = %.15f > %.15f",
                    n, i, tokens[i].value, nthValue)
            }
        }
    }
}

// loadTestLogits loads logit test data from testdata/logits.bin
func loadTestLogits(t *testing.T) ([]float32, error) {
    t.Helper()

    _, currFile, _, ok := runtime.Caller(0)
    if !ok {
        return nil, errors.New("could not determine test file path")
    }
    testDataPath := filepath.Join(filepath.Dir(currFile), "testdata", "logits.bin")

    file, err := os.Open(testDataPath)
    if err != nil {
        return nil, err
    }
    defer file.Close()

    stat, err := file.Stat()
    if err != nil {
        return nil, err
    }

    numFloats := stat.Size() / 4 // each float32 is 4 bytes
    if numFloats*4 != stat.Size() {
        return nil, errors.New("logits.bin has invalid size: not a multiple of 4 bytes")
    }

    logits := make([]float32, numFloats)
    for i := range logits {
        var val uint32
        if err := binary.Read(file, binary.LittleEndian, &val); err != nil {
            return nil, err
        }
        logits[i] = math.Float32frombits(val)
    }

    if len(logits) == 0 {
        return nil, errors.New("logits.bin is empty")
    }

    return logits, nil
}

func BenchmarkTransforms(b *testing.B) {
    // Generate random logits
    tokens := make([]token, 1<<16)