mirror of https://github.com/ollama/ollama.git
synced 2025-11-12 04:37:48 +01:00

client updates

This commit is contained in:

llama/llama.go | 126
@@ -31,135 +31,35 @@ package llama
 import "C"

 import (
 	"fmt"
-	"os"
 	"strings"
 	"sync"
 	"unsafe"
 )

 type LLama struct {
-	state       unsafe.Pointer
+	ctx         unsafe.Pointer
 	embeddings  bool
 	contextSize int
 }

 func New(model string, opts ...ModelOption) (*LLama, error) {
 	mo := NewModelOptions(opts...)

 	// TODO: free this pointer
 	modelPath := C.CString(model)
-	result := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
-	if result == nil {
+	ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
+	if ctx == nil {
 		return nil, fmt.Errorf("failed loading model")
 	}

-	ll := &LLama{state: result, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
+	ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}

 	return ll, nil
 }
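Note on the TODO above: C.CString copies the Go string into C-heap memory via malloc, and nothing here ever frees it (the same applies to the C.CString(mo.MainGPU) and C.CString(mo.TensorSplit) arguments). A minimal sketch of one way to plug the leak, assuming the strings are only needed for the duration of the load_model call; the cString helper is hypothetical, not part of this commit:

package llama

// #include <stdlib.h>
import "C"
import "unsafe"

// cString wraps C.CString and returns a cleanup func, so call sites
// can `defer free()` instead of leaking the C-heap copy.
func cString(s string) (cs *C.char, free func()) {
	cs = C.CString(s)
	return cs, func() { C.free(unsafe.Pointer(cs)) }
}

At the call site this would read: modelPath, freePath := cString(model); defer freePath().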

 func (l *LLama) Free() {
-	C.llama_binding_free_model(l.state)
+	C.llama_binding_free_model(l.ctx)
 }

-func (l *LLama) LoadState(state string) error {
-	d := C.CString(state)
-	w := C.CString("rb")
-
-	result := C.load_state(l.state, d, w)
-	if result != 0 {
-		return fmt.Errorf("error while loading state")
-	}
-
-	return nil
-}
-
-func (l *LLama) SaveState(dst string) error {
-	d := C.CString(dst)
-	w := C.CString("wb")
-
-	C.save_state(l.state, d, w)
-
-	_, err := os.Stat(dst)
-	return err
-}
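For reference, the two deleted helpers formed a save/restore round trip over the C-side state file; a sketch of how they were used before this commit (the path is illustrative):

// Pre-commit usage of the removed state helpers.
if err := ll.SaveState("/tmp/llama.state"); err != nil {
	return err
}
if err := ll.LoadState("/tmp/llama.state"); err != nil {
	return err
}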
-
-// Token Embeddings
-func (l *LLama) TokenEmbeddings(tokens []int, opts ...PredictOption) ([]float32, error) {
-	if !l.embeddings {
-		return []float32{}, fmt.Errorf("model loaded without embeddings")
-	}
-
-	po := NewPredictOptions(opts...)
-
-	outSize := po.Tokens
-	if po.Tokens == 0 {
-		outSize = 9999999
-	}
-
-	floats := make([]float32, outSize)
-
-	myArray := (*C.int)(C.malloc(C.size_t(len(tokens)) * C.sizeof_int))
-
-	// Copy the values from the Go slice to the C array
-	for i, v := range tokens {
-		(*[1<<31 - 1]int32)(unsafe.Pointer(myArray))[i] = int32(v)
-	}
-
-	params := C.llama_allocate_params(C.CString(""), C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
-		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
-		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
-		C.int(po.Batch), C.int(po.NKeep), nil, C.int(0),
-		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
-		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
-		C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
-		C.CString(po.MainGPU), C.CString(po.TensorSplit),
-		C.bool(po.PromptCacheRO),
-	)
-	ret := C.get_token_embeddings(params, l.state, myArray, C.int(len(tokens)), (*C.float)(&floats[0]))
-	if ret != 0 {
-		return floats, fmt.Errorf("embedding inference failed")
-	}
-	return floats, nil
-}
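The (*[1<<31 - 1]int32)(unsafe.Pointer(...)) cast in the removed body is the pre-Go-1.17 idiom for viewing C memory as a Go slice. The same copy reads more clearly with unsafe.Slice; a sketch under the same cgo preamble as above, with cIntArray a hypothetical name and the caller still responsible for C.free:

// cIntArray copies a Go []int into a malloc'd C int array, the same
// marshalling the removed TokenEmbeddings did by hand.
func cIntArray(tokens []int) *C.int {
	arr := (*C.int)(C.malloc(C.size_t(len(tokens)) * C.sizeof_int))
	view := unsafe.Slice(arr, len(tokens)) // []C.int view over the C allocation
	for i, v := range tokens {
		view[i] = C.int(v)
	}
	return arr
}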
-
-// Embeddings
-func (l *LLama) Embeddings(text string, opts ...PredictOption) ([]float32, error) {
-	if !l.embeddings {
-		return []float32{}, fmt.Errorf("model loaded without embeddings")
-	}
-
-	po := NewPredictOptions(opts...)
-
-	input := C.CString(text)
-	if po.Tokens == 0 {
-		po.Tokens = 99999999
-	}
-	floats := make([]float32, po.Tokens)
-	reverseCount := len(po.StopPrompts)
-	reversePrompt := make([]*C.char, reverseCount)
-	var pass **C.char
-	for i, s := range po.StopPrompts {
-		cs := C.CString(s)
-		reversePrompt[i] = cs
-		pass = &reversePrompt[0]
-	}
-
-	params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
-		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
-		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
-		C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
-		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
-		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
-		C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
-		C.CString(po.MainGPU), C.CString(po.TensorSplit),
-		C.bool(po.PromptCacheRO),
-	)
-
-	ret := C.get_embeddings(params, l.state, (*C.float)(&floats[0]))
-	if ret != 0 {
-		return floats, fmt.Errorf("embedding inference failed")
-	}
-
-	return floats, nil
-}

 func (l *LLama) Eval(text string, opts ...PredictOption) error {
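The reversePrompt/pass dance in the removed Embeddings body is the standard cgo pattern for a C char** argument: build a []*C.char and pass the address of its first element, or nil when the list is empty (the original assigned pass inside the loop, which only needed to happen once). A sketch with a hypothetical helper name, same cgo preamble as earlier; each C.CString still leaks unless freed after the call:

// cStringArray mirrors the removed stop-prompt marshalling.
func cStringArray(ss []string) (**C.char, C.int) {
	if len(ss) == 0 {
		return nil, 0 // C receives NULL and a zero count
	}
	arr := make([]*C.char, len(ss))
	for i, s := range ss {
		arr[i] = C.CString(s) // leaks unless freed by the caller
	}
	return &arr[0], C.int(len(ss))
}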

@@ -189,7 +89,7 @@ func (l *LLama) Eval(text string, opts ...PredictOption) error {
 		C.CString(po.MainGPU), C.CString(po.TensorSplit),
 		C.bool(po.PromptCacheRO),
 	)
-	ret := C.eval(params, l.state, input)
+	ret := C.eval(params, l.ctx, input)
 	if ret != 0 {
 		return fmt.Errorf("inference failed")
 	}

@@ -203,7 +103,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 	po := NewPredictOptions(opts...)

 	if po.TokenCallback != nil {
-		setCallback(l.state, po.TokenCallback)
+		setCallback(l.ctx, po.TokenCallback)
 	}

 	input := C.CString(text)

@@ -231,7 +131,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 		C.CString(po.MainGPU), C.CString(po.TensorSplit),
 		C.bool(po.PromptCacheRO),
 	)
-	ret := C.llama_predict(params, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
+	ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
 	if ret != 0 {
 		return "", fmt.Errorf("inference failed")
 	}
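llama_predict writes into the Go-allocated out buffer through the *C.char cast above, so recovering the result depends on the C side NUL-terminating what it wrote. A sketch of that recovery step, assuming that invariant and the same cgo preamble as earlier:

// goStringFromBuf recovers a Go string from a NUL-terminated buffer
// that C code has filled, as Predict does with its `out` slice.
func goStringFromBuf(buf []byte) string {
	return C.GoString((*C.char)(unsafe.Pointer(&buf[0])))
}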

@@ -248,7 +148,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 	C.llama_free_params(params)

 	if po.TokenCallback != nil {
-		setCallback(l.state, nil)
+		setCallback(l.ctx, nil)
 	}

 	return res, nil

@@ -268,7 +168,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
 //
 // It is safe to call this method while a prediction is running.
 func (l *LLama) SetTokenCallback(callback func(token string) bool) {
-	setCallback(l.state, callback)
+	setCallback(l.ctx, callback)
 }
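Putting the renamed pieces together, a hypothetical end-to-end use of the binding; the model path and prompt are illustrative, and every option is left at its default:

func example() error {
	ll, err := New("model.bin") // illustrative path
	if err != nil {
		return err
	}
	defer ll.Free()

	// Stream tokens as they are produced; returning false stops generation.
	ll.SetTokenCallback(func(token string) bool {
		fmt.Print(token)
		return true
	})

	_, err = ll.Predict("Why is the sky blue?")
	return err
}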

 var (