package cmd

import (
	"bufio"
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"log"
	"math"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/containerd/console"
	"github.com/mattn/go-runewidth"
	"github.com/olekukonko/tablewriter"
	"github.com/spf13/cobra"
	"golang.org/x/crypto/ssh"
	"golang.org/x/term"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/llama"
	"github.com/ollama/ollama/llama/runner"
	"github.com/ollama/ollama/parser"
	"github.com/ollama/ollama/progress"
	"github.com/ollama/ollama/server"
	"github.com/ollama/ollama/types/model"
	"github.com/ollama/ollama/version"
)
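
// errModelfileNotFound is returned when the Modelfile named with -f/--file
// does not exist.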
var errModelfileNotFound = errors.New("specified Modelfile wasn't found")

func getModelfileName(cmd *cobra.Command) (string, error) {
	fn, _ := cmd.Flags().GetString("file")

	filename := fn
	if filename == "" {
		filename = "Modelfile"
	}

	absName, err := filepath.Abs(filename)
	if err != nil {
		return "", err
	}

	_, err = os.Stat(absName)
	if err != nil {
		// Return the raw flag value so the caller can tell a missing implicit
		// default Modelfile (fn == "") apart from a missing user-specified file.
		return fn, err
	}

	return absName, nil
}
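
// CreateHandler creates a model from a Modelfile, uploading any referenced
// model or adapter files as blobs before sending the create request.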
func CreateHandler(cmd *cobra.Command, args []string) error {
	p := progress.NewProgress(os.Stderr)
	defer p.Stop()

	var reader io.Reader

	filename, err := getModelfileName(cmd)
	if os.IsNotExist(err) {
		if filename == "" {
			reader = strings.NewReader("FROM .\n")
		} else {
			return errModelfileNotFound
		}
	} else if err != nil {
		return err
	} else {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}

		reader = f
		defer f.Close()
	}

	modelfile, err := parser.ParseFile(reader)
	if err != nil {
		return err
	}

	status := "gathering model components"
	spinner := progress.NewSpinner(status)
	p.Add(status, spinner)

	req, err := modelfile.CreateRequest(filepath.Dir(filename))
	if err != nil {
		return err
	}
	spinner.Stop()

	req.Name = args[0]
	quantize, _ := cmd.Flags().GetString("quantize")
	if quantize != "" {
		req.Quantize = quantize
	}

	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	if len(req.Files) > 0 {
		fileMap := map[string]string{}
		for f, digest := range req.Files {
			if _, err := createBlob(cmd, client, f, digest, p); err != nil {
				return err
			}
			fileMap[filepath.Base(f)] = digest
		}
		req.Files = fileMap
	}

	if len(req.Adapters) > 0 {
		fileMap := map[string]string{}
		for f, digest := range req.Adapters {
			if _, err := createBlob(cmd, client, f, digest, p); err != nil {
				return err
			}
			fileMap[filepath.Base(f)] = digest
		}
		req.Adapters = fileMap
	}

	bars := make(map[string]*progress.Bar)
	fn := func(resp api.ProgressResponse) error {
		if resp.Digest != "" {
			bar, ok := bars[resp.Digest]
			if !ok {
				bar = progress.NewBar(fmt.Sprintf("pulling %s...", resp.Digest[7:19]), resp.Total, resp.Completed)
				bars[resp.Digest] = bar
				p.Add(resp.Digest, bar)
			}

			bar.Set(resp.Completed)
		} else if status != resp.Status {
			spinner.Stop()

			status = resp.Status
			spinner = progress.NewSpinner(status)
			p.Add(status, spinner)
		}

		return nil
	}

	if err := client.Create(cmd.Context(), req, fn); err != nil {
		if strings.Contains(err.Error(), "path or Modelfile are required") {
			return fmt.Errorf("the ollama server must be updated to use `ollama create` with this client")
		}
		return err
	}

	return nil
}
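
// createBlob uploads the file at path to the server as a blob identified by
// digest, reporting copy progress via a spinner.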
func createBlob(cmd *cobra.Command, client *api.Client, path string, digest string, p *progress.Progress) (string, error) {
	realPath, err := filepath.EvalSymlinks(path)
	if err != nil {
		return "", err
	}

	bin, err := os.Open(realPath)
	if err != nil {
		return "", err
	}
	defer bin.Close()

	// Get file info to retrieve the size
	fileInfo, err := bin.Stat()
	if err != nil {
		return "", err
	}
	fileSize := fileInfo.Size()

	var pw progressWriter
	status := fmt.Sprintf("copying file %s 0%%", digest)
	spinner := progress.NewSpinner(status)
	p.Add(status, spinner)
	defer spinner.Stop()

	done := make(chan struct{})
	defer close(done)

	go func() {
		ticker := time.NewTicker(60 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				spinner.SetMessage(fmt.Sprintf("copying file %s %d%%", digest, int(100*pw.n.Load()/fileSize)))
			case <-done:
				spinner.SetMessage(fmt.Sprintf("copying file %s 100%%", digest))
				return
			}
		}
	}()

	if err = client.CreateBlob(cmd.Context(), digest, io.TeeReader(bin, &pw)); err != nil {
		return "", err
	}
	return digest, nil
}
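
// progressWriter counts the bytes written through it; createBlob tees the
// upload through one so the background goroutine can report percent complete.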
type progressWriter struct {
	n atomic.Int64
}

func (w *progressWriter) Write(p []byte) (n int, err error) {
	w.n.Add(int64(len(p)))
	return len(p), nil
}
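
// loadOrUnloadModel sends an empty generate request: a zero KeepAlive asks
// the server to unload the model, any other value loads it into memory.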
func loadOrUnloadModel(cmd *cobra.Command, opts *runOptions) error {
	p := progress.NewProgress(os.Stderr)
	defer p.StopAndClear()

	spinner := progress.NewSpinner("")
	p.Add("", spinner)

	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	req := &api.GenerateRequest{
		Model:     opts.Model,
		KeepAlive: opts.KeepAlive,
	}

	return client.Generate(cmd.Context(), req, func(api.GenerateResponse) error { return nil })
}
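
// StopHandler implements "ollama stop" by requesting the model be unloaded.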
func StopHandler(cmd *cobra.Command, args []string) error {
	opts := &runOptions{
		Model:     args[0],
		KeepAlive: &api.Duration{Duration: 0},
	}
	if err := loadOrUnloadModel(cmd, opts); err != nil {
		if strings.Contains(err.Error(), "not found") {
			return fmt.Errorf("couldn't find model \"%s\" to stop", args[0])
		}
		// Propagate other errors rather than silently swallowing them.
		return err
	}
	return nil
}
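
// RunHandler implements "ollama run": it pulls the model if it is missing,
// then generates a single response or starts an interactive session.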
func RunHandler(cmd *cobra.Command, args []string) error {
	interactive := true

	opts := runOptions{
		Model:    args[0],
		WordWrap: os.Getenv("TERM") == "xterm-256color",
		Options:  map[string]interface{}{},
	}

	format, err := cmd.Flags().GetString("format")
	if err != nil {
		return err
	}
	opts.Format = format

	keepAlive, err := cmd.Flags().GetString("keepalive")
	if err != nil {
		return err
	}
	if keepAlive != "" {
		d, err := time.ParseDuration(keepAlive)
		if err != nil {
			return err
		}
		opts.KeepAlive = &api.Duration{Duration: d}
	}

	prompts := args[1:]
	// prepend stdin to the prompt if provided
	if !term.IsTerminal(int(os.Stdin.Fd())) {
		in, err := io.ReadAll(os.Stdin)
		if err != nil {
			return err
		}

		prompts = append([]string{string(in)}, prompts...)
		opts.WordWrap = false
		interactive = false
	}
	opts.Prompt = strings.Join(prompts, " ")
	if len(prompts) > 0 {
		interactive = false
	}
	// Be quiet if we're redirecting to a pipe or file
	if !term.IsTerminal(int(os.Stdout.Fd())) {
		interactive = false
	}

	nowrap, err := cmd.Flags().GetBool("nowordwrap")
	if err != nil {
		return err
	}
	opts.WordWrap = !nowrap

	// Fill out the rest of the options based on information about the
	// model.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	name := args[0]
	info, err := func() (*api.ShowResponse, error) {
		showReq := &api.ShowRequest{Name: name}
		info, err := client.Show(cmd.Context(), showReq)
		var se api.StatusError
		if errors.As(err, &se) && se.StatusCode == http.StatusNotFound {
			if err := PullHandler(cmd, []string{name}); err != nil {
				return nil, err
			}
			return client.Show(cmd.Context(), &api.ShowRequest{Name: name})
		}
		return info, err
	}()
	if err != nil {
		return err
	}

	opts.MultiModal = len(info.ProjectorInfo) != 0
	opts.ParentModel = info.Details.ParentModel

	if interactive {
		if err := loadOrUnloadModel(cmd, &opts); err != nil {
			return err
		}

		for _, msg := range info.Messages {
			switch msg.Role {
			case "user":
				fmt.Printf(">>> %s\n", msg.Content)
			case "assistant":
				state := &displayResponseState{}
				displayResponse(msg.Content, opts.WordWrap, state)
				fmt.Println()
				fmt.Println()
			}
		}

		return generateInteractive(cmd, opts)
	}
	return generate(cmd, opts)
}
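
// PushHandler implements "ollama push", uploading a model to a registry and
// printing where it can be found.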
func PushHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	insecure, err := cmd.Flags().GetBool("insecure")
	if err != nil {
		return err
	}

	p := progress.NewProgress(os.Stderr)
	defer p.Stop()

	bars := make(map[string]*progress.Bar)
	var status string
	var spinner *progress.Spinner

	fn := func(resp api.ProgressResponse) error {
		if resp.Digest != "" {
			if spinner != nil {
				spinner.Stop()
			}

			bar, ok := bars[resp.Digest]
			if !ok {
				bar = progress.NewBar(fmt.Sprintf("pushing %s...", resp.Digest[7:19]), resp.Total, resp.Completed)
				bars[resp.Digest] = bar
				p.Add(resp.Digest, bar)
			}

			bar.Set(resp.Completed)
		} else if status != resp.Status {
			if spinner != nil {
				spinner.Stop()
			}

			status = resp.Status
			spinner = progress.NewSpinner(status)
			p.Add(status, spinner)
		}

		return nil
	}

	request := api.PushRequest{Name: args[0], Insecure: insecure}

	n := model.ParseName(args[0])
	if err := client.Push(cmd.Context(), &request, fn); err != nil {
		if spinner != nil {
			spinner.Stop()
		}
		if strings.Contains(err.Error(), "access denied") {
			return errors.New("you are not authorized to push to this namespace, create the model under a namespace you own")
		}
		return err
	}

	p.Stop()
	spinner.Stop()

	destination := n.String()
	if strings.HasSuffix(n.Host, ".ollama.ai") || strings.HasSuffix(n.Host, ".ollama.com") {
		destination = "https://ollama.com/" + strings.TrimSuffix(n.DisplayShortest(), ":latest")
	}
	fmt.Printf("\nYou can find your model at:\n\n")
	fmt.Printf("\t%s\n", destination)

	return nil
}
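
// ListHandler implements "ollama list", printing models in a table,
// optionally filtered by a case-insensitive name prefix.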
func ListHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	models, err := client.List(cmd.Context())
	if err != nil {
		return err
	}

	var data [][]string

	for _, m := range models.Models {
		if len(args) == 0 || strings.HasPrefix(strings.ToLower(m.Name), strings.ToLower(args[0])) {
			data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), format.HumanTime(m.ModifiedAt, "Never")})
		}
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"NAME", "ID", "SIZE", "MODIFIED"})
	table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetHeaderLine(false)
	table.SetBorder(false)
	table.SetNoWhiteSpace(true)
	table.SetTablePadding("  ")
	table.AppendBulk(data)
	table.Render()

	return nil
}
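
// ListRunningHandler implements "ollama ps", showing each loaded model, how
// it is split between CPU and GPU memory, and when it will be unloaded.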
func ListRunningHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	models, err := client.ListRunning(cmd.Context())
	if err != nil {
		return err
	}

	var data [][]string

	for _, m := range models.Models {
		if len(args) == 0 || strings.HasPrefix(m.Name, args[0]) {
			var procStr string
			switch {
			case m.SizeVRAM == 0:
				procStr = "100% CPU"
			case m.SizeVRAM == m.Size:
				procStr = "100% GPU"
			case m.SizeVRAM > m.Size || m.Size == 0:
				procStr = "Unknown"
			default:
				sizeCPU := m.Size - m.SizeVRAM
				cpuPercent := math.Round(float64(sizeCPU) / float64(m.Size) * 100)
				procStr = fmt.Sprintf("%d%%/%d%% CPU/GPU", int(cpuPercent), int(100-cpuPercent))
			}

			var until string
			delta := time.Since(m.ExpiresAt)
			if delta > 0 {
				until = "Stopping..."
			} else {
				until = format.HumanTime(m.ExpiresAt, "Never")
			}
			data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, until})
		}
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"NAME", "ID", "SIZE", "PROCESSOR", "UNTIL"})
	table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetHeaderLine(false)
	table.SetBorder(false)
	table.SetNoWhiteSpace(true)
	table.SetTablePadding("  ")
	table.AppendBulk(data)
	table.Render()

	return nil
}
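
// DeleteHandler implements "ollama rm": it stops the first model if it is
// running, then deletes every model named in args.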
func DeleteHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	// Unload the model if it's running before deletion
	opts := &runOptions{
		Model:     args[0],
		KeepAlive: &api.Duration{Duration: 0},
	}
	if err := loadOrUnloadModel(cmd, opts); err != nil {
		if !strings.Contains(err.Error(), "not found") {
			return fmt.Errorf("unable to stop existing running model \"%s\": %s", args[0], err)
		}
	}

	for _, name := range args {
		req := api.DeleteRequest{Name: name}
		if err := client.Delete(cmd.Context(), &req); err != nil {
			return err
		}
		fmt.Printf("deleted '%s'\n", name)
	}
	return nil
}
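
// ShowHandler implements "ollama show", printing a single field when exactly
// one of the selector flags is set, or a formatted summary otherwise.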
func ShowHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	license, errLicense := cmd.Flags().GetBool("license")
	modelfile, errModelfile := cmd.Flags().GetBool("modelfile")
	parameters, errParams := cmd.Flags().GetBool("parameters")
	system, errSystem := cmd.Flags().GetBool("system")
	template, errTemplate := cmd.Flags().GetBool("template")

	for _, boolErr := range []error{errLicense, errModelfile, errParams, errSystem, errTemplate} {
		if boolErr != nil {
			return errors.New("error retrieving flags")
		}
	}

	flagsSet := 0
	showType := ""

	if license {
		flagsSet++
		showType = "license"
	}

	if modelfile {
		flagsSet++
		showType = "modelfile"
	}

	if parameters {
		flagsSet++
		showType = "parameters"
	}

	if system {
		flagsSet++
		showType = "system"
	}

	if template {
		flagsSet++
		showType = "template"
	}

	if flagsSet > 1 {
		return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
	}

	req := api.ShowRequest{Name: args[0]}
	resp, err := client.Show(cmd.Context(), &req)
	if err != nil {
		return err
	}

	if flagsSet == 1 {
		switch showType {
		case "license":
			fmt.Println(resp.License)
		case "modelfile":
			fmt.Println(resp.Modelfile)
		case "parameters":
			fmt.Println(resp.Parameters)
		case "system":
			fmt.Print(resp.System)
		case "template":
			fmt.Print(resp.Template)
		}

		return nil
	}

	return showInfo(resp, os.Stdout)
}
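
// showInfo writes the Model, Projector, Parameters, System, and License
// sections of a show response to w as small left-aligned tables.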
func showInfo(resp *api.ShowResponse, w io.Writer) error {
	tableRender := func(header string, rows func() [][]string) {
		fmt.Fprintln(w, " ", header)
		table := tablewriter.NewWriter(w)
		table.SetAlignment(tablewriter.ALIGN_LEFT)
		table.SetBorder(false)
		table.SetNoWhiteSpace(true)
		table.SetTablePadding("  ")

		switch header {
		case "Template", "System", "License":
			table.SetColWidth(100)
		}

		table.AppendBulk(rows())
		table.Render()
		fmt.Fprintln(w)
	}

	tableRender("Model", func() (rows [][]string) {
		if resp.ModelInfo != nil {
			arch := resp.ModelInfo["general.architecture"].(string)
			rows = append(rows, []string{"", "architecture", arch})
			rows = append(rows, []string{"", "parameters", format.HumanNumber(uint64(resp.ModelInfo["general.parameter_count"].(float64)))})
			rows = append(rows, []string{"", "context length", strconv.FormatFloat(resp.ModelInfo[fmt.Sprintf("%s.context_length", arch)].(float64), 'f', -1, 64)})
			rows = append(rows, []string{"", "embedding length", strconv.FormatFloat(resp.ModelInfo[fmt.Sprintf("%s.embedding_length", arch)].(float64), 'f', -1, 64)})
		} else {
			rows = append(rows, []string{"", "architecture", resp.Details.Family})
			rows = append(rows, []string{"", "parameters", resp.Details.ParameterSize})
		}
		rows = append(rows, []string{"", "quantization", resp.Details.QuantizationLevel})
		return
	})

	if resp.ProjectorInfo != nil {
		tableRender("Projector", func() (rows [][]string) {
			arch := resp.ProjectorInfo["general.architecture"].(string)
			rows = append(rows, []string{"", "architecture", arch})
			rows = append(rows, []string{"", "parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))})
			rows = append(rows, []string{"", "embedding length", strconv.FormatFloat(resp.ProjectorInfo[fmt.Sprintf("%s.vision.embedding_length", arch)].(float64), 'f', -1, 64)})
			rows = append(rows, []string{"", "dimensions", strconv.FormatFloat(resp.ProjectorInfo[fmt.Sprintf("%s.vision.projection_dim", arch)].(float64), 'f', -1, 64)})
			return
		})
	}

	if resp.Parameters != "" {
		tableRender("Parameters", func() (rows [][]string) {
			scanner := bufio.NewScanner(strings.NewReader(resp.Parameters))
			for scanner.Scan() {
				if text := scanner.Text(); text != "" {
					rows = append(rows, append([]string{""}, strings.Fields(text)...))
				}
			}
			return
		})
	}

	head := func(s string, n int) (rows [][]string) {
		scanner := bufio.NewScanner(strings.NewReader(s))
		for scanner.Scan() && (len(rows) < n || n < 0) {
			if text := scanner.Text(); text != "" {
				rows = append(rows, []string{"", strings.TrimSpace(text)})
			}
		}
		return
	}

	if resp.System != "" {
		tableRender("System", func() [][]string {
			return head(resp.System, 2)
		})
	}

	if resp.License != "" {
		tableRender("License", func() [][]string {
			return head(resp.License, 2)
		})
	}

	return nil
}
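
// CopyHandler implements "ollama cp", copying a model to a new name.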
func CopyHandler(cmd *cobra.Command, args []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	req := api.CopyRequest{Source: args[0], Destination: args[1]}
	if err := client.Copy(cmd.Context(), &req); err != nil {
		return err
	}
	fmt.Printf("copied '%s' to '%s'\n", args[0], args[1])
	return nil
}
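
// PullHandler implements "ollama pull", downloading a model from a registry
// with a per-layer progress bar.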
func PullHandler(cmd *cobra.Command, args []string) error {
	insecure, err := cmd.Flags().GetBool("insecure")
	if err != nil {
		return err
	}

	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	p := progress.NewProgress(os.Stderr)
	defer p.Stop()

	bars := make(map[string]*progress.Bar)

	var status string
	var spinner *progress.Spinner

	fn := func(resp api.ProgressResponse) error {
		if resp.Digest != "" {
			if spinner != nil {
				spinner.Stop()
			}

			bar, ok := bars[resp.Digest]
			if !ok {
				bar = progress.NewBar(fmt.Sprintf("pulling %s...", resp.Digest[7:19]), resp.Total, resp.Completed)
				bars[resp.Digest] = bar
				p.Add(resp.Digest, bar)
			}

			bar.Set(resp.Completed)
		} else if status != resp.Status {
			if spinner != nil {
				spinner.Stop()
			}

			status = resp.Status
			spinner = progress.NewSpinner(status)
			p.Add(status, spinner)
		}

		return nil
	}

	request := api.PullRequest{Name: args[0], Insecure: insecure}
	if err := client.Pull(cmd.Context(), &request, fn); err != nil {
		return err
	}

	return nil
}
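
// generateContextKey is the context key under which generate stores the
// returned context tokens between invocations.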
type generateContextKey string
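
// runOptions collects the settings for a single "ollama run" invocation.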
type runOptions struct {
	Model       string
	ParentModel string
	Prompt      string
	Messages    []api.Message
	WordWrap    bool
	Format      string
	System      string
	Images      []api.ImageData
	Options     map[string]interface{}
	MultiModal  bool
	KeepAlive   *api.Duration
}
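
// displayResponseState carries word-wrap state across streamed chunks of a
// response.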
type displayResponseState struct {
	lineLength int
	wordBuffer string
}
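
// displayResponse prints a chunk of streamed output, optionally word-wrapping
// at the terminal width using ANSI cursor movement to rewrap mid-word.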
func displayResponse(content string, wordWrap bool, state *displayResponseState) {
	termWidth, _, _ := term.GetSize(int(os.Stdout.Fd()))
	if wordWrap && termWidth >= 10 {
		for _, ch := range content {
			if state.lineLength+1 > termWidth-5 {
				if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
					fmt.Printf("%s%c", state.wordBuffer, ch)
					state.wordBuffer = ""
					state.lineLength = 0
					continue
				}

				// backtrack the length of the last word and clear to the end of the line
				a := runewidth.StringWidth(state.wordBuffer)
				if a > 0 {
					fmt.Printf("\x1b[%dD", a)
				}
				fmt.Printf("\x1b[K\n")
				fmt.Printf("%s%c", state.wordBuffer, ch)
				chWidth := runewidth.RuneWidth(ch)

				state.lineLength = runewidth.StringWidth(state.wordBuffer) + chWidth
			} else {
				fmt.Print(string(ch))
				state.lineLength += runewidth.RuneWidth(ch)
				if runewidth.RuneWidth(ch) >= 2 {
					state.wordBuffer = ""
					continue
				}

				switch ch {
				case ' ':
					state.wordBuffer = ""
				case '\n':
					state.lineLength = 0
				default:
					state.wordBuffer += string(ch)
				}
			}
		}
	} else {
		fmt.Printf("%s%s", state.wordBuffer, content)
		if len(state.wordBuffer) > 0 {
			state.wordBuffer = ""
		}
	}
}
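
// chat streams a chat request, rendering the response as it arrives, and
// returns the assembled assistant message.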
func chat(cmd *cobra.Command, opts runOptions) (*api.Message, error) {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return nil, err
	}

	p := progress.NewProgress(os.Stderr)
	defer p.StopAndClear()

	spinner := progress.NewSpinner("")
	p.Add("", spinner)

	cancelCtx, cancel := context.WithCancel(cmd.Context())
	defer cancel()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT)

	go func() {
		<-sigChan
		cancel()
	}()

	var state *displayResponseState = &displayResponseState{}
	var latest api.ChatResponse
	var fullResponse strings.Builder
	var role string

	fn := func(response api.ChatResponse) error {
		p.StopAndClear()

		latest = response

		role = response.Message.Role
		content := response.Message.Content
		fullResponse.WriteString(content)

		displayResponse(content, opts.WordWrap, state)

		return nil
	}

	if opts.Format == "json" {
		opts.Format = `"` + opts.Format + `"`
	}

	req := &api.ChatRequest{
		Model:    opts.Model,
		Messages: opts.Messages,
		Format:   json.RawMessage(opts.Format),
		Options:  opts.Options,
	}

	if opts.KeepAlive != nil {
		req.KeepAlive = opts.KeepAlive
	}

	if err := client.Chat(cancelCtx, req, fn); err != nil {
		if errors.Is(err, context.Canceled) {
			return nil, nil
		}
		return nil, err
	}

	if len(opts.Messages) > 0 {
		fmt.Println()
		fmt.Println()
	}

	verbose, err := cmd.Flags().GetBool("verbose")
	if err != nil {
		return nil, err
	}

	if verbose {
		latest.Summary()
	}

	return &api.Message{Role: role, Content: fullResponse.String()}, nil
}
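
// generate streams a one-shot generate request and saves the returned context
// tokens on the command's context for follow-up prompts.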
func generate(cmd *cobra.Command, opts runOptions) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	p := progress.NewProgress(os.Stderr)
	defer p.StopAndClear()

	spinner := progress.NewSpinner("")
	p.Add("", spinner)

	var latest api.GenerateResponse

	generateContext, ok := cmd.Context().Value(generateContextKey("context")).([]int)
	if !ok {
		generateContext = []int{}
	}

	ctx, cancel := context.WithCancel(cmd.Context())
	defer cancel()

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT)

	go func() {
		<-sigChan
		cancel()
	}()

	var state *displayResponseState = &displayResponseState{}

	fn := func(response api.GenerateResponse) error {
		p.StopAndClear()

		latest = response
		content := response.Response

		displayResponse(content, opts.WordWrap, state)

		return nil
	}

	if opts.MultiModal {
		opts.Prompt, opts.Images, err = extractFileData(opts.Prompt)
		if err != nil {
			return err
		}
	}

	if opts.Format == "json" {
		opts.Format = `"` + opts.Format + `"`
	}

	request := api.GenerateRequest{
		Model:     opts.Model,
		Prompt:    opts.Prompt,
		Context:   generateContext,
		Images:    opts.Images,
		Format:    json.RawMessage(opts.Format),
		System:    opts.System,
		Options:   opts.Options,
		KeepAlive: opts.KeepAlive,
	}

	if err := client.Generate(ctx, &request, fn); err != nil {
		if errors.Is(err, context.Canceled) {
			return nil
		}
		return err
	}

	if opts.Prompt != "" {
		fmt.Println()
		fmt.Println()
	}

	if !latest.Done {
		return nil
	}

	verbose, err := cmd.Flags().GetBool("verbose")
	if err != nil {
		return err
	}

	if verbose {
		latest.Summary()
	}

	ctx = context.WithValue(cmd.Context(), generateContextKey("context"), latest.Context)
	cmd.SetContext(ctx)

	return nil
}
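
// RunServer implements "ollama serve": it ensures a keypair exists, then
// listens on the configured host address.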
func RunServer(_ *cobra.Command, _ []string) error {
	if err := initializeKeypair(); err != nil {
		return err
	}

	ln, err := net.Listen("tcp", envconfig.Host().Host)
	if err != nil {
		return err
	}

	err = server.Serve(ln)
	if errors.Is(err, http.ErrServerClosed) {
		return nil
	}

	return err
}
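
// initializeKeypair generates an ed25519 SSH keypair under ~/.ollama on first
// run and prints the public key, which can be used to identify this client to
// registries.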
func initializeKeypair() error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	privKeyPath := filepath.Join(home, ".ollama", "id_ed25519")
	pubKeyPath := filepath.Join(home, ".ollama", "id_ed25519.pub")

	_, err = os.Stat(privKeyPath)
	if os.IsNotExist(err) {
		fmt.Printf("Couldn't find '%s'. Generating new private key.\n", privKeyPath)
		cryptoPublicKey, cryptoPrivateKey, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return err
		}

		privateKeyBytes, err := ssh.MarshalPrivateKey(cryptoPrivateKey, "")
		if err != nil {
			return err
		}

		if err := os.MkdirAll(filepath.Dir(privKeyPath), 0o755); err != nil {
			return fmt.Errorf("could not create directory: %w", err)
		}

		if err := os.WriteFile(privKeyPath, pem.EncodeToMemory(privateKeyBytes), 0o600); err != nil {
			return err
		}

		sshPublicKey, err := ssh.NewPublicKey(cryptoPublicKey)
		if err != nil {
			return err
		}

		publicKeyBytes := ssh.MarshalAuthorizedKey(sshPublicKey)

		if err := os.WriteFile(pubKeyPath, publicKeyBytes, 0o644); err != nil {
			return err
		}

		fmt.Printf("Your new public key is: \n\n%s\n", publicKeyBytes)
	}
	return nil
}
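
// checkServerHeartbeat pings the server before a command runs and, when the
// connection is refused, attempts to start the Ollama app.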
func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}
	if err := client.Heartbeat(cmd.Context()); err != nil {
		if !strings.Contains(err.Error(), " refused") {
			return err
		}
		if err := startApp(cmd.Context(), client); err != nil {
			return errors.New("could not connect to ollama app, is it running?")
		}
	}
	return nil
}
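
// versionHandler prints the server version when reachable and warns when the
// client version differs from it.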
func versionHandler(cmd *cobra.Command, _ []string) {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		return
	}

	serverVersion, err := client.Version(cmd.Context())
	if err != nil {
		fmt.Println("Warning: could not connect to a running Ollama instance")
	}

	if serverVersion != "" {
		fmt.Printf("ollama version is %s\n", serverVersion)
	}

	if serverVersion != version.Version {
		fmt.Printf("Warning: client version is %s\n", version.Version)
	}
}
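
// appendEnvDocs appends an "Environment Variables" section describing envs to
// the command's usage template.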
func appendEnvDocs(cmd *cobra.Command, envs []envconfig.EnvVar) {
	if len(envs) == 0 {
		return
	}

	envUsage := `
Environment Variables:
`
	for _, e := range envs {
		envUsage += fmt.Sprintf("  %-24s %s\n", e.Name, e.Description)
	}

	cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
}
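
// NewCLI assembles the root "ollama" command, its subcommands, and their
// flags.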
|
|
|
|
|
2023-07-03 15:22:44 -04:00
|
|
|
func NewCLI() *cobra.Command {
|
|
|
|
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
2023-11-22 09:41:02 -08:00
|
|
|
cobra.EnableCommandSorting = false
|
2023-07-03 15:22:44 -04:00
|
|
|
|
2024-10-25 13:43:16 -07:00
|
|
|
if runtime.GOOS == "windows" && term.IsTerminal(int(os.Stdout.Fd())) {
|
2024-03-11 15:21:57 -07:00
|
|
|
console.ConsoleFromFile(os.Stdin) //nolint:errcheck
|
2023-11-23 22:21:32 -08:00
|
|
|
}
|
|
|
|
|
2023-07-03 15:22:44 -04:00
|
|
|
rootCmd := &cobra.Command{
|
2023-08-14 11:15:53 -07:00
|
|
|
Use: "ollama",
|
|
|
|
Short: "Large language model runner",
|
|
|
|
SilenceUsage: true,
|
|
|
|
SilenceErrors: true,
|
2023-07-03 15:22:44 -04:00
|
|
|
CompletionOptions: cobra.CompletionOptions{
|
|
|
|
DisableDefaultCmd: true,
|
|
|
|
},
|
2023-11-22 09:41:02 -08:00
|
|
|
Run: func(cmd *cobra.Command, args []string) {
|
|
|
|
if version, _ := cmd.Flags().GetBool("version"); version {
|
|
|
|
versionHandler(cmd, args)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd.Print(cmd.UsageString())
|
|
|
|
},
|
2023-07-03 15:22:44 -04:00
|
|
|
}
|
|
|
|
|
2023-11-22 09:41:02 -08:00
|
|
|
rootCmd.Flags().BoolP("version", "v", false, "Show version information")
|
2023-07-03 15:22:44 -04:00
|
|
|
|
2023-07-16 17:02:22 -07:00
|
|
|
createCmd := &cobra.Command{
|
2023-07-31 16:25:57 -04:00
|
|
|
Use: "create MODEL",
|
|
|
|
Short: "Create a model from a Modelfile",
|
2023-10-18 11:57:22 -07:00
|
|
|
Args: cobra.ExactArgs(1),
|
2023-07-31 16:25:57 -04:00
|
|
|
PreRunE: checkServerHeartbeat,
|
|
|
|
RunE: CreateHandler,
|
2023-07-16 17:02:22 -07:00
|
|
|
}
|
|
|
|
|
2024-10-22 13:32:24 -07:00
|
|
|
createCmd.Flags().StringP("file", "f", "", "Name of the Modelfile (default \"Modelfile\"")
|
2024-05-10 13:06:13 -07:00
|
|
|
createCmd.Flags().StringP("quantize", "q", "", "Quantize model to this level (e.g. q4_0)")
|
2023-07-16 17:02:22 -07:00
|
|
|
|
2023-09-06 11:04:17 -07:00
|
|
|
showCmd := &cobra.Command{
|
|
|
|
Use: "show MODEL",
|
|
|
|
Short: "Show information for a model",
|
2023-10-18 11:57:22 -07:00
|
|
|
Args: cobra.ExactArgs(1),
|
2023-09-06 11:04:17 -07:00
|
|
|
PreRunE: checkServerHeartbeat,
|
|
|
|
RunE: ShowHandler,
|
|
|
|
}
|
|
|
|
|
|
|
|
showCmd.Flags().Bool("license", false, "Show license of a model")
|
|
|
|
showCmd.Flags().Bool("modelfile", false, "Show Modelfile of a model")
|
|
|
|
showCmd.Flags().Bool("parameters", false, "Show parameters of a model")
|
|
|
|
showCmd.Flags().Bool("template", false, "Show template of a model")
|
2023-12-12 14:43:19 -05:00
|
|
|
showCmd.Flags().Bool("system", false, "Show system message of a model")
|
2023-09-06 11:04:17 -07:00
|
|
|
|
2023-07-03 15:22:44 -04:00
|
|
|
runCmd := &cobra.Command{
|
2024-04-15 16:58:00 -07:00
|
|
|
Use: "run MODEL [PROMPT]",
|
|
|
|
Short: "Run a model",
|
|
|
|
Args: cobra.MinimumNArgs(1),
|
|
|
|
PreRunE: checkServerHeartbeat,
|
|
|
|
RunE: RunHandler,
|
2023-07-03 15:22:44 -04:00
|
|
|
}
|
|
|
|
|
2024-05-13 17:17:36 -07:00
|
|
|
runCmd.Flags().String("keepalive", "", "Duration to keep a model loaded (e.g. 5m)")
|
2023-07-12 18:18:06 -07:00
|
|
|
runCmd.Flags().Bool("verbose", false, "Show timings for response")
|
2023-08-21 21:56:56 -07:00
|
|
|
runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
|
2023-09-22 13:36:08 -07:00
|
|
|
runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically")
|
2023-11-13 21:54:02 -05:00
|
|
|
runCmd.Flags().String("format", "", "Response format (e.g. json)")
|
2024-09-11 16:36:21 -07:00
|
|
|
|
|
|
|
stopCmd := &cobra.Command{
|
|
|
|
Use: "stop MODEL",
|
|
|
|
Short: "Stop a running model",
|
|
|
|
Args: cobra.ExactArgs(1),
|
|
|
|
PreRunE: checkServerHeartbeat,
|
|
|
|
RunE: StopHandler,
|
|
|
|
}
|
|
|
|
|
2023-07-03 15:22:44 -04:00
|
|
|
serveCmd := &cobra.Command{
|
|
|
|
Use: "serve",
|
|
|
|
Aliases: []string{"start"},
|
|
|
|
Short: "Start ollama",
|
2023-10-18 11:57:22 -07:00
|
|
|
Args: cobra.ExactArgs(0),
|
2023-07-06 13:49:31 -07:00
|
|
|
RunE: RunServer,
|
2023-07-03 15:22:44 -04:00
|
|
|
}
|
|
|
|
|
	pullCmd := &cobra.Command{
		Use:     "pull MODEL",
		Short:   "Pull a model from a registry",
		Args:    cobra.ExactArgs(1),
		PreRunE: checkServerHeartbeat,
		RunE:    PullHandler,
	}

	pullCmd.Flags().Bool("insecure", false, "Use an insecure registry")

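	// pushCmd uploads a local model to a registry.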
	pushCmd := &cobra.Command{
		Use:     "push MODEL",
		Short:   "Push a model to a registry",
		Args:    cobra.ExactArgs(1),
		PreRunE: checkServerHeartbeat,
		RunE:    PushHandler,
	}

	pushCmd.Flags().Bool("insecure", false, "Use an insecure registry")

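	// listCmd enumerates locally installed models; psCmd below lists only
	// the ones currently running.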
	listCmd := &cobra.Command{
		Use:     "list",
		Aliases: []string{"ls"},
		Short:   "List models",
		PreRunE: checkServerHeartbeat,
		RunE:    ListHandler,
	}

	psCmd := &cobra.Command{
		Use:     "ps",
		Short:   "List running models",
		PreRunE: checkServerHeartbeat,
		RunE:    ListRunningHandler,
	}

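	// copyCmd duplicates a model under a new name; deleteCmd removes one or
	// more models from local storage.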
	copyCmd := &cobra.Command{
		Use:     "cp SOURCE DESTINATION",
		Short:   "Copy a model",
		Args:    cobra.ExactArgs(2),
		PreRunE: checkServerHeartbeat,
		RunE:    CopyHandler,
	}

	deleteCmd := &cobra.Command{
		Use:     "rm MODEL [MODEL...]",
		Short:   "Remove a model",
		Args:    cobra.MinimumNArgs(1),
		PreRunE: checkServerHeartbeat,
		RunE:    DeleteHandler,
	}

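	// runnerCmd is a hidden entrypoint that runs the built-in cgo llama
	// runner in-process instead of a separate payload binary; unknown flags
	// are whitelisted so they pass through untouched to runner.Execute.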
	runnerCmd := &cobra.Command{
		Use:    "runner",
		Short:  llama.PrintSystemInfo(),
		Hidden: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runner.Execute(os.Args[1:])
		},
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
	}
	runnerCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
		_ = runner.Execute(args[1:])
	})

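	// Append environment-variable documentation to each command's help:
	// every command honors OLLAMA_HOST, run additionally honors
	// OLLAMA_NOHISTORY, and serve documents the full server-side set.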
	envVars := envconfig.AsMap()

	envs := []envconfig.EnvVar{envVars["OLLAMA_HOST"]}

	for _, cmd := range []*cobra.Command{
		createCmd,
		showCmd,
		runCmd,
		stopCmd,
		pullCmd,
		pushCmd,
		listCmd,
		psCmd,
		copyCmd,
		deleteCmd,
		serveCmd,
	} {
		switch cmd {
		case runCmd:
			appendEnvDocs(cmd, []envconfig.EnvVar{envVars["OLLAMA_HOST"], envVars["OLLAMA_NOHISTORY"]})
		case serveCmd:
			appendEnvDocs(cmd, []envconfig.EnvVar{
				envVars["OLLAMA_DEBUG"],
				envVars["OLLAMA_HOST"],
				envVars["OLLAMA_KEEP_ALIVE"],
				envVars["OLLAMA_MAX_LOADED_MODELS"],
				envVars["OLLAMA_MAX_QUEUE"],
				envVars["OLLAMA_MODELS"],
				envVars["OLLAMA_NUM_PARALLEL"],
				envVars["OLLAMA_NOPRUNE"],
				envVars["OLLAMA_ORIGINS"],
				envVars["OLLAMA_SCHED_SPREAD"],
				envVars["OLLAMA_TMPDIR"],
				envVars["OLLAMA_FLASH_ATTENTION"],
				envVars["OLLAMA_KV_CACHE_TYPE"],
				envVars["OLLAMA_LLM_LIBRARY"],
				envVars["OLLAMA_GPU_OVERHEAD"],
				envVars["OLLAMA_LOAD_TIMEOUT"],
			})
		default:
			appendEnvDocs(cmd, envs)
		}
	}

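	// Register all subcommands, including the hidden runnerCmd, before the
	// root command is returned.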
	rootCmd.AddCommand(
		serveCmd,
		createCmd,
		showCmd,
		runCmd,
		stopCmd,
		pullCmd,
		pushCmd,
		listCmd,
		psCmd,
		copyCmd,
		deleteCmd,
		runnerCmd,
	)

	return rootCmd
}