#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.
set -eu

# Terminal colors for error/warning output. tput may be missing or the
# output may not be a TTY; '|| :' and '2>&-' make this best-effort so the
# script still works without color support.
red="$( (/usr/bin/tput bold || :; /usr/bin/tput setaf 1 || :) 2>&-)"
plain="$( (/usr/bin/tput sgr0 || :) 2>&-)"
# Log helpers: status goes to stderr so stdout stays clean for piping;
# error prints and aborts the script; warning prints but continues.
status() { echo ">>> $*" >&2; }
error() { echo "${red}ERROR:${plain} $*"; exit 1; }
warning() { echo "${red}WARNING:${plain} $*"; }
# Scratch directory for downloaded artifacts; removed on any exit path.
TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf "$TEMP_DIR"; }
trap cleanup EXIT
# available TOOL — succeed if TOOL is on PATH.
available() { command -v "$1" >/dev/null; }

# require TOOL... — echo the subset of TOOLs that are missing from PATH
# (empty output means everything is present).
require() {
    local MISSING=''
    for TOOL in "$@"; do
        if ! available "$TOOL"; then
            MISSING="$MISSING $TOOL"
        fi
    done

    # intentionally unquoted: word-splitting trims the leading space
    echo $MISSING
}
# Refuse to run on non-Linux kernels (macOS users should use the app bundle).
[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
# Map the kernel architecture name onto the release-artifact suffix.
ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64 | arm64) ARCH="arm64" ;;
    *) error "Unsupported architecture: $ARCH" ;;
esac
IS_WSL2=false

# WSL kernels embed "microsoft" in the release string; WSL2 additionally
# carries a WSL2 suffix. WSL1 lacks the kernel features Ollama needs.
KERN=$(uname -r)
case "$KERN" in
    *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true ;;
    *icrosoft) error "Microsoft WSL1 is not currently supported. Please use WSL2 with 'wsl --set-version <distro> 2'" ;;
    *) ;;
esac
# Optional "?version=X" query string appended to download URLs when the
# user pins a release via OLLAMA_VERSION; empty otherwise.
VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"
SUDO=
if [ "$(id -u)" -ne 0 ]; then
    # Running as a non-root user: sudo is required for the privileged
    # install steps below.
    if ! available sudo; then
        error "This script requires superuser permissions. Please re-run as root."
    fi

    SUDO="sudo"
fi
# Abort early with a clear list if any required tool is missing.
NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
    status "ERROR: The following tools are required but missing:"
    for NEED in $NEEDS; do
        echo "  - $NEED"
    done
    exit 1
fi
# Pick the first conventional bin directory that is already on PATH;
# falls through to /bin if none match.
for BINDIR in /usr/local/bin /usr/bin /bin; do
    echo "$PATH" | grep -q "$BINDIR" && break || continue
done
# Install root is the parent of the chosen bin dir (e.g. /usr/local).
OLLAMA_INSTALL_DIR=$(dirname "$BINDIR")
# Remove runners/libraries from a previous install before extracting the
# new bundle; stale files here can cause subtle runtime errors after an
# upgrade.
if [ -d "$OLLAMA_INSTALL_DIR/lib/ollama" ] ; then
    status "Cleaning up old version at $OLLAMA_INSTALL_DIR/lib/ollama"
    $SUDO rm -rf "$OLLAMA_INSTALL_DIR/lib/ollama"
fi
status "Installing ollama to $OLLAMA_INSTALL_DIR"
# Create the target directories root-owned and world-executable.
$SUDO install -o0 -g0 -m755 -d "$BINDIR"
$SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR"
# Stream the release tarball straight into tar; --fail makes curl return
# non-zero on HTTP errors so a bad download aborts under set -e.
status "Downloading Linux ${ARCH} bundle"
curl --fail --show-error --location --progress-bar \
    "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \
    $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"

# If the bundle's binary did not land directly in BINDIR, expose it on
# PATH via a symlink.
if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
    status "Making ollama accessible in the PATH in $BINDIR"
    $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi
# Check for NVIDIA JetPack systems with additional downloads
# (/etc/nv_tegra_release only exists on Jetson boards; R36/R35 identify
# the L4T major release matching JetPack 6/5).
if [ -f /etc/nv_tegra_release ] ; then
    if grep R36 /etc/nv_tegra_release > /dev/null ; then
        status "Downloading JetPack 6 components"
        curl --fail --show-error --location --progress-bar \
            "https://ollama.com/download/ollama-linux-${ARCH}-jetpack6.tgz${VER_PARAM}" | \
            $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
    elif grep R35 /etc/nv_tegra_release > /dev/null ; then
        status "Downloading JetPack 5 components"
        curl --fail --show-error --location --progress-bar \
            "https://ollama.com/download/ollama-linux-${ARCH}-jetpack5.tgz${VER_PARAM}" | \
            $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"
    else
        warning "Unsupported JetPack version detected. GPU may not be supported"
    fi
fi
# Final success banner; registered as the EXIT trap so it prints however
# the remaining (optional) steps terminate. Later traps may replace it.
install_success() {
    status 'The Ollama API is now available at 127.0.0.1:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT

# Everything from this point onwards is optional.
# Create the dedicated 'ollama' user, write the systemd unit, and start
# the service when systemd is actually managing the system.
configure_systemd() {
    if ! id ollama >/dev/null 2>&1; then
        status "Creating ollama user..."
        # System account, no login shell, with a home for model storage.
        $SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
    fi
    # render/video group membership grants GPU device-node access on many distros.
    if getent group render >/dev/null 2>&1; then
        status "Adding ollama user to render group..."
        $SUDO usermod -a -G render ollama
    fi
    if getent group video >/dev/null 2>&1; then
        status "Adding ollama user to video group..."
        $SUDO usermod -a -G video ollama
    fi

    status "Adding current user to ollama group..."
    $SUDO usermod -a -G ollama $(whoami)

    status "Creating ollama systemd service..."
    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"

[Install]
WantedBy=default.target
EOF
    # is-system-running returns non-zero for some states we still accept
    # (e.g. degraded), so don't let it abort under set -e.
    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
    case $SYSTEMCTL_RUNNING in
        running | degraded)
            status "Enabling and starting ollama service..."
            $SUDO systemctl daemon-reload
            $SUDO systemctl enable ollama

            # Replace the install_success EXIT trap: (re)start the service
            # as the very last step, after any driver installation below.
            start_service() { $SUDO systemctl restart ollama; }
            trap start_service EXIT
            ;;
        *)
            warning "systemd is not running"
            if [ "$IS_WSL2" = true ]; then
                warning "see https://learn.microsoft.com/en-us/windows/wsl/systemd#how-to-enable-systemd to enable it"
            fi
            ;;
    esac
}
# Only configure a service when systemd tooling is present.
if available systemctl; then
    configure_systemd
fi
# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
    if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
        status "Nvidia GPU detected."
    fi
    install_success
    exit 0
fi
# Don't attempt to install drivers on Jetson systems
# (JetPack ships its own CUDA stack; components were fetched above).
if [ -f /etc/nv_tegra_release ] ; then
    status "NVIDIA JetPack ready."
    install_success
    exit 0
fi
# Install GPU dependencies on Linux. Without lspci or lshw we cannot
# probe PCI devices, so stop here (install is already complete).
if ! available lspci && ! available lshw; then
    warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
    exit 0
fi
# check_gpu TOOL [VENDOR] — succeed if TOOL detects a VENDOR GPU.
# TOOL is lspci, lshw, or nvidia-smi; VENDOR is nvidia or amdgpu.
check_gpu() {
    # Look for devices based on vendor ID for NVIDIA (10de) and AMD (1002)
    case $1 in
        lspci)
            case $2 in
                nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
                amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
            esac ;;
        lshw)
            # lshw needs root to see PCI vendor IDs reliably
            case $2 in
                nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
                amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;;
            esac ;;
        nvidia-smi) available nvidia-smi || return 1 ;;
    esac
}
# A working nvidia-smi means the driver is already installed — done.
if check_gpu nvidia-smi; then
    status "NVIDIA GPU installed."
    exit 0
fi

# No NVIDIA or AMD device visible by any probe: finish in CPU-only mode.
if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
    install_success
    warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
    exit 0
fi
# AMD GPU path: fetch the ROCm runner bundle on top of the base install.
if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
    status "Downloading Linux ROCm ${ARCH} bundle"
    curl --fail --show-error --location --progress-bar \
        "https://ollama.com/download/ollama-linux-${ARCH}-rocm.tgz${VER_PARAM}" | \
        $SUDO tar -xzf - -C "$OLLAMA_INSTALL_DIR"

    install_success
    status "AMD GPU ready."
    exit 0
fi
# Shared error text used when NVIDIA publishes no repo for this OS/arch.
CUDA_REPO_ERR_MSG="NVIDIA GPU detected, but your OS and Architecture are not supported by NVIDIA. Please install the CUDA driver manually https://docs.nvidia.com/cuda/cuda-installation-guide-linux/"
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
# install_cuda_driver_yum DISTRO VERSION — add NVIDIA's repo and install
# cuda-drivers via yum/dnf. NVIDIA names aarch64 repos "sbsa".
install_cuda_driver_yum() {
    status 'Installing NVIDIA repository...'

    case $PACKAGE_MANAGER in
        yum)
            $SUDO $PACKAGE_MANAGER -y install yum-utils
            if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
                $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
            else
                error $CUDA_REPO_ERR_MSG
            fi
            ;;
        dnf)
            if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo" >/dev/null ; then
                $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-$1$2.repo
            else
                error $CUDA_REPO_ERR_MSG
            fi
            ;;
    esac

    case $1 in
        rhel)
            status 'Installing EPEL repository...'
            # EPEL is required for third-party dependencies such as dkms and libvdpau
            $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
            ;;
    esac

    status 'Installing CUDA driver...'

    # Older RHEL7/CentOS repos ship the driver under a different package name.
    if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
        $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
    fi

    $SUDO $PACKAGE_MANAGER -y install cuda-drivers
}
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
# install_cuda_driver_apt DISTRO VERSION — install NVIDIA's apt keyring
# and cuda-drivers on Debian/Ubuntu.
install_cuda_driver_apt() {
    status 'Installing NVIDIA repository...'
    if curl -I --silent --fail --location "https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb" >/dev/null ; then
        curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m | sed -e 's/aarch64/sbsa/')/cuda-keyring_1.1-1_all.deb
    else
        error $CUDA_REPO_ERR_MSG
    fi

    case $1 in
        debian)
            # The driver lives in Debian's non-free "contrib" component.
            status 'Enabling contrib sources...'
            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
            if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
                $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
            fi
            ;;
    esac

    status 'Installing CUDA driver...'
    $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
    $SUDO apt-get update

    # Preserve the environment (-E) through sudo so DEBIAN_FRONTEND applies.
    [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
    DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}
# Identify the distribution and its package manager for driver install.
if [ ! -f "/etc/os-release" ]; then
    error "Unknown distribution. Skipping CUDA installation."
fi

. /etc/os-release
OS_NAME=$ID
OS_VERSION=$VERSION_ID

# First available manager wins; dnf is preferred over yum where both exist.
PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
    if available $PACKAGE_MANAGER; then
        break
    fi
done

if [ -z "$PACKAGE_MANAGER" ]; then
    error "Unknown package manager. Skipping CUDA installation."
fi
# Install the CUDA driver unless a working one is already reported by
# nvidia-smi. Each distro maps onto the nearest NVIDIA repo name/version.
if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
    case $OS_NAME in
        centos | rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
        rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
        fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39' ;;
        amzn) install_cuda_driver_yum 'fedora' '37' ;;
        debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
        ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
        *) exit ;;
    esac
fi
# If the nvidia kernel modules are not loaded yet, install matching
# kernel headers, build the module via dkms, and load it.
if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
    KERNEL_RELEASE="$(uname -r)"
    case $OS_NAME in
        rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
        centos | rhel | amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
        fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
        debian | ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
        *) exit ;;
    esac

    NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
    if [ -n "$NVIDIA_CUDA_VERSION" ]; then
        $SUDO dkms install $NVIDIA_CUDA_VERSION
    fi

    # nouveau conflicts with the proprietary driver; a reboot lets the
    # blacklist take effect before the nvidia modules can load.
    if lsmod | grep -q nouveau; then
        status 'Reboot to complete NVIDIA CUDA driver install.'
        exit 0
    fi

    $SUDO modprobe nvidia
    $SUDO modprobe nvidia_uvm
fi
# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if available nvidia-persistenced; then
    $SUDO touch /etc/modules-load.d/nvidia.conf
    MODULES="nvidia nvidia-uvm"
    for MODULE in $MODULES; do
        # -qxF: match the whole line literally so we never append duplicates
        if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
            echo "$MODULE" | $SUDO tee -a /etc/modules-load.d/nvidia.conf > /dev/null
        fi
    done
fi
status "NVIDIA GPU ready."
install_success