Refine build to support CPU only
If someone checks out the ollama repo and doesn't install the CUDA library, this ensures they can still build a CPU-only version.
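To picture the intended behavior: on a machine without the CUDA or ROCm libraries, GetGPUInfo should now fall back to reporting system RAM with Driver set to "CPU". A minimal smoke-test sketch, not part of this commit; it only uses names that appear in the diff below, and the test name is hypothetical:

package gpu

import "testing"

// Sketch only: exercises the CPU fallback path this commit introduces.
// On a machine with no CUDA or ROCm libraries installed, Driver is
// expected to come back as "CPU", backed by system RAM figures.
func TestCPUFallbackSketch(t *testing.T) {
	info := GetGPUInfo()
	if info.Driver == "" {
		t.Fatal("expected a driver name (CUDA, ROCM, or CPU), got empty string")
	}
	t.Logf("driver=%s free=%d MB total=%d MB",
		info.Driver, info.FreeMemory/(1024*1024), info.TotalMemory/(1024*1024))
}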
gpu/gpu.go (33 changed lines)
@@ -3,6 +3,9 @@
 package gpu
 
 /*
+#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
+#cgo windows LDFLAGS: -lpthread
+
 #include "gpu_info.h"
 
 */
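The #cgo directives added above link only system libraries (pthread, dl, stdc++, libm on Linux); no CUDA or ROCm library appears on the link line, which is what lets a checkout without the CUDA toolkit still build. Detection happens at runtime instead. A minimal sketch of that runtime-probing idea (a standalone illustration, not the repo's code; the real search paths and symbol tables live in the C sources further below):

package main

/*
#cgo linux LDFLAGS: -ldl
#include <dlfcn.h>
#include <stdlib.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	// Probe for the NVML shared library the way a dlopen-based detector would:
	// a missing library is a normal condition (CPU-only), not a build error.
	name := C.CString("libnvidia-ml.so.1")
	defer C.free(unsafe.Pointer(name))

	handle := C.dlopen(name, C.RTLD_LAZY)
	if handle == nil {
		fmt.Println("NVML not found; falling back to CPU-only detection")
		return
	}
	defer C.dlclose(handle)
	fmt.Println("NVML present; GPU detection can proceed")
}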
@@ -26,6 +29,7 @@ var gpuHandles *handles = nil
 
 // Note: gpuMutex must already be held
 func initGPUHandles() {
+	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing
 	log.Printf("Detecting GPU type")
 	gpuHandles = &handles{nil, nil}
 	var resp C.cuda_init_resp_t
@@ -61,20 +65,32 @@ func GetGPUInfo() GpuInfo {
 	}
 
 	var memInfo C.mem_info_t
-	var resp GpuInfo
+	resp := GpuInfo{"", 0, 0}
 	if gpuHandles.cuda != nil {
 		C.cuda_check_vram(*gpuHandles.cuda, &memInfo)
-		resp.Driver = "CUDA"
+		if memInfo.err != nil {
+			log.Printf("error looking up CUDA GPU memory: %s", C.GoString(memInfo.err))
+			C.free(unsafe.Pointer(memInfo.err))
+		} else {
+			resp.Driver = "CUDA"
+		}
 	} else if gpuHandles.rocm != nil {
 		C.rocm_check_vram(*gpuHandles.rocm, &memInfo)
-		resp.Driver = "ROCM"
-	} else {
+		if memInfo.err != nil {
+			log.Printf("error looking up ROCm GPU memory: %s", C.GoString(memInfo.err))
+			C.free(unsafe.Pointer(memInfo.err))
+		} else {
+			resp.Driver = "ROCM"
+		}
+	}
+	if resp.Driver == "" {
 		C.cpu_check_ram(&memInfo)
 		resp.Driver = "CPU"
 	}
 	if memInfo.err != nil {
-		log.Printf("error looking up GPU memory: %s", C.GoString(memInfo.err))
+		log.Printf("error looking up CPU memory: %s", C.GoString(memInfo.err))
 		C.free(unsafe.Pointer(memInfo.err))
+		return resp
 	}
 	resp.FreeMemory = uint64(memInfo.free)
 	resp.TotalMemory = uint64(memInfo.total)
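The new error handling in this hunk repeats a standard cgo ownership idiom: the C side allocates the message with strdup, so the Go side copies it with C.GoString and then releases the buffer with C.free. A self-contained sketch of just that idiom (the make_err helper is hypothetical, standing in for the err field of mem_info_t):

package main

/*
#include <stdlib.h>
#include <string.h>
// Hypothetical stand-in for the C side setting mem_info_t.err via strdup.
static char *make_err() { return strdup("simulated memory lookup failure"); }
*/
import "C"

import (
	"log"
	"unsafe"
)

func main() {
	cerr := C.make_err()
	if cerr != nil {
		msg := C.GoString(cerr)      // copy into Go-managed memory
		C.free(unsafe.Pointer(cerr)) // free the strdup'd C buffer exactly once
		log.Printf("error looking up GPU memory: %s", msg)
	}
}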
@@ -108,12 +124,7 @@ func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
 	// 75% of the absolute max number of layers we can fit in available VRAM, off-loading too many layers to the GPU can cause OOM errors
 	layers := int(info.FreeMemory/bytesPerLayer) * 3 / 4
 
-	// TODO - not sure on this part... if we can't fit all the layers, just fallback to CPU
-	// if int64(layers) < numLayer {
-	// 	log.Printf("%d MB VRAM available, insufficient to load current model (reuires %d MB) - falling back to CPU %d", freeBytes/(1024*1024), fileSizeBytes/(1024*1024))
-	// 	return 0
-	// }
-	log.Printf("%d MB VRAM available, loading up to %d GPU layers out of %d", info.FreeMemory/(1024*1024), layers, numLayer)
+	log.Printf("%d MB VRAM available, loading up to %d %s GPU layers out of %d", info.FreeMemory/(1024*1024), layers, info.Driver, numLayer)
 
 	return layers
 }
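The log-line change above threads the detected driver name into an existing heuristic: NumGPU caps offloaded layers at 75% of what free VRAM could hold. A worked example with hypothetical numbers (8 GiB free, roughly 160 MiB per layer):

package main

import "fmt"

func main() {
	freeMemory := uint64(8) * 1024 * 1024 * 1024 // 8 GiB free VRAM (assumed)
	bytesPerLayer := uint64(160) * 1024 * 1024   // ~160 MiB per layer (assumed)

	// Same shape as the expression in NumGPU: 75% of the layers that fit.
	layers := int(freeMemory/bytesPerLayer) * 3 / 4 // 51 * 3 / 4 = 38

	fmt.Printf("%d MB VRAM available, loading up to %d GPU layers\n",
		freeMemory/(1024*1024), layers)
}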
The next hunks are from the CUDA GPU-info C source:
@@ -19,6 +19,7 @@ const char *cuda_lib_paths[] = {
 #endif
 
 void cuda_init(cuda_init_resp_t *resp) {
+  nvmlReturn_t ret;
   resp->err = NULL;
   const int buflen = 256;
   char buf[buflen + 1];
@@ -56,6 +57,13 @@ void cuda_init(cuda_init_resp_t *resp) {
       return;
     }
   }
+
+  ret = (*resp->ch.initFn)();
+  if (ret != NVML_SUCCESS) {
+    snprintf(buf, buflen, "nvml vram init failure: %d", ret);
+    resp->err = strdup(buf);
+  }
+
   return;
 }
 
@@ -73,17 +81,9 @@ void cuda_check_vram(cuda_handle_t h, mem_info_t *resp) {
     return;
   }
 
-  ret = (*h.initFn)();
-  if (ret != NVML_SUCCESS) {
-    snprintf(buf, buflen, "nvml vram init failure: %d", ret);
-    resp->err = strdup(buf);
-    return;
-  }
-
   // TODO - handle multiple GPUs
   ret = (*h.getHandle)(0, &device);
   if (ret != NVML_SUCCESS) {
-    (*h.shutdownFn)();
     snprintf(buf, buflen, "unable to get device handle: %d", ret);
     resp->err = strdup(buf);
     return;
@@ -91,20 +91,12 @@ void cuda_check_vram(cuda_handle_t h, mem_info_t *resp) {
 
   ret = (*h.getMemInfo)(device, &memInfo);
   if (ret != NVML_SUCCESS) {
-    (*h.shutdownFn)();
     snprintf(buf, buflen, "device memory info lookup failure: %d", ret);
     resp->err = strdup(buf);
     return;
   }
   resp->total = memInfo.total;
   resp->free = memInfo.free;
-
-  ret = (*h.shutdownFn)();
-  if (ret != NVML_SUCCESS) {
-    snprintf(buf, buflen, "nvml vram shutdown failure: %d", ret);
-    resp->err = strdup(buf);
-  }
-
   return;
 }
 #endif // __APPLE__
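Taken together, these C-side hunks (and the matching ROCm ones below) move library initialization into cuda_init/rocm_init and drop the per-call init/shutdown pairs from the *_check_vram functions, which now just validate the handle and query memory. A rough pure-Go illustration of that lifecycle split (hypothetical names; the real code crosses cgo):

package main

import (
	"errors"
	"fmt"
)

// handle stands in for cuda_handle_t / rocm_handle_t after the patch:
// the management library is initialized once, when the handle is created.
type handle struct{ initialized bool }

func newHandle() (*handle, error) {
	// ...load the shared library and call its init function here...
	return &handle{initialized: true}, nil
}

// checkVRAM mirrors the slimmed-down *_check_vram: validate the handle and
// query memory, with no shutdown call on every lookup.
func (h *handle) checkVRAM() (free, total uint64, err error) {
	if h == nil || !h.initialized {
		return 0, 0, errors.New("handle isn't initialized")
	}
	return 6 << 30, 8 << 30, nil // placeholder figures
}

func main() {
	h, err := newHandle()
	if err != nil {
		fmt.Println("GPU library unavailable; CPU-only path:", err)
		return
	}
	free, total, _ := h.checkVRAM()
	fmt.Printf("free=%d total=%d\n", free, total)
}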
The remaining hunks are from the ROCm GPU-info C source:
@@ -20,6 +20,7 @@ const char *rocm_lib_paths[] = {
 #endif
 
 void rocm_init(rocm_init_resp_t *resp) {
+  rsmi_status_t ret;
   resp->err = NULL;
   const int buflen = 256;
   char buf[buflen + 1];
@@ -56,6 +57,13 @@ void rocm_init(rocm_init_resp_t *resp) {
       return;
     }
   }
+
+  ret = (*resp->rh.initFn)(0);
+  if (ret != RSMI_STATUS_SUCCESS) {
+    snprintf(buf, buflen, "rocm vram init failure: %d", ret);
+    resp->err = strdup(buf);
+  }
+
   return;
 }
 
@@ -70,10 +78,8 @@ void rocm_check_vram(rocm_handle_t h, mem_info_t *resp) {
   char buf[buflen + 1];
   int i;
 
-  ret = (*h.initFn)(0);
-  if (ret != RSMI_STATUS_SUCCESS) {
-    snprintf(buf, buflen, "rocm vram init failure: %d", ret);
-    resp->err = strdup(buf);
+  if (h.handle == NULL) {
+    resp->err = strdup("nvml handle sn't initialized");
     return;
   }
 
@@ -89,20 +95,17 @@ void rocm_check_vram(rocm_handle_t h, mem_info_t *resp) {
   // Get total memory - used memory for available memory
   ret = (*h.totalMemFn)(0, RSMI_MEM_TYPE_VRAM, &totalMem);
   if (ret != RSMI_STATUS_SUCCESS) {
-    (*h.shutdownFn)();
     snprintf(buf, buflen, "rocm total mem lookup failure: %d", ret);
     resp->err = strdup(buf);
     return;
   }
   ret = (*h.usageMemFn)(0, RSMI_MEM_TYPE_VRAM, &usedMem);
   if (ret != RSMI_STATUS_SUCCESS) {
-    (*h.shutdownFn)();
     snprintf(buf, buflen, "rocm usage mem lookup failure: %d", ret);
     resp->err = strdup(buf);
     return;
   }
 
-  (*h.shutdownFn)();
   resp->total = totalMem;
   resp->free = totalMem - usedMem;
   return;