Mirror of https://github.com/ollama/ollama.git
(synced 2025-11-10 21:17:32 +01:00)
* Fix Vulkan PCI ID and ID handling. Intel GPUs may not report PCI IDs, which was leading to incorrect overlap detection. Switch to using the existing PCI IDs where available. AMD GPUs claim not to report PCI IDs but actually do, so try anyway — this is required for ADLX to find the GPUs on Windows. Numeric IDs lead to scheduling problems, so this also switches Vulkan to UUID-based IDs. The GPU discovery patches have been squashed into a single patch to simplify future rebases. * review comments
33 lines
1.3 KiB
Diff
33 lines
1.3 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: Daniel Hiltgen <daniel@ollama.com>
|
|
Date: Fri, 17 Oct 2025 14:17:00 -0700
|
|
Subject: [PATCH] report LoadLibrary failures
|
|
|
|
---
|
|
ggml/src/ggml-backend-reg.cpp | 12 ++++++++++++
|
|
1 file changed, 12 insertions(+)
|
|
|
|
diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp
|
|
index f794d9cfa..3a855ab2e 100644
|
|
--- a/ggml/src/ggml-backend-reg.cpp
|
|
+++ b/ggml/src/ggml-backend-reg.cpp
|
|
@@ -118,6 +118,18 @@ static dl_handle * dl_load_library(const fs::path & path) {
|
|
SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
|
|
|
|
HMODULE handle = LoadLibraryW(path.wstring().c_str());
|
|
+ if (!handle) {
|
|
+ DWORD error_code = GetLastError();
|
|
+ std::string msg;
|
|
+ LPSTR lpMsgBuf = NULL;
|
|
+ DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
|
|
+ NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL);
|
|
+ if (bufLen) {
|
|
+ msg = lpMsgBuf;
|
|
+ LocalFree(lpMsgBuf);
|
|
+ GGML_LOG_INFO("%s unable to load library %s: %s\n", __func__, path_str(path).c_str(), msg.c_str());
|
|
+ }
|
|
+ }
|
|
|
|
SetErrorMode(old_mode);
|
|
|