author     Morgan Smith <Morgan.J.Smith@outlook.com>          2025-04-07 19:19:39 -0400
committer  Danny Milosavljevic <dannym@friendly-machines.com> 2025-04-25 11:33:26 +0200
commit     f22424d4fddb6f6c3b1158dc6267cfa8b3423600
tree       23fa950e8343627de421f7b6bb43e2cbd14958f1 /gnu/packages/patches/llama-cpp-vulkan-optional.patch
parent     9407c6370e212cf60e19cd846fb4313290287553
gnu: llama-cpp: Update to 0.0.0-b5013.
* gnu/packages/machine-learning.scm (llama-cpp): Update to 0.0.0-b5013.
[inputs]: Add curl, glslang, and python-gguf.
[native-inputs]: bash -> bash-minimal.
[source, homepage]: Update URL.
[python-scripts]: Rely on upstream to install them. Delete phase.
[fix-tests]: Fix an additional test.
(python-gguf): Switch to llama-cpp's version.
* gnu/packages/patches/llama-cpp-vulkan-optional.patch: Delete.
* gnu/local.mk: Unregister patch.

Change-Id: Ic297534cd142cb83e3964eae21b4eb807b74e9bc
Signed-off-by: Danny Milosavljevic <dannym@friendly-machines.com>
Diffstat (limited to 'gnu/packages/patches/llama-cpp-vulkan-optional.patch')
-rw-r--r--  gnu/packages/patches/llama-cpp-vulkan-optional.patch  38
1 file changed, 0 insertions(+), 38 deletions(-)
diff --git a/gnu/packages/patches/llama-cpp-vulkan-optional.patch b/gnu/packages/patches/llama-cpp-vulkan-optional.patch
deleted file mode 100644
index 43a49b6a02..0000000000
--- a/gnu/packages/patches/llama-cpp-vulkan-optional.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-Author: Danny Milosavljevic <dannym@friendly-machines.com>
-Date: 2025-01-29
-License: Expat
-Subject: Make Vulkan optional
-
-See also: <https://github.com/ggerganov/llama.cpp/pull/11494>
-
-diff -ru orig/llama.cpp/ggml/include/ggml-vulkan.h llama.cpp/ggml/include/ggml-vulkan.h
---- orig/llama.cpp/ggml/include/ggml-vulkan.h 2025-01-29 10:24:10.894476682 +0100
-+++ llama.cpp/ggml/include/ggml-vulkan.h 2025-02-07 18:28:34.509509638 +0100
-@@ -10,8 +10,6 @@
- #define GGML_VK_NAME "Vulkan"
- #define GGML_VK_MAX_DEVICES 16
-
--GGML_BACKEND_API void ggml_vk_instance_init(void);
--
- // backend API
- GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
-
-diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
---- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 10:24:10.922476480 +0100
-+++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 22:33:19.955087552 +0100
-@@ -8174,8 +8174,13 @@
- /* .iface = */ ggml_backend_vk_reg_i,
- /* .context = */ nullptr,
- };
--
-- return &reg;
-+ try {
-+ ggml_vk_instance_init();
-+ return &reg;
-+ } catch (const vk::SystemError& e) {
-+ VK_LOG_DEBUG("ggml_vk_get_device_count() -> Error: System error: " << e.what());
-+ return nullptr;
-+ }
- }
-
- // Extension availability
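For context, the deleted patch wrapped ggml_vk_instance_init() in a try/catch inside the backend-registration function, so a missing or broken Vulkan driver makes registration return nullptr instead of letting an unhandled vk::SystemError abort the process; callers can then fall back to another backend. Below is a minimal, self-contained C++ sketch of that pattern. All type and function names are illustrative stand-ins, not the real ggml or Vulkan API.

// Sketch: report a backend as unavailable (nullptr) when its runtime
// cannot be initialized, rather than crashing at registration time.
#include <cstdio>
#include <stdexcept>

struct backend_reg {               // stand-in for ggml's backend registry entry
    const char *name;
};

// Stand-in for ggml_vk_instance_init(): throws when no usable driver exists.
static void instance_init() {
    bool driver_present = false;   // pretend the loader found no Vulkan ICD
    if (!driver_present)
        throw std::runtime_error("no Vulkan driver found");
}

// Stand-in for the registration function the patch modified: nullptr means
// "backend unavailable", letting the caller fall back to e.g. the CPU backend.
static backend_reg *get_backend_reg() {
    static backend_reg reg = { "Vulkan" };
    try {
        instance_init();
        return &reg;
    } catch (const std::exception &e) {
        std::fprintf(stderr, "vulkan backend disabled: %s\n", e.what());
        return nullptr;
    }
}

int main() {
    backend_reg *reg = get_backend_reg();
    if (reg)
        std::printf("using backend: %s\n", reg->name);
    else
        std::printf("falling back to CPU backend\n");
    return 0;
}

The design choice mirrors the upstream discussion linked in the patch header: probing the driver inside registration (and treating failure as "no devices") keeps Vulkan a soft dependency, which matters for a distribution package that must run on machines without GPUs.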