
Commit af7b3f3

fixed llama.cpp build issue on ARM (Apple aarch64)
1 parent: 451fea3

File tree

1 file changed: +10 −0 lines changed

llama-cpp-sys-2/build.rs

Lines changed: 10 additions & 0 deletions
@@ -336,6 +336,16 @@ fn main() {
         }
     }

+    if matches!(target_os, TargetOs::Linux)
+        && target_triple.contains("aarch64")
+        && !env::var(format!("CARGO_FEATURE_{}", "native".to_uppercase())).is_ok()
+    {
+        // If the native feature is not enabled, we take off the native ARM64 support.
+        // It is useful in docker environments where the native feature is not enabled.
+        config.define("GGML_NATIVE", "OFF");
+        config.define("GGML_CPU_ARM_ARCH", "armv8-a");
+    }
+
     if cfg!(feature = "vulkan") {
         config.define("GGML_VULKAN", "ON");
         match target_os {
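
For context, Cargo exposes every feature that is enabled for a build to the build script as an environment variable named CARGO_FEATURE_<NAME> (uppercased, with dashes turned into underscores), which is what the new guard reads. Below is a minimal, self-contained sketch of that detection pattern; the feature_enabled helper is illustrative and not part of the crate:

    use std::env;

    /// Returns true if the given Cargo feature is enabled for this build.
    /// Cargo sets CARGO_FEATURE_<NAME> for every active feature before the
    /// build script runs.
    fn feature_enabled(name: &str) -> bool {
        let var = format!("CARGO_FEATURE_{}", name.to_uppercase().replace('-', "_"));
        env::var(var).is_ok()
    }

    fn main() {
        // Mirrors the guard added in this commit: fall back to generic
        // armv8-a settings only when the `native` feature is NOT enabled.
        if !feature_enabled("native") {
            println!("cargo:warning=native feature disabled; using generic armv8-a settings");
        }
    }

In practice, building llama-cpp-sys-2 for aarch64 Linux (for example inside a Docker image) without the native feature enabled should now configure llama.cpp with GGML_NATIVE=OFF and GGML_CPU_ARM_ARCH=armv8-a, which appears to be the build-failure scenario the commit message targets.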
