Skip to content

Commit 941f9b1

Browse files
committed
rpc : rpc_count always zero for non-RPC builds
1 parent 67d4e7d commit 941f9b1

File tree

1 file changed

+8
-0
lines changed

src/llama.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3346,7 +3346,11 @@ static size_t llama_get_device_count(const llama_model & model) {
3346 3346   static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) {
3347 3347       ggml_backend_buffer_type_t buft = nullptr;
3348 3348
     3349 + #ifdef GGML_USE_RPC
3349 3350       int rpc_count = (int)model.rpc_servers.size();
     3351 + #else
     3352 +     int rpc_count = 0;
     3353 + #endif
3350 3354       int local_gpu = gpu - rpc_count;
3351 3355   #if defined(GGML_USE_RPC)
3352 3356       if (gpu < rpc_count) {
@@ -3403,7 +3407,11 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_mo
3403 3407   }
3404 3408
3405 3409   static size_t llama_get_device_memory(const llama_model & model, int device) {
     3410 + #ifdef GGML_USE_RPC
3406 3411       int rpc_count = (int)model.rpc_servers.size();
     3412 + #else
     3413 +     int rpc_count = 0;
     3414 + #endif
3407 3415       int local_device = device - rpc_count;
3408 3416   #if defined(GGML_USE_RPC)
3409 3417       if (device < rpc_count) {

0 commit comments

Comments
 (0)