Skip to content

Commit 823b211

Browse files
slaren authored and arthw committed
llama : fix op mul check with command-r-plus (ggml-org#10476)
1 parent 68ebe18 commit 823b211

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

src/llama.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7185,12 +7185,12 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w
 7185 7185                } break;
 7186 7186            case GGML_OP_ADD:
 7187 7187                {
 7188      -                  ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, w->ne[0], 512);
      7188 +                  ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
 7189 7189                    op_tensor = ggml_add(ctx, a, w);
 7190 7190                } break;
 7191 7191            case GGML_OP_MUL:
 7192 7192                {
 7193      -                  ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, w->ne[0], 512);
      7193 +                  ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
 7194 7194                    op_tensor = ggml_mul(ctx, a, w);
 7195 7195                } break;
 7196 7196            case GGML_OP_DIV:

0 commit comments

Comments
 (0)