Skip to content

Commit 5ea6cdd

Browse files
committed
llama : add llama_model methods
ggml-ci
1 parent a3d50bc commit 5ea6cdd

File tree

5 files changed

+337
-342
lines changed

5 files changed

+337
-342
lines changed

src/llama-adapter.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -62,7 +62,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
6262
cvec.tensors.reserve(hparams.n_layer);
6363
cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
6464
for (size_t il = 1; il < hparams.n_layer; il++) {
65-
ggml_backend_buffer_type_t buft = llama_model_select_buft(model, il);
65+
ggml_backend_buffer_type_t buft = model.select_buft(il);
6666
ggml_context * ctx = ctx_for_buft(buft);
6767
if (!ctx) {
6868
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
@@ -257,7 +257,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char
257257
}
258258

259259
// device buft and device ctx
260-
auto * model_tensor = llama_model_get_tensor(model, name.c_str());
260+
const auto * model_tensor = model.get_tensor( name.c_str());
261261
if (!model_tensor) {
262262
throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
263263
}

0 commit comments

Comments (0)