1 file changed: +2 additions, −13 deletions

@@ -2650,12 +2650,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
2650
2650
2651
2651
// calculate size of ctx needed for tensors, ensure tensors are f32, and find max layer
2652
2652
{
2653
- struct ggml_init_params meta_params = {
2654
- /* .mem_size = */ ggml_tensor_overhead () * 128 + ggml_graph_overhead (),
2655
- /* .mem_buffer = */ nullptr ,
2656
- /* .no_alloc = */ true ,
2657
- };
2658
- ggml_context * meta_ctx = ggml_init (meta_params);
2653
+ ggml_context * meta_ctx = nullptr ;
2659
2654
struct gguf_init_params meta_gguf_params = {
2660
2655
/* .no_alloc = */ true ,
2661
2656
/* .ctx = */ &meta_ctx,
@@ -2720,13 +2715,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
2720
2715
}
2721
2716
2722
2717
// load and scale tensors into final control vector context
2723
- struct ggml_init_params ggml_params = {
2724
- /* .mem_size = */ ggml_tensor_overhead () * n_tensors + n_bytes,
2725
- /* .mem_buffer = */ nullptr ,
2726
- /* .no_alloc = */ false ,
2727
- };
2728
- struct ggml_context * ctx = ggml_init (ggml_params);
2729
-
2718
+ struct ggml_context * ctx = nullptr ;
2730
2719
struct gguf_init_params params = {
2731
2720
/* .no_alloc = */ false ,
2732
2721
/* .ctx = */ &ctx,
0 commit comments