
Commit ab336a9

code : normalize enum names (#5697)
* coda : normalize enum names

ggml-ci

* code : cont

* code : cont
1 parent 69917df commit ab336a9
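The rename is mechanical: every enumerator gains the name of its enum as an infix, so LLAMA_SPLIT_ROW becomes LLAMA_SPLIT_MODE_ROW, LLAMA_ROPE_SCALING_YARN becomes LLAMA_ROPE_SCALING_TYPE_YARN, and GGML_OPT_ADAM becomes GGML_OPT_TYPE_ADAM. A minimal sketch of the new spellings from a downstream caller's point of view; the llama_model_default_params / llama_context_default_params helpers are assumed from llama.h and are not part of this diff:

#include "llama.h"

// Sketch only: build params with the renamed enumerators.
static void example_params() {
    llama_model_params mparams = llama_model_default_params();
    mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;               // was LLAMA_SPLIT_LAYER

    llama_context_params cparams = llama_context_default_params();
    cparams.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN;  // was LLAMA_ROPE_SCALING_YARN
}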

File tree

20 files changed: 502 additions, 502 deletions


common/common.cpp

Lines changed: 9 additions & 9 deletions
@@ -295,9 +295,9 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             std::string value(argv[i]);
-            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; }
-            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; }
-            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; }
+            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
+            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
+            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
             else { invalid_param = true; break; }
         } else if (arg == "--rope-scale") {
             if (++i >= argc) {
@@ -630,11 +630,11 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             }
             std::string arg_next = argv[i];
             if (arg_next == "none") {
-                params.split_mode = LLAMA_SPLIT_NONE;
+                params.split_mode = LLAMA_SPLIT_MODE_NONE;
             } else if (arg_next == "layer") {
-                params.split_mode = LLAMA_SPLIT_LAYER;
+                params.split_mode = LLAMA_SPLIT_MODE_LAYER;
             } else if (arg_next == "row") {
-                params.split_mode = LLAMA_SPLIT_ROW;
+                params.split_mode = LLAMA_SPLIT_MODE_ROW;
             } else {
                 invalid_param = true;
                 break;
@@ -837,15 +837,15 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
             sep++;
             if (strncmp(sep, "int:", 4) == 0) {
                 sep += 4;
-                kvo.tag = LLAMA_KV_OVERRIDE_INT;
+                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
                 kvo.int_value = std::atol(sep);
             } else if (strncmp(sep, "float:", 6) == 0) {
                 sep += 6;
-                kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
+                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
                 kvo.float_value = std::atof(sep);
             } else if (strncmp(sep, "bool:", 5) == 0) {
                 sep += 5;
-                kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
+                kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
                 if (std::strcmp(sep, "true") == 0) {
                     kvo.bool_value = true;
                 } else if (std::strcmp(sep, "false") == 0) {
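The key-value override parsing above keeps the same key=type:value syntax (used upstream by the --override-kv argument); only the tag enumerators change. A hedged sketch of filling the same override struct directly — the llama_model_kv_override struct name and its fixed-size key field are assumptions taken from llama.h of this period, while the tag and value fields match the parser above:

#include <cstdio>

#include "llama.h"

// Sketch only: programmatic equivalent of parsing "some.key=bool:true".
static llama_model_kv_override make_bool_override(const char * key, bool value) {
    llama_model_kv_override kvo = {};
    std::snprintf(kvo.key, sizeof(kvo.key), "%s", key);  // key buffer size assumed from llama.h
    kvo.tag        = LLAMA_KV_OVERRIDE_TYPE_BOOL;        // was LLAMA_KV_OVERRIDE_BOOL
    kvo.bool_value = value;
    return kvo;
}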

common/common.h

Lines changed: 2 additions & 2 deletions
@@ -61,7 +61,7 @@ struct gpt_params {
     float p_split = 0.1f; // speculative decoding split probability
     int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
     int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
-    llama_split_mode split_mode = LLAMA_SPLIT_LAYER; // how to split the model across GPUs
+    llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
     int32_t n_beams = 0; // if non-zero then use beam search of given width.
@@ -75,7 +75,7 @@ struct gpt_params {
     float yarn_beta_fast = 32.0f; // YaRN low correction dim
     float yarn_beta_slow = 1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
-    int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
+    int32_t rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
     ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;
 
     // // sampling parameters

common/train.cpp

Lines changed: 5 additions & 5 deletions
@@ -31,7 +31,7 @@ struct train_state * init_train_state() {
 
     state->opt = new struct ggml_opt_context;
     state->opt->ctx = NULL;
-    state->opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
+    state->opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
     state->opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;
     state->opt->loss_after = 0.0f;
 
@@ -556,7 +556,7 @@ void load_opt_context_gguf(struct gguf_context * fctx, struct ggml_context * f_g
     std::string opt_type;
     GGUF_GET_KEY(fctx, opt_type, gguf_get_val_str, GGUF_TYPE_STRING, true, LLM_KV_OPTIMIZER_TYPE);
     if (opt_type == LLM_KV_OPTIMIZER_TYPE_ADAM) {
-        opt->params.type = GGML_OPT_ADAM;
+        opt->params.type = GGML_OPT_TYPE_ADAM;
 
         GGUF_GET_KEY(fctx, opt->adam.fx_best, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, LLM_KV_OPTIMIZER_ADAM_BEST_LOSS);
         GGUF_GET_KEY(fctx, opt->adam.fx_prev, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS);
@@ -568,7 +568,7 @@ void load_opt_context_gguf(struct gguf_context * fctx, struct ggml_context * f_g
         copy_tensor_by_name(opt->adam.v, f_ggml_ctx, LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS);
         copy_tensor_by_name(opt->adam.pf, f_ggml_ctx, LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES);
     } else if (opt_type == LLM_KV_OPTIMIZER_TYPE_LBFGS) {
-        opt->params.type = GGML_OPT_LBFGS;
+        opt->params.type = GGML_OPT_TYPE_LBFGS;
 
         GGUF_GET_KEY(fctx, opt->params.lbfgs.m, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT);
         GGUF_GET_KEY(fctx, opt->lbfgs.fx_best, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS);
@@ -603,7 +603,7 @@ void save_opt_context_gguf(struct gguf_context * fctx, struct ggml_opt_context *
     gguf_set_val_bool(fctx, LLM_KV_OPTIMIZER_JUST_INITIALIZED, opt->just_initialized);
 
     switch (opt->params.type) {
-        case GGML_OPT_ADAM:
+        case GGML_OPT_TYPE_ADAM:
             {
                 gguf_set_val_str(fctx, LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_ADAM);
                 gguf_set_val_f32(fctx, LLM_KV_OPTIMIZER_ADAM_BEST_LOSS, opt->adam.fx_best);
@@ -622,7 +622,7 @@ void save_opt_context_gguf(struct gguf_context * fctx, struct ggml_opt_context *
                     gguf_add_tensor(fctx, opt->adam.pf);
                 }
             } break;
-        case GGML_OPT_LBFGS:
+        case GGML_OPT_TYPE_LBFGS:
             {
                 gguf_set_val_str(fctx, LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_LBFGS);
                 gguf_set_val_u32(fctx, LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT, opt->params.lbfgs.m);

examples/baby-llama/baby-llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -1547,7 +1547,7 @@ int main(int argc, char ** argv) {
 
         float error_before_opt = ggml_get_f32_1d(e, 0);
 
-        struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_LBFGS);
+        struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_TYPE_LBFGS);
         opt_params_lbfgs.print_forward_graph = false;
         opt_params_lbfgs.print_backward_graph = false;
         opt_params_lbfgs.lbfgs.n_iter = 16;

examples/finetune/finetune.cpp

Lines changed: 1 addition & 1 deletion
@@ -1531,7 +1531,7 @@ int main(int argc, char ** argv) {
     lora.hparams.n_rank_output = n_rank_output;
 
     // set opt params from command line
-    opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
+    opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
     opt->params.print_forward_graph = false;
     opt->params.print_backward_graph = false;
     opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;

examples/llama-bench/llama-bench.cpp

Lines changed: 7 additions & 7 deletions
@@ -157,9 +157,9 @@ static const char * output_format_str(output_formats format) {
 
 static const char * split_mode_str(llama_split_mode mode) {
     switch (mode) {
-        case LLAMA_SPLIT_NONE: return "none";
-        case LLAMA_SPLIT_LAYER: return "layer";
-        case LLAMA_SPLIT_ROW: return "row";
+        case LLAMA_SPLIT_MODE_NONE: return "none";
+        case LLAMA_SPLIT_MODE_LAYER: return "layer";
+        case LLAMA_SPLIT_MODE_ROW: return "row";
         default: GGML_ASSERT(!"invalid split mode");
     }
 }
@@ -193,7 +193,7 @@ static const cmd_params cmd_params_defaults = {
     /* type_v */ {GGML_TYPE_F16},
     /* n_threads */ {get_num_physical_cores()},
     /* n_gpu_layers */ {99},
-    /* split_mode */ {LLAMA_SPLIT_LAYER},
+    /* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
     /* main_gpu */ {0},
     /* no_kv_offload */ {false},
     /* mul_mat_q */ {true},
@@ -358,11 +358,11 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
             for (const auto & m : p) {
                 llama_split_mode mode;
                 if (m == "none") {
-                    mode = LLAMA_SPLIT_NONE;
+                    mode = LLAMA_SPLIT_MODE_NONE;
                 } else if (m == "layer") {
-                    mode = LLAMA_SPLIT_LAYER;
+                    mode = LLAMA_SPLIT_MODE_LAYER;
                 } else if (m == "row") {
-                    mode = LLAMA_SPLIT_ROW;
+                    mode = LLAMA_SPLIT_MODE_ROW;
                 } else {
                     invalid_param = true;
                     break;

examples/llava/llava.cpp

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>
 
     ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
     model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
-    if (newline_tmp->backend != GGML_BACKEND_CPU) {
+    if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) {
         if (newline_tmp->buffer == NULL) {
             printf("newline_tmp tensor buffer is NULL\n");
         }

examples/server/server.cpp

Lines changed: 9 additions & 9 deletions
@@ -2086,9 +2086,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
                 break;
             }
             std::string value(argv[i]);
-            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; }
-            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; }
-            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; }
+            /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
+            else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
+            else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
             else { invalid_param = true; break; }
         }
         else if (arg == "--rope-freq-base")
@@ -2212,15 +2212,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         std::string arg_next = argv[i];
         if (arg_next == "none")
         {
-            params.split_mode = LLAMA_SPLIT_NONE;
+            params.split_mode = LLAMA_SPLIT_MODE_NONE;
         }
         else if (arg_next == "layer")
         {
-            params.split_mode = LLAMA_SPLIT_LAYER;
+            params.split_mode = LLAMA_SPLIT_MODE_LAYER;
         }
         else if (arg_next == "row")
        {
-            params.split_mode = LLAMA_SPLIT_ROW;
+            params.split_mode = LLAMA_SPLIT_MODE_ROW;
         }
         else {
             invalid_param = true;
@@ -2447,15 +2447,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         sep++;
         if (strncmp(sep, "int:", 4) == 0) {
             sep += 4;
-            kvo.tag = LLAMA_KV_OVERRIDE_INT;
+            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
             kvo.int_value = std::atol(sep);
         } else if (strncmp(sep, "float:", 6) == 0) {
             sep += 6;
-            kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
+            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
             kvo.float_value = std::atof(sep);
         } else if (strncmp(sep, "bool:", 5) == 0) {
             sep += 5;
-            kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
+            kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
             if (std::strcmp(sep, "true") == 0) {
                 kvo.bool_value = true;
             } else if (std::strcmp(sep, "false") == 0) {

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 1 addition & 1 deletion
@@ -960,7 +960,7 @@ int main(int argc, char ** argv) {
     struct ggml_opt_context * opt = train->opt;
 
     // set opt params from command line
-    opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
+    opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
     opt->params.print_forward_graph = false;
     opt->params.print_backward_graph = false;
     opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;
