@@ -1283,7 +1283,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.use_color = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
     add_opt(common_arg(
         {"-t", "--threads"}, "N",
         string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
@@ -1416,7 +1416,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     add_opt(common_arg(
         {"-n", "--predict", "--n-predict"}, "N",
         string_format(
-            ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL
+            ex == LLAMA_EXAMPLE_MAIN
                 ? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
                 : "number of tokens to predict (default: %d, -1 = infinity)",
             params.n_predict),
@@ -1655,15 +1655,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.input_prefix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--in-suffix"}, "STRING",
         "string to suffix after user inputs with (default: empty)",
         [](common_params & params, const std::string & value) {
             params.input_suffix = value;
             params.enable_chat_template = false;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--no-warmup"},
         "skip warming up the model with an empty run",
@@ -1680,7 +1680,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.spm_infill = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--samplers"}, "SAMPLERS",
         string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
@@ -2892,7 +2892,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params) {
             params.simple_io = true;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN}));
     add_opt(common_arg(
         {"--positive-file"}, "FNAME",
         string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),