Skip to content

Commit 794aeed

Browse files
committed
examples : remove infill
ggml-ci
1 parent 1d36b36 commit 794aeed

File tree

7 files changed

+6
-655
lines changed

7 files changed

+6
-655
lines changed

Makefile

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1187,11 +1187,6 @@ llama-cli: tools/main/main.cpp \
11871187
@echo '==== Run ./llama-cli -h for help. ===='
11881188
@echo
11891189

1190-
llama-infill: examples/infill/infill.cpp \
1191-
$(OBJ_ALL)
1192-
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
1193-
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
1194-
11951190
llama-run: tools/run/run.cpp \
11961191
$(OBJ_ALL)
11971192
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)

common/arg.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1283,7 +1283,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
12831283
[](common_params & params) {
12841284
params.use_color = true;
12851285
}
1286-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
1286+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP}));
12871287
add_opt(common_arg(
12881288
{"-t", "--threads"}, "N",
12891289
string_format("number of threads to use during generation (default: %d)", params.cpuparams.n_threads),
@@ -1416,7 +1416,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
14161416
add_opt(common_arg(
14171417
{"-n", "--predict", "--n-predict"}, "N",
14181418
string_format(
1419-
ex == LLAMA_EXAMPLE_MAIN || ex == LLAMA_EXAMPLE_INFILL
1419+
ex == LLAMA_EXAMPLE_MAIN
14201420
? "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)"
14211421
: "number of tokens to predict (default: %d, -1 = infinity)",
14221422
params.n_predict),
@@ -1655,15 +1655,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
16551655
params.input_prefix = value;
16561656
params.enable_chat_template = false;
16571657
}
1658-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
1658+
).set_examples({LLAMA_EXAMPLE_MAIN}));
16591659
add_opt(common_arg(
16601660
{"--in-suffix"}, "STRING",
16611661
"string to suffix after user inputs with (default: empty)",
16621662
[](common_params & params, const std::string & value) {
16631663
params.input_suffix = value;
16641664
params.enable_chat_template = false;
16651665
}
1666-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
1666+
).set_examples({LLAMA_EXAMPLE_MAIN}));
16671667
add_opt(common_arg(
16681668
{"--no-warmup"},
16691669
"skip warming up the model with an empty run",
@@ -1680,7 +1680,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
16801680
[](common_params & params) {
16811681
params.spm_infill = true;
16821682
}
1683-
).set_examples({LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_INFILL}));
1683+
).set_examples({LLAMA_EXAMPLE_SERVER}));
16841684
add_opt(common_arg(
16851685
{"--samplers"}, "SAMPLERS",
16861686
string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()),
@@ -2892,7 +2892,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
28922892
[](common_params & params) {
28932893
params.simple_io = true;
28942894
}
2895-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_INFILL}));
2895+
).set_examples({LLAMA_EXAMPLE_MAIN}));
28962896
add_opt(common_arg(
28972897
{"--positive-file"}, "FNAME",
28982898
string_format("positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str()),

common/common.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,6 @@ enum llama_example {
6666
LLAMA_EXAMPLE_COMMON,
6767
LLAMA_EXAMPLE_SPECULATIVE,
6868
LLAMA_EXAMPLE_MAIN,
69-
LLAMA_EXAMPLE_INFILL,
7069
LLAMA_EXAMPLE_EMBEDDING,
7170
LLAMA_EXAMPLE_PERPLEXITY,
7271
LLAMA_EXAMPLE_RETRIEVAL,

examples/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@ else()
2121
add_subdirectory(gguf-hash)
2222
add_subdirectory(gguf)
2323
add_subdirectory(gritlm)
24-
add_subdirectory(infill)
2524
add_subdirectory(lookahead)
2625
add_subdirectory(lookup)
2726
add_subdirectory(parallel)

examples/infill/CMakeLists.txt

Lines changed: 0 additions & 5 deletions
This file was deleted.

examples/infill/README.md

Lines changed: 0 additions & 47 deletions
This file was deleted.

0 commit comments

Comments (0)