-
Notifications
You must be signed in to change notification settings. Fork: 12k
Introduce ggml_threadpool #7526
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from all commits
Commits
Show all changes
17 commits
Select commit
Hold shift + click to select a range
9a4bdc8
Introduce ggml_threadpool
4675838
set bitmask properly on windows
fmz 0ce35a6
reset cpu affinity every time for main thread
fmz 8bdae21
roll back CMakePresets.json changes
fmz ef1b87d
free batch threadpool in main
fmz e771674
threadpool: proper handling for non-specified cpumask
max-krasnyansky c1e7f48
threadpool: add persistent threadpool for llama-bench
max-krasnyansky a67dbcc
threadpool: fix compiler errors for android and x64 builds
max-krasnyansky 1d9d39a
threadpool: update backend interface in ggml-rpc
max-krasnyansky 5c9222d
Restrict threadpool to CPU backend
cbab212
Restrict threadpool to CPU backend
65c11d4
llama-bench threadpool CLI params
fmz 88dc99a
Merge branch 'threadpool' of https://github.com/CodeLinaro/llama.cpp …
fmz e8c3364
fixes for non-llvm builds
719d12b
fix server test
9d6675d
fix n_threads == 1 bug
6a8aa22
Merge branch 'master' into threadpool
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -187,10 +187,20 @@ struct cmd_params { | |
std::vector<bool> use_mmap; | ||
std::vector<bool> embeddings; | ||
ggml_numa_strategy numa; | ||
cpu_params cpuparams; | ||
int reps; | ||
bool verbose; | ||
output_formats output_format; | ||
}; | ||
// | ||
//static const cpu_params default_cpuparams( | ||
// int32_t(std::thread::hardware_concurrency()), | ||
// {false}, | ||
// false, | ||
// 1, | ||
// false, | ||
// false | ||
//); | ||
|
||
static const cmd_params cmd_params_defaults = { | ||
/* model */ {"models/7B/ggml-model-q4_0.gguf"}, | ||
|
@@ -212,6 +222,7 @@ static const cmd_params cmd_params_defaults = { | |
/* use_mmap */ {true}, | ||
/* embeddings */ {false}, | ||
/* numa */ GGML_NUMA_STRATEGY_DISABLED, | ||
/* cpuparams */ {}, | ||
/* reps */ 5, | ||
/* verbose */ false, | ||
/* output_format */ MARKDOWN | ||
|
@@ -239,6 +250,11 @@ static void print_usage(int /* argc */, char ** argv) { | |
printf(" -fa, --flash-attn <0|1> (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str()); | ||
printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str()); | ||
printf(" --numa <distribute|isolate|numactl> (default: disabled)\n"); | ||
printf(" -mt, --max-threads <n> (default: %d)\n", cmd_params_defaults.cpuparams.n_threads); | ||
printf(" -C, --cpu-mask <hex> (default: 0x0)\n"); | ||
printf(" --cpu-strict <0|1> (default: %d)\n", cmd_params_defaults.cpuparams.strict_cpu); | ||
printf(" --priority <0|1|2|3> (default: %d)\n", cmd_params_defaults.cpuparams.priority); | ||
printf(" --poll <0|1> (default: %d)\n", cmd_params_defaults.cpuparams.poll); | ||
printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str()); | ||
printf(" -ts, --tensor-split <ts0/ts1/..> (default: 0)\n"); | ||
printf(" -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps); | ||
|
@@ -275,7 +291,7 @@ static ggml_type ggml_type_from_name(const std::string & s) { | |
} | ||
|
||
|
||
static cmd_params parse_cmd_params(int argc, char ** argv) { | ||
static cmd_params parse_cmd_params(int argc, char** argv) { | ||
cmd_params params; | ||
std::string arg; | ||
bool invalid_param = false; | ||
|
@@ -326,7 +342,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { | |
invalid_param = true; | ||
break; | ||
} | ||
params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])}); | ||
params.n_pg.push_back({ std::stoi(p[0]), std::stoi(p[1]) }); | ||
} else if (arg == "-b" || arg == "--batch-size") { | ||
if (++i >= argc) { | ||
invalid_param = true; | ||
|
@@ -348,7 +364,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { | |
} | ||
auto p = split<std::string>(argv[i], split_delim); | ||
std::vector<ggml_type> types; | ||
for (const auto & t : p) { | ||
for (const auto& t : p) { | ||
ggml_type gt = ggml_type_from_name(t); | ||
if (gt == GGML_TYPE_COUNT) { | ||
invalid_param = true; | ||
|
@@ -364,7 +380,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { | |
} | ||
auto p = split<std::string>(argv[i], split_delim); | ||
std::vector<ggml_type> types; | ||
for (const auto & t : p) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. That's what I get when I use an unconfigured IDE |
||
for (const auto& t : p) { | ||
ggml_type gt = ggml_type_from_name(t); | ||
if (gt == GGML_TYPE_COUNT) { | ||
invalid_param = true; | ||
|
@@ -400,7 +416,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { | |
} | ||
auto p = split<std::string>(argv[i], split_delim); | ||
std::vector<llama_split_mode> modes; | ||
for (const auto & m : p) { | ||
for (const auto& m : p) { | ||
llama_split_mode mode; | ||
if (m == "none") { | ||
mode = LLAMA_SPLIT_MODE_NONE; | ||
|
@@ -434,11 +450,36 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { | |
break; | ||
} else { | ||
std::string value(argv[i]); | ||
/**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } | ||
else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } | ||
else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } | ||
/**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } | ||
else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } | ||
else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } | ||
else { invalid_param = true; break; } | ||
} | ||
|
||
} else if (arg == "-mt" || arg == "--max-threads") { | ||
if (++i >= argc) { | ||
invalid_param = true; | ||
break; | ||
} | ||
params.cpuparams.n_threads = std::stoi(argv[i]); | ||
} else if (arg == "-C" || arg == "--cpu-mask") { | ||
if (++i >= argc) { | ||
invalid_param = true; | ||
break; | ||
} | ||
std::string mask = argv[i]; | ||
params.cpuparams.mask_valid = true; | ||
invalid_param = !parse_cpu_mask(mask, params.cpuparams.cpumask); | ||
} else if (arg == "--prio") { | ||
if (++i >= argc) { | ||
invalid_param = true; | ||
break; | ||
} | ||
params.cpuparams.priority = std::stoul(argv[i]); | ||
} else if (arg == "--cpu-strict") { | ||
params.cpuparams.strict_cpu = true; | ||
} else if (arg == "--poll") { | ||
params.cpuparams.poll = true; | ||
} else if (arg == "-fa" || arg == "--flash-attn") { | ||
if (++i >= argc) { | ||
invalid_param = true; | ||
|
@@ -1234,8 +1275,7 @@ struct sql_printer : public printer { | |
} | ||
}; | ||
|
||
static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) { | ||
llama_set_n_threads(ctx, n_threads, n_threads); | ||
static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch) { | ||
|
||
const llama_model * model = llama_get_model(ctx); | ||
const int32_t n_vocab = llama_n_vocab(model); | ||
|
@@ -1257,9 +1297,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat | |
llama_synchronize(ctx); | ||
} | ||
|
||
static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This should not have changed |
||
llama_set_n_threads(ctx, n_threads, n_threads); | ||
|
||
static void test_gen(llama_context * ctx, int n_gen, int n_past) { | ||
const llama_model * model = llama_get_model(ctx); | ||
const int32_t n_vocab = llama_n_vocab(model); | ||
|
||
|
@@ -1330,6 +1368,23 @@ int main(int argc, char ** argv) { | |
llama_model * lmodel = nullptr; | ||
const cmd_params_instance * prev_inst = nullptr; | ||
|
||
postprocess_cpu_params(params.cpuparams); | ||
|
||
struct ggml_threadpool_params tpp; | ||
tpp.n_threads = params.cpuparams.n_threads; | ||
tpp.mask_specified = params.cpuparams.mask_valid; | ||
tpp.strict_cpu = params.cpuparams.strict_cpu; | ||
tpp.prio = params.cpuparams.priority; | ||
tpp.poll = params.cpuparams.poll; | ||
|
||
std::memcpy(&tpp.cpumask[0], ¶ms.cpuparams.cpumask[0], GGML_N_CORES_MAX); | ||
|
||
struct ggml_compute_threadpool* threadpool = ggml_create_threadpool(&tpp); | ||
if (!threadpool) { | ||
LOG_TEE("%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads); | ||
exit(1); | ||
} | ||
|
||
for (const auto & inst : params_instances) { | ||
// keep the same model between tests when possible | ||
if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) { | ||
|
@@ -1356,13 +1411,16 @@ int main(int argc, char ** argv) { | |
|
||
llama_kv_cache_clear(ctx); | ||
|
||
llama_set_n_threads(ctx, t.n_threads, t.n_threads); | ||
llama_attach_threadpool(ctx, threadpool); | ||
|
||
// warmup run | ||
if (t.n_prompt > 0) { | ||
//test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads); | ||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads); | ||
//test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch); | ||
test_prompt(ctx, t.n_prompt, 0, t.n_batch); | ||
} | ||
if (t.n_gen > 0) { | ||
test_gen(ctx, 1, 0, t.n_threads); | ||
test_gen(ctx, 1, 0); | ||
} | ||
|
||
for (int i = 0; i < params.reps; i++) { | ||
|
@@ -1371,10 +1429,10 @@ int main(int argc, char ** argv) { | |
uint64_t t_start = get_time_ns(); | ||
|
||
if (t.n_prompt > 0) { | ||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads); | ||
test_prompt(ctx, t.n_prompt, 0, t.n_batch); | ||
} | ||
if (t.n_gen > 0) { | ||
test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads); | ||
test_gen(ctx, t.n_gen, t.n_prompt); | ||
} | ||
|
||
uint64_t t_ns = get_time_ns() - t_start; | ||
|
@@ -1386,7 +1444,9 @@ int main(int argc, char ** argv) { | |
llama_print_timings(ctx); | ||
|
||
llama_free(ctx); | ||
|
||
} | ||
ggml_release_threadpool(threadpool); | ||
|
||
llama_free_model(lmodel); | ||
|
||
|
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Debug leftovers