Commit f25308b

server : some style changes
1 parent e151670 commit f25308b

examples/server/server.cpp

Lines changed: 53 additions & 57 deletions

@@ -383,9 +383,9 @@ struct llama_client_slot
     bool stopped_eos = false;
     bool stopped_word = false;
     bool stopped_limit = false;
-
+
     bool oaicompat = false;
-    std::string oaicompat_model = "";
+    std::string oaicompat_model;

     std::string stopping_word;

@@ -486,7 +486,7 @@ struct llama_client_slot
         };
     }

-    void print_timings() {
+    void print_timings() const {
         LOG_TEE("\n");
         LOG_TEE("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
                 __func__, t_prompt_processing, num_prompt_tokens_processed, t_prompt_processing / num_prompt_tokens_processed, 1e3 / t_prompt_processing * num_prompt_tokens_processed);
@@ -685,15 +685,15 @@ struct llama_server_context
     bool launch_slot_with_data(llama_client_slot* &slot, json data) {
         slot_params default_params;
         llama_sampling_params default_sparams;
-
+
         if (data.count("__oaicompat") != 0) {
             slot->oaicompat = true;
             slot->oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
         } else {
             slot->oaicompat = false;
             slot->oaicompat_model = "";
         }
-
+
         slot->params.stream = json_value(data, "stream", false);
         slot->params.cache_prompt = json_value(data, "cache_prompt", false);
         slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict);
@@ -1284,7 +1284,7 @@ struct llama_server_context
         std::lock_guard<std::mutex> lock(mutex_tasks);
         task_server task;
         task.id = id_gen++;
-        task.data = data;
+        task.data = std::move(data);
         task.infill_mode = infill;
         task.embedding_mode = embedding;
         task.type = COMPLETION_TASK;
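
The std::move here hands the request JSON over to the queued task instead of deep-copying it. A minimal sketch of the idea, using a stand-in payload type rather than the server's actual task_server/json types:

#include <string>
#include <utility>

struct payload { std::string body; };   // stand-in for the nlohmann::json request data

struct queued_task {
    int     id = 0;
    payload data;
};

// The parameter is taken by value (as request_completion appears to do), so the
// callee already owns its copy; std::move steals that buffer when filling the
// task instead of duplicating it a second time.
queued_task make_task(int id, payload data) {
    queued_task task;
    task.id   = id;
    task.data = std::move(data);
    return task;
}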
@@ -2252,29 +2252,27 @@ json oaicompat_completion_params_parse(
     llama_params["__oaicompat"] = true;

     // Map OpenAI parameters to llama.cpp parameters
-    llama_params["prompt"] = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt'
-    llama_params["temperature"] = json_value(body, "temperature", 0.8);
-    llama_params["top_k"] = json_value(body, "max_tokens", 40);
-    llama_params["top_p"] = json_value(body, "top_p", 0.95);
-    llama_params["n_predict"] = json_value(body, "max_tokens", -1);
-    llama_params["logit_bias"] = json_value(body, "logit_bias",json::object());
+    llama_params["prompt"] = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt'
+    llama_params["temperature"] = json_value(body, "temperature", 0.8);
+    llama_params["top_k"] = json_value(body, "top_k", 40);
+    llama_params["top_p"] = json_value(body, "top_p", 0.95);
+    llama_params["n_predict"] = json_value(body, "max_tokens", -1);
+    llama_params["logit_bias"] = json_value(body, "logit_bias",json::object());
     llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
-    llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
-    llama_params["seed"] = json_value(body, "seed", 0);
-    llama_params["stream"] =json_value(body, "stream", false);
-    llama_params["mirostat"] = json_value(body, "mirostat", false);
-    llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", 0.0);
-    llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", 0.0);
-    llama_params["penalize_nl"] = json_value(body, "penalize_nl", false);
-    llama_params["typical_p"] = json_value(body, "typical_p", 0.0);
-    llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", 0);
-    llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
-    llama_params["tfs_z"] = json_value(body, "tfs_z", 0.0);
-
+    llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
+    llama_params["seed"] = json_value(body, "seed", 0);
+    llama_params["stream"] = json_value(body, "stream", false);
+    llama_params["mirostat"] = json_value(body, "mirostat", false);
+    llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", 0.0);
+    llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", 0.0);
+    llama_params["penalize_nl"] = json_value(body, "penalize_nl", false);
+    llama_params["typical_p"] = json_value(body, "typical_p", 0.0);
+    llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", 0);
+    llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
+    llama_params["tfs_z"] = json_value(body, "tfs_z", 0.0);
+
     if (llama_params.count("grammar") != 0) {
-        llama_params["grammar"] = json_value(
-            body, "grammar",
-            json::object());
+        llama_params["grammar"] = json_value(body, "grammar", json::object());
     }

     // Handle 'stop' field
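
Besides the whitespace cleanup, the rewritten block changes the top_k mapping to read the request's "top_k" field instead of "max_tokens". Every mapping goes through json_value, which is presumably a lookup-with-default helper along these lines (a sketch, not necessarily the repository's exact implementation):

#include <string>
#include "json.hpp" // nlohmann::json single header bundled with the example
using json = nlohmann::json;

// Return body[key] converted to T, or default_value when the key is missing
// or null, so absent OpenAI fields silently fall back to llama.cpp defaults.
template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value) {
    return body.contains(key) && !body.at(key).is_null()
               ? body.value(key, default_value)
               : default_value;
}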
@@ -2287,23 +2285,22 @@ json oaicompat_completion_params_parse(
             body, "stop",
             json::array());
     }
-
+
     // Ensure there is ChatML-specific end sequence among stop words
     llama_params["stop"].push_back("<|im_end|>");

     return llama_params;
 }

-static json format_final_response_oaicompat(json request, task_result response,
-                                            bool streaming = false)
+static json format_final_response_oaicompat(const json &request, const task_result &response, bool streaming = false)
 {
     json result = response.result_json;

-    bool stopped_word = result.count("stopped_word") != 0;
-    bool stopped_eos = json_value(result, "stopped_eos", false);
+    bool stopped_word        = result.count("stopped_word") != 0;
+    bool stopped_eos         = json_value(result, "stopped_eos", false);
     int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
-    int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
-    std::string content = json_value(result, "content", std::string(""));
+    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
+    std::string content      = json_value(result, "content", std::string(""));

     std::string finish_reason = "length";
     if (stopped_word || stopped_eos) {
@@ -2314,10 +2311,10 @@ static json format_final_response_oaicompat(json request, task_result response,
         streaming ? json::array({json{{"finish_reason", finish_reason},
                                       {"index", 0},
                                       {"delta", json::object()}}})
-                  : json::array({json{{"finish_reason", finish_reason},
+                  : json::array({json{{"finish_reason", finish_reason},
                                       {"index", 0},
                                       {"message", json{{"content", content},
-                                                       {"role", "assistant"}}}}});
+                                                       {"role", "assistant"}}}}});

     std::time_t t = std::time(0);

@@ -2345,23 +2342,22 @@ static json format_final_response_oaicompat(json request, task_result response,
 }

 // return value is vector as there is one case where we might need to generate two responses
-static std::vector<json> format_partial_response_oaicompat(task_result response) {
+static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
     json result = response.result_json;

     if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
         return std::vector<json>({response.result_json});
     }

     bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
-    std::string modelname =
-        json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));
+    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

-    bool stopped_word = json_value(result, "stopped_word", false);
-    bool stopped_eos = json_value(result, "stopped_eos", false);
-    bool stopped_limit = json_value(result, "stopped_limit", false);
+    bool stopped_word   = json_value(result, "stopped_word", false);
+    bool stopped_eos    = json_value(result, "stopped_eos", false);
+    bool stopped_limit  = json_value(result, "stopped_limit", false);
     std::string content = json_value(result, "content", std::string(""));

-    std::string finish_reason = "";
+    std::string finish_reason;
     if (stopped_word || stopped_eos) {
         finish_reason = "stop";
     }
@@ -2383,7 +2379,7 @@ static std::vector<json> format_partial_response_oaicompat(task_result response)
         choices = json::array({json{{"finish_reason", nullptr},
                                     {"index", 0},
                                     {"delta", json{{"role", "assistant"}}}}});
-    } else {
+    } else {
         // We have to send this as two updates to conform to openai behavior
         json initial_ret = json{{"choices", json::array({json{
                                 {"finish_reason", nullptr},
@@ -2400,13 +2396,13 @@ static std::vector<json> format_partial_response_oaicompat(task_result response)
                 {"choices", json::array({json{{"finish_reason", nullptr},
                                               {"index", 0},
                                               {"delta", json{
-                                                  {"content", content}}}
+                                                  {"content", content}}}
                                               }})},
                 {"created", t},
                 {"id", gen_chatcmplid()},
                 {"model", modelname},
                 {"object", "chat.completion.chunk"}};
-
+
             return std::vector<json>({initial_ret, second_ret});
         }
     } else {
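
For the first streamed token the function therefore returns two chunks, as the comment in the previous hunk notes: one carrying the assistant role and one carrying the content delta. Roughly the shape a client receives (illustrative values only; the ids, timestamps, and model name are placeholders):

#include <vector>
#include "json.hpp" // nlohmann::json single header bundled with the example
using json = nlohmann::json;

// Illustrative only: the two chat.completion.chunk objects emitted for the
// first token of a stream, mirroring initial_ret/second_ret above.
static std::vector<json> first_token_chunks_example() {
    json role_chunk = {
        {"choices", json::array({json{{"finish_reason", nullptr},
                                      {"index", 0},
                                      {"delta", json{{"role", "assistant"}}}}})},
        {"created", 0},                 // placeholder timestamp
        {"id", "chatcmpl-xxxxxxxx"},    // placeholder id
        {"model", "some-model"},
        {"object", "chat.completion.chunk"}
    };
    json content_chunk = {
        {"choices", json::array({json{{"finish_reason", nullptr},
                                      {"index", 0},
                                      {"delta", json{{"content", "Hello"}}}}})},
        {"created", 0},
        {"id", "chatcmpl-xxxxxxxx"},
        {"model", "some-model"},
        {"object", "chat.completion.chunk"}
    };
    return {role_chunk, content_chunk};
}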
@@ -2612,9 +2608,9 @@ int main(int argc, char **argv)
                 task_result result = llama.next_result(task_id);
                 if (!result.error) {
                     const std::string str =
-                        "data: " +
-                        result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
-                        "\n\n";
+                        "data: " +
+                        result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
+                        "\n\n";
                     LOG_VERBOSE("data stream", {
                         { "to_send", str }
                     });
@@ -2627,9 +2623,9 @@ int main(int argc, char **argv)
                 }
             } else {
                 const std::string str =
-                    "error: " +
-                    result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
-                    "\n\n";
+                    "error: " +
+                    result.result_json.dump(-1, ' ', false, json::error_handler_t::replace) +
+                    "\n\n";
                 LOG_VERBOSE("data stream", {
                     { "to_send", str }
                 });
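
Both branches frame each result as a server-sent-events style record: a "data: " (or "error: ") prefix, the JSON serialized onto a single line, and a blank line terminating the event. A standalone sketch of the same framing (the chunk contents are made up):

#include <iostream>
#include <string>
#include "json.hpp" // nlohmann::json single header bundled with the example
using json = nlohmann::json;

int main() {
    json chunk = {{"content", "Hello"}, {"stop", false}};   // made-up result payload
    // dump(-1, ' ', false, replace): no pretty-printing, so the event stays on
    // one line, and invalid UTF-8 is replaced instead of throwing.
    const std::string str =
        "data: " +
        chunk.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n";
    std::cout << str;
}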
@@ -2655,13 +2651,13 @@ int main(int argc, char **argv)
             });


-    svr.Post("/v1/chat/completions", [&llama](const httplib::Request &req,
-                                              httplib::Response &res)
+    // TODO: add mount point without "/v1" prefix -- how?
+    svr.Post("/v1/chat/completions", [&llama](const httplib::Request &req, httplib::Response &res)
             {
                 json data = oaicompat_completion_params_parse(json::parse(req.body));

                 const int task_id = llama.request_completion(data, false, false);
-
+
                 if (!json_value(data, "stream", false)) {
                     std::string completion_text;
                     task_result result = llama.next_result(task_id);
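
The handler feeds the raw request body through oaicompat_completion_params_parse, so clients can post an OpenAI-style chat request to /v1/chat/completions. An illustrative body (field values are invented; only "messages" is accessed unconditionally, everything else falls back to json_value defaults):

#include "json.hpp" // nlohmann::json single header bundled with the example
using json = nlohmann::json;

// Hypothetical request for POST /v1/chat/completions, showing which OpenAI
// fields the parser above maps onto llama.cpp parameters.
static json example_chat_request() {
    return json{
        {"model", "any-model-name"},        // stored as the slot's oaicompat_model
        {"messages", json::array({
            json{{"role", "system"}, {"content", "You are a helpful assistant."}},
            json{{"role", "user"},   {"content", "Hello!"}}
        })},                                // flattened by format_chatml() into the prompt
        {"max_tokens", 64},                 // becomes n_predict
        {"temperature", 0.8},
        {"stream", true}                    // selects the chunked event-stream path below
    };
}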
@@ -2683,7 +2679,7 @@ int main(int argc, char **argv)
                 task_result llama_result = llama.next_result(task_id);
                 if (!llama_result.error) {
                     std::vector<json> result_array = format_partial_response_oaicompat( llama_result);
-
+
                     for (auto it = result_array.begin(); it != result_array.end(); ++it)
                     {
                         if (!it->empty()) {
@@ -2725,7 +2721,7 @@ int main(int argc, char **argv)
                 res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
             }
         });
-
+
     svr.Post("/infill", [&llama](const httplib::Request &req, httplib::Response &res)
             {
                 json data = json::parse(req.body);
