Skip to content

Commit 19d3c82

Browse files
authored
There's a better way of clearing lines (#11756)
Use the ANSI escape code for clearing a line.

Signed-off-by: Eric Curtin <ecurtin@redhat.com>
1 parent 98f6b0f commit 19d3c82

File tree

2 files changed

+5
-11
lines changed

2 files changed

+5
-11
lines changed

common/log.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
#include "ggml.h" // for ggml_log_level
44

5+
#define LOG_CLR_TO_EOL "\033[K\r"
56
#define LOG_COL_DEFAULT "\033[0m"
67
#define LOG_COL_BOLD "\033[1m"
78
#define LOG_COL_RED "\033[31m"

examples/run/run.cpp

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -535,8 +535,7 @@ class HttpClient {
535535

536536
static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
537537
const std::string & progress_suffix) {
538-
printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
539-
progress_suffix.c_str());
538+
printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
540539
}
541540
// Function to write data to a file
542541
static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
@@ -797,16 +796,13 @@ class LlamaData {
797796
llama_model_ptr initialize_model(Opt & opt) {
798797
ggml_backend_load_all();
799798
resolve_model(opt.model_);
800-
printe(
801-
"\r%*s"
802-
"\rLoading model",
803-
get_terminal_width(), " ");
799+
printe("\r" LOG_CLR_TO_EOL "Loading model");
804800
llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
805801
if (!model) {
806802
printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
807803
}
808804

809-
printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");
805+
printe("\r" LOG_CLR_TO_EOL);
810806
return model;
811807
}
812808

@@ -969,10 +965,7 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str
969965
static int read_user_input(std::string & user_input) {
970966
static const char * prompt_prefix = "> ";
971967
#ifdef WIN32
972-
printf(
973-
"\r%*s"
974-
"\r" LOG_COL_DEFAULT "%s",
975-
get_terminal_width(), " ", prompt_prefix);
968+
printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);
976969

977970
std::getline(std::cin, user_input);
978971
if (std::cin.eof()) {

0 commit comments

Comments
 (0)