
Commit 1f1564c

chore: format code

1 parent 3c674cd

2 files changed (+1785 -1917 lines)


controllers/llamaCPP.cc

Lines changed: 16 additions & 16 deletions
@@ -57,18 +57,18 @@ std::string create_return_json(const std::string &id, const std::string &model,
 }
 
 void llamaCPP::warmupModel() {
-  // json pseudo;
-  //
-  // pseudo["prompt"] = "Hello";
-  // pseudo["n_predict"] = 10;
-  // const int task_id = llama.request_completion(pseudo, false);
-  // std::string completion_text;
-  // task_result result = llama.next_result(task_id);
-  // if (!result.error && result.stop) {
-  // LOG_INFO << result.result_json.dump(-1, ' ', false,
-  // json::error_handler_t::replace);
-  // }
-  // return;
+  // json pseudo;
+  //
+  // pseudo["prompt"] = "Hello";
+  // pseudo["n_predict"] = 10;
+  // const int task_id = llama.request_completion(pseudo, false);
+  // std::string completion_text;
+  // task_result result = llama.next_result(task_id);
+  // if (!result.error && result.stop) {
+  // LOG_INFO << result.result_json.dump(-1, ' ', false,
+  // json::error_handler_t::replace);
+  // }
+  // return;
 }
 
 void llamaCPP::chatCompletion(

@@ -110,7 +110,7 @@ void llamaCPP::chatCompletion(
     data["stop"] = stopWords;
   }
 
-  const int task_id = llama.request_completion(data, false,false);
+  const int task_id = llama.request_completion(data, false, false);
   LOG_INFO << "Resolved request for task_id:" << task_id;
 
   auto state = createState(task_id, this);

@@ -177,8 +177,8 @@ void llamaCPP::embedding(
   } else {
     prompt = "";
  }
-  const int task_id =
-      llama.request_completion({{"prompt", prompt}, {"n_predict", 0}}, false, true);
+  const int task_id = llama.request_completion(
+      {{"prompt", prompt}, {"n_predict", 0}}, false, true);
   task_result result = llama.next_result(task_id);
   std::string embeddingResp = result.result_json.dump();
   auto resp = nitro_utils::nitroHttpResponse();

@@ -250,7 +250,7 @@ void llamaCPP::loadModel(
   jsonResp["message"] = "Model loaded successfully";
   model_loaded = true;
   auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
-  //warmupModel();
+  // warmupModel();
 
   LOG_INFO << "Started background task here!";
   backgroundThread = std::thread(&llamaCPP::backgroundTask, this);
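
For reference, the commented-out block inside warmupModel() above describes a warm-up pass: build a tiny pseudo request, submit it, and log the result so the model is exercised before the first real request. Below is a minimal sketch of that body if it were re-enabled, reconstructed only from the comments in this diff (it is not part of the commit); the two-argument request_completion call is copied from the comment as-is, even though chatCompletion in the same file now passes three arguments.

void llamaCPP::warmupModel() {
  // Sketch reconstructed from the commented-out code above; not part of this
  // commit. Runs a short "Hello" completion (10 tokens) purely to warm the
  // model before serving real traffic.
  json pseudo;
  pseudo["prompt"] = "Hello";
  pseudo["n_predict"] = 10;
  const int task_id = llama.request_completion(pseudo, false);
  task_result result = llama.next_result(task_id);
  if (!result.error && result.stop) {
    LOG_INFO << result.result_json.dump(-1, ' ', false,
                                        json::error_handler_t::replace);
  }
}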
