Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit dafa80a

Browse files
committed
fix: return an error response when the model has not been loaded
1 parent 710342e commit dafa80a

File tree

1 file changed

+20
-0
lines changed

1 file changed

+20
-0
lines changed

controllers/llamaCPP.cc

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#include <regex>
99
#include <string>
1010
#include <thread>
11+
#include <trantor/utils/Logger.h>
1112

1213
using namespace inferences;
1314
using json = nlohmann::json;
@@ -149,6 +150,15 @@ void llamaCPP::chatCompletion(
149150
const HttpRequestPtr &req,
150151
std::function<void(const HttpResponsePtr &)> &&callback) {
151152

153+
if (!model_loaded) {
154+
Json::Value jsonResp;
155+
jsonResp["message"] =
156+
"Model has not been loaded, please load model into nitro";
157+
auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
158+
resp->setStatusCode(drogon::k409Conflict);
159+
callback(resp);
160+
}
161+
152162
const auto &jsonBody = req->getJsonObject();
153163
std::string formatted_output = pre_prompt;
154164

@@ -338,6 +348,16 @@ void llamaCPP::loadModel(
338348
const HttpRequestPtr &req,
339349
std::function<void(const HttpResponsePtr &)> &&callback) {
340350

351+
if (model_loaded) {
352+
LOG_INFO << "model loaded";
353+
Json::Value jsonResp;
354+
jsonResp["message"] = "Model already loaded";
355+
auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
356+
resp->setStatusCode(drogon::k409Conflict);
357+
callback(resp);
358+
return;
359+
}
360+
341361
const auto &jsonBody = req->getJsonObject();
342362

343363
gpt_params params;

0 commit comments

Comments (0)