Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 543be27

Browse files
committed
AVX2 check now also returns an error response
1 parent 7e7bf9a commit 543be27

File tree

1 file changed

+11
-3
lines changed

1 file changed

+11
-3
lines changed

controllers/llamaCPP.cc

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -548,6 +548,17 @@ void llamaCPP::ModelStatus(
548548
void llamaCPP::LoadModel(
549549
const HttpRequestPtr& req,
550550
std::function<void(const HttpResponsePtr&)>&& callback) {
551+
552+
if (!nitro_utils::isAVX2Supported() && ggml_cpu_has_avx2()) {
553+
LOG_ERROR << "AVX2 is not supported by your processor";
554+
Json::Value jsonResp;
555+
jsonResp["message"] = "AVX2 is not supported by your processor, please download and replace the correct Nitro asset version";
556+
auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
557+
resp->setStatusCode(drogon::k500InternalServerError);
558+
callback(resp);
559+
return;
560+
}
561+
551562
if (llama.model_loaded_external) {
552563
LOG_INFO << "Model already loaded";
553564
Json::Value jsonResp;
@@ -558,9 +569,6 @@ void llamaCPP::LoadModel(
558569
return;
559570
}
560571

561-
if (!nitro_utils::isAVX2Supported() && ggml_cpu_has_avx2())
562-
LOG_ERROR << "AVX2 is not supported by your processor, please download and replace the correct Nitro asset version";
563-
564572
const auto& jsonBody = req->getJsonObject();
565573
if (!LoadModelImpl(jsonBody)) {
566574
// Error occurred during model loading

0 commit comments

Comments (0)