This repository was archived by the owner on Jul 4, 2025. It is now read-only.
File tree Expand file tree Collapse file tree 1 file changed +11
-3
lines changed
Expand file tree Collapse file tree 1 file changed +11
-3
lines changed Original file line number Diff line number Diff line change @@ -548,6 +548,17 @@ void llamaCPP::ModelStatus(
548548void llamaCPP::LoadModel (
549549 const HttpRequestPtr& req,
550550 std::function<void (const HttpResponsePtr&)>&& callback) {
551+
552+ if (!nitro_utils::isAVX2Supported () && ggml_cpu_has_avx2 ()) {
553+ LOG_ERROR << " AVX2 is not supported by your processor" ;
554+ Json::Value jsonResp;
555+ jsonResp[" message" ] = " AVX2 is not supported by your processor, please download and replace the correct Nitro asset version" ;
556+ auto resp = nitro_utils::nitroHttpJsonResponse (jsonResp);
557+ resp->setStatusCode (drogon::k500InternalServerError);
558+ callback (resp);
559+ return ;
560+ }
561+
551562 if (llama.model_loaded_external ) {
552563 LOG_INFO << " Model already loaded" ;
553564 Json::Value jsonResp;
@@ -558,9 +569,6 @@ void llamaCPP::LoadModel(
558569 return ;
559570 }
560571
561- if (!nitro_utils::isAVX2Supported () && ggml_cpu_has_avx2 ())
562- LOG_ERROR << " AVX2 is not supported by your processor, please download and replace the correct Nitro asset version" ;
563-
564572 const auto & jsonBody = req->getJsonObject ();
565573 if (!LoadModelImpl (jsonBody)) {
566574 // Error occurred during model loading
You can't perform that action at this time.
0 commit comments