1 file changed: +23 −0 lines changed
@@ -1,3 +1,5 @@
+#include <iostream>
+#include <string>
 #if defined(_WIN32)
 #define NOMINMAX
 #endif
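
These two new headers back the interactive prompt added in the next hunk: <iostream> for the std::cout/std::cin exchange and <string> for the index read from stdin.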
@@ -2117,6 +2119,25 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
     // log_disable(); // Disable the log to file feature, reduce bloat for
     // target
     // system()
+    std::vector<std::string> llama_models =
+        nitro_utils::listFilesInDir(nitro_utils::models_folder);
+    std::string model_index;
+    if (!llama_models.empty()) {
+      LOG_INFO << "Found models folder, here are the llama models you have:";
+      int index_val = 0;
+      for (const auto &llama_model : llama_models) {
+        LOG_INFO << "index: " << index_val++ << " | model: " << llama_model;
+      }
+      std::cout
+          << "Please type the index of the model you want to load here >> ";
+      std::cin >> model_index;
+      Json::Value jsonBody;
+      jsonBody["llama_model_path"] = nitro_utils::models_folder + "/" +
+                                     llama_models[std::stoi(model_index)];
+      loadModelImpl(jsonBody);
+    } else {
+      LOG_INFO << "Models folder not found, starting server as usual";
+    }
   }
 
   METHOD_LIST_BEGIN
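
The block added above runs in the llamaCPP constructor: it lists every file that nitro_utils::listFilesInDir finds under the models folder, prompts once on stdin for an index, and hands the chosen path to loadModelImpl before the server starts handling requests. Below is a minimal standalone sketch of the same selection flow, assuming a ./models directory and using std::filesystem (C++17) as a hypothetical stand-in for the repo's helper:

// Standalone sketch of the startup model picker (assumptions: a ./models
// directory exists; std::filesystem stands in for nitro_utils::listFilesInDir).
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

int main() {
  namespace fs = std::filesystem;
  std::vector<std::string> models;
  if (fs::exists("models")) {
    for (const auto &entry : fs::directory_iterator("models"))
      if (entry.is_regular_file())
        models.push_back(entry.path().filename().string());
  }
  if (models.empty()) {
    std::cout << "Models folder not found, starting server as usual\n";
    return 0;
  }
  for (std::size_t i = 0; i < models.size(); ++i)
    std::cout << "index: " << i << " | model: " << models[i] << '\n';
  std::cout << "Please type the index of the model you want to load here >> ";
  std::size_t index = 0;
  if (!(std::cin >> index) || index >= models.size()) {
    std::cerr << "Invalid index, exiting\n";
    return 1;
  }
  // The real controller builds a Json::Value body here and calls
  // loadModelImpl(jsonBody); this sketch just reports the choice.
  std::cout << "Would load: models/" << models[index] << '\n';
  return 0;
}

Unlike the committed code, the sketch bounds-checks the index before using it: std::stoi on raw stdin input throws std::invalid_argument for non-numeric text, and nothing in the commit guards against an out-of-range index.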
@@ -2145,6 +2166,8 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
   void modelStatus(const HttpRequestPtr &req,
                    std::function<void(const HttpResponsePtr &)> &&callback);
 
+  bool loadModelImpl(const Json::Value &jsonBody);
+
   void warmupModel();
 
   void backgroundTask();
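
The new loadModelImpl declaration is what lets the constructor reuse the same loading path as the HTTP handler: both sides build a Json::Value body with a llama_model_path key and pass it in. A hedged sketch of that body construction with jsoncpp follows; the model filename is a made-up placeholder, not from the repo:

// Sketch: building the request body that loadModelImpl consumes.
// "some-model.gguf" is a placeholder filename for illustration only.
#include <json/json.h>
#include <iostream>
#include <string>

int main() {
  const std::string models_folder = "models";
  Json::Value jsonBody;
  jsonBody["llama_model_path"] = models_folder + "/" + "some-model.gguf";

  // Serialize compactly just to show the shape of the payload.
  Json::StreamWriterBuilder writer;
  writer["indentation"] = "";
  std::cout << Json::writeString(writer, jsonBody) << '\n';
  // Prints a one-line object like {"llama_model_path":"models/some-model.gguf"}
  return 0;
}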