| 1 | +#include "log.h" |
1 | 2 | #include <drogon/HttpTypes.h> |
2 | 3 | #if defined(_WIN32) |
3 | 4 | #define NOMINMAX |
@@ -1258,38 +1259,8 @@ namespace inferences { |
1258 | 1259 | class llamaCPP : public drogon::HttpController<llamaCPP> { |
1259 | 1260 | public: |
1260 | 1261 | llamaCPP() { |
1261 | | - // gpt_params params; |
1262 | | - // auto conf = drogon::app().getCustomConfig(); |
1263 | | - // params.model = conf["llama_model_path"].asString(); |
1264 | | - // params.n_gpu_layers = conf["ngl"].asInt(); |
1265 | | - // params.n_ctx = conf["ctx_len"].asInt(); |
1266 | | - // params.embedding = conf["embedding"].asBool(); |
1267 | | - // #ifdef GGML_USE_CUBLAS |
1268 | | - // LOG_INFO_LLAMA << "Setting up GGML CUBLAS PARAMS"; |
1269 | | - // params.mul_mat_q = false; |
1270 | | - // #endif // GGML_USE_CUBLAS |
1271 | | - // if (params.model_alias == "unknown") { |
1272 | | - // params.model_alias = params.model; |
1273 | | - // } |
1274 | | - // |
1275 | | - // llama_backend_init(params.numa); |
1276 | | - // |
1277 | | - // LOG_INFO_LLAMA_LLAMA("build info", |
1278 | | - // {{"build", BUILD_NUMBER}, {"commit", BUILD_COMMIT}}); |
1279 | | - // LOG_INFO_LLAMA_LLAMA("system info", |
1280 | | - // { |
1281 | | - // {"n_threads", params.n_threads}, |
1282 | | - // {"total_threads", |
1283 | | - // std::thread::hardware_concurrency()}, |
1284 | | - // {"system_info", llama_print_system_info()}, |
1285 | | - // }); |
1286 | | - // |
1287 | | - // // load the model |
1288 | | - // if (!llama.loadModel(params)) { |
1289 | | - // LOG_ERROR_LLAMA << "Error loading the model will exit the program"; |
1290 | | - // std::terminate(); |
1291 | | - // } |
1292 | | - // deprecate this if find no usecase |
 | 1262 | + // Some default values for now are set below |
 | 1263 | + log_disable(); // Disable the log-to-file feature to reduce bloat on the target system |
1293 | 1264 | } |
1294 | 1265 | METHOD_LIST_BEGIN |
1295 | 1266 | // list path definitions here; |
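
For reference, the constructor body removed above configured the model from Drogon's custom config and loaded it at controller construction time. The sketch below reconstructs that pattern in standalone form; it is an illustration only, assuming llama.cpp's common `gpt_params` struct, the older `llama_backend_init(bool numa)` signature, and the `"unknown"` default for `model_alias`, all as they appear in the removed comments. The final `llama.loadModel(params)` call is left as a comment because the server context type is outside this hunk.

```cpp
// Reconstruction of the removed config-driven setup (see assumptions above).
#include <drogon/drogon.h>
#include "common.h" // llama.cpp common: gpt_params (assumed available)
#include "llama.h"  // llama_backend_init (assumed older bool-numa signature)

// Build gpt_params from Drogon's custom config, using the same keys as the
// removed constructor body (llama_model_path, ngl, ctx_len, embedding).
static gpt_params params_from_custom_config() {
  gpt_params params;
  const auto &conf = drogon::app().getCustomConfig();
  params.model = conf["llama_model_path"].asString();
  params.n_gpu_layers = conf["ngl"].asInt();
  params.n_ctx = conf["ctx_len"].asInt();
  params.embedding = conf["embedding"].asBool();
  if (params.model_alias == "unknown") {
    params.model_alias = params.model; // fall back to the model path
  }
  return params;
}

static void init_backend(const gpt_params &params) {
  llama_backend_init(params.numa);
  // The removed code then called llama.loadModel(params) and terminated the
  // process on failure; that call is omitted here since the server context
  // is not part of this hunk.
}
```
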