This repository was archived by the owner on Jul 4, 2025. It is now read-only.
File tree (Expand / Collapse): 2 files changed, +7 −5 lines changed
452452 this ->pre_prompt = jsonBody.get (" pre_prompt" , " " ).asString ();
453453 this ->repeat_last_n = jsonBody.get (" repeat_last_n" , 32 ).asInt ();
454454
455- // Set folder for llama log
456- std::string llama_log_folder =
457- jsonBody.get (" llama_log_folder" , " log/" ).asString ();
458- log_set_target (llama_log_folder + " llama.log" );
455+ if (!jsonBody[" llama_log_folder" ].isNull ()) {
456+ log_enable ();
457+ std::string llama_log_folder = jsonBody[" llama_log_folder" ].asString ();
458+ log_set_target (llama_log_folder + " llama.log" );
459+ } // Set folder for llama log
459460 }
460461#ifdef GGML_USE_CUBLAS
461462 LOG_INFO << " Setting up GGML CUBLAS PARAMS" ;
Original file line number Diff line number Diff line change 55#endif
66
77#pragma once
8+ #define LOG_TARGET stdout
89
910#include " log.h"
1011#include " utils/nitro_utils.h"
@@ -2486,7 +2487,7 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
24862487public:
24872488 llamaCPP () {
24882489 // Some default values for now below
2489- log_enable (); // Disable the log to file feature, reduce bloat for
2490+ log_disable (); // Disable the log to file feature, reduce bloat for
24902491 // target
24912492 // system ()
24922493 std::vector<std::string> llama_models =
You can’t perform that action at this time.
0 commit comments