This repository was archived by the owner on Jul 4, 2025. It is now read-only.
File tree Expand file tree Collapse file tree 2 files changed +19
-11
lines changed
infrastructure/commanders/shortcuts Expand file tree Collapse file tree 2 files changed +19
-11
lines changed Original file line number Diff line number Diff line change @@ -54,10 +54,14 @@ export class RunCommand extends CommandRunner {
5454 exit ( 1 ) ;
5555 }
5656 }
57+
58+ // Check model compatibility on this machine
59+ await checkModelCompatibility ( modelId , checkingSpinner ) ;
60+
5761 // If not exist
5862 // Try Pull
5963 if ( ! ( await this . modelsCliUsecases . getModel ( modelId ) ) ) {
60- checkingSpinner . succeed ( 'Model not found. Attempting to pull...' ) ;
64+ checkingSpinner . succeed ( ) ;
6165 await this . modelsCliUsecases . pullModel ( modelId ) . catch ( ( e : Error ) => {
6266 if ( e instanceof ModelNotFoundException )
6367 checkingSpinner . fail ( 'Model does not exist.' ) ;
@@ -73,16 +77,11 @@ export class RunCommand extends CommandRunner {
7377 ! Array . isArray ( existingModel . files ) ||
7478 /^(http|https):\/\/[^/]+\/.*/ . test ( existingModel . files [ 0 ] )
7579 ) {
76- checkingSpinner . fail (
77- `Model is not available`
78- ) ;
80+ checkingSpinner . fail ( `Model is not available` ) ;
7981 process . exit ( 1 ) ;
8082 }
8183 checkingSpinner . succeed ( 'Model found' ) ;
8284
83- // Check model compatibility on this machine
84- await checkModelCompatibility ( modelId ) ;
85-
8685 const engine = existingModel . engine || Engines . llamaCPP ;
8786 // Pull engine if not exist
8887 if (
Original file line number Diff line number Diff line change 11import { MIN_CUDA_VERSION } from "@/infrastructure/constants/cortex" ;
22import { getCudaVersion } from "./cuda" ;
3+ import ora from "ora" ;
34
4- export const checkModelCompatibility = async ( modelId : string ) => {
5+ export const checkModelCompatibility = async ( modelId : string , spinner ?: ora . Ora ) => {
6+ function log ( message : string ) {
7+ if ( spinner ) {
8+ spinner . fail ( message ) ;
9+ } else {
10+ console . error ( message ) ;
11+ }
12+ }
513 if ( modelId . includes ( 'onnx' ) && process . platform !== 'win32' ) {
6- console . error ( 'The ONNX engine does not support this OS yet.' ) ;
14+ log ( 'The ONNX engine does not support this OS yet.' ) ;
715 process . exit ( 1 ) ;
816 }
917
1018 if ( modelId . includes ( 'tensorrt-llm' ) ) {
1119 if ( process . platform === 'darwin' ) {
12- console . error ( 'Tensorrt-LLM models are not supported on this OS' ) ;
20+ log ( 'Tensorrt-LLM models are not supported on this OS' ) ;
1321 process . exit ( 1 ) ;
1422 }
1523
@@ -19,11 +27,12 @@ export const checkModelCompatibility = async (modelId: string) => {
1927 const [ requiredMajor , requiredMinor ] = MIN_CUDA_VERSION . split ( '.' ) . map ( Number ) ;
2028 const isMatchRequired = currentMajor > requiredMajor || ( currentMajor === requiredMajor && currentMinor >= requiredMinor ) ;
2129 if ( ! isMatchRequired ) {
22- console . error ( `CUDA version ${ version } is not compatible with TensorRT-LLM models. Required version: ${ MIN_CUDA_VERSION } ` ) ;
30+ log ( `CUDA version ${ version } is not compatible with TensorRT-LLM models. Required version: ${ MIN_CUDA_VERSION } ` )
2331 process . exit ( 1 ) ;
2432 }
2533 } catch ( e ) {
2634 console . error ( e . message ?? e ) ;
35+ log ( e . message ?? e ) ;
2736 process . exit ( 1 ) ;
2837 }
2938
You can’t perform that action at this time.
0 commit comments