-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun.py
More file actions
96 lines (81 loc) · 2.68 KB
/
run.py
File metadata and controls
96 lines (81 loc) · 2.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Bootstrap script to run the Speech-to-Text application
"""
import sys
import os
import argparse
import logging
from pathlib import Path  # NOTE(review): Path appears unused in this file — confirm before removing
import torch
# Add root directory to path so the `src` and `tests` packages resolve
# when this file is executed directly as a script
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# --- START DIAGNOSTIC PRINT ---
# Emitted before any argument parsing so the CUDA probe result is visible
# even when the process exits early; torch is imported above solely for this.
print(f"RUN.PY START: CUDA available? {torch.cuda.is_available()}")
# --- END DIAGNOSTIC PRINT ---
def main():
    """Parse command-line options, export configuration via environment
    variables, and dispatch to the requested entry point.

    Dispatch order: ``--test`` runs the system tests, ``--performance``
    runs the performance tests (both return implicitly), otherwise the
    real application is launched and its exit status is returned.

    Returns:
        The application's exit status on a normal launch, ``None`` after
        a test run, or ``1`` when imports or the launch itself fail.
    """
    parser = argparse.ArgumentParser(description="Speech-to-Text Application")
    parser.add_argument(
        "--model-path",
        type=str,
        default=os.path.expanduser("~/whisper-models"),
        help="Path to Whisper model directory",
    )
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
    parser.add_argument(
        "--test",
        action="store_true",
        help="Run system tests instead of the application",
    )
    parser.add_argument(
        "--performance",
        action="store_true",
        help="Run performance tests instead of the application",
    )
    parser.add_argument(
        "--use-cuda",
        action="store_true",
        help="Enable CUDA GPU acceleration (requires CUDA 12.x)",
    )
    opts = parser.parse_args()

    # Configuration is handed to the application through the environment,
    # not passed as function arguments.
    os.environ["WHISPER_MODEL_PATH"] = opts.model_path
    # Stored as the numeric logging level (e.g. "10" / "20"), matching what
    # the downstream reader of STT_LOG_LEVEL evidently expects.
    os.environ["STT_LOG_LEVEL"] = str(logging.DEBUG if opts.debug else logging.INFO)

    os.environ["STT_USE_CUDA"] = "1" if opts.use_cuda else "0"
    print(
        "CUDA acceleration enabled - using GPU for transcription"
        if opts.use_cuda
        else "Using CPU for transcription (use --use-cuda to enable GPU acceleration if compatible)"
    )

    try:
        # Test modes short-circuit before the application is imported.
        if opts.test:
            from tests.system_test import run_system_tests
            run_system_tests(opts.model_path)
            return
        if opts.performance:
            from tests.performance_test import run_performance_tests
            run_performance_tests(opts.model_path)
            return
        # Imported lazily so a broken application module still lets the
        # friendly error messages below run.
        from src.main import main as app_main
        return app_main()
    except ImportError as e:
        print(f"Error importing modules: {e}")
        print("Make sure all dependencies are installed. See requirements.txt")
        return 1
    except Exception as e:
        print(f"Error launching application: {e}")
        return 1
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())