|
3 | 3 | import tempfile |
4 | 4 | import importlib |
5 | 5 | import datetime |
| 6 | +import json |
| 7 | + |
# Root of the PyTorchSim checkout; overridable via $TORCHSIM_DIR.
CONFIG_TORCHSIM_DIR = os.environ.get("TORCHSIM_DIR", "/workspace/PyTorchSim")
# Location of the gem5 RISC-V simulator binary; overridable via $GEM5_PATH.
CONFIG_GEM5_PATH = os.environ.get("GEM5_PATH", "/workspace/gem5/build/RISCV/gem5.opt")
# Directory containing the LLVM toolchain binaries; overridable via $TORCHSIM_LLVM_PATH.
CONFIG_TORCHSIM_LLVM_PATH = os.environ.get("TORCHSIM_LLVM_PATH", "/usr/bin")

# IR dump switches, read as integers ("0"/"1"); int(False) == 0 when unset.
CONFIG_TORCHSIM_DUMP_MLIR_IR = int(os.environ.get("TORCHSIM_DUMP_MLIR_IR", False))
CONFIG_TORCHSIM_DUMP_LLVM_IR = int(os.environ.get("TORCHSIM_DUMP_LLVM_IR", False))
6 | 14 |
|
7 | 15 | def __getattr__(name): |
| 16 | + # TOGSim config |
| 17 | + config_path = os.environ.get('TOGSIM_CONFIG', |
| 18 | + default=f"{CONFIG_TORCHSIM_DIR}/configs/systolic_ws_128x128_c1_simple_noc_tpuv3.json") |
| 19 | + if name == "CONFIG_TOGSIM_CONFIG": |
| 20 | + return config_path |
| 21 | + config_json = json.load(open(config_path, 'r')) |
8 | 22 |
|
9 | 23 | # Hardware info config |
10 | | - if name == "CONFIG_VECTOR_LANE": |
11 | | - return int(os.environ.get("TORCHSIM_VECTOR_LANE", default=128)) |
12 | | - if name == "CONFIG_VECTOR_LANE_STRIDE": |
13 | | - return int(os.environ.get("TORCHSIM_VECTOR_LANE_STRIDE", default=2)) |
| 24 | + if name == "vpu_num_lanes": |
| 25 | + return config_json["vpu_num_lanes"] |
14 | 26 | if name == "CONFIG_SPAD_INFO": |
15 | 27 | return { |
16 | 28 | "spad_vaddr" : 0xD0000000, |
17 | 29 | "spad_paddr" : 0x2000000000, |
18 | | - "spad_size" : int(os.environ.get("TORCHSIM_SPAD_SIZE", default=128)) << 10 # Note: spad size per lane |
| 30 | + "spad_size" : config_json["vpu_spad_size_kb_per_lane"] << 10 # Note: spad size per lane |
19 | 31 | } |
| 32 | + |
20 | 33 | if name == "CONFIG_PRECISION": |
21 | 34 | return 4 # 32bit |
22 | 35 | if name == "CONFIG_NUM_CORES": |
23 | | - return 1 |
24 | | - if name == "CONFIG_VLEN": |
25 | | - return 256 # 256bits / 32bits = 8 [elements] |
| 36 | + return config_json["num_cores"] |
| 37 | + if name == "vpu_vector_length_bits": |
| 38 | + return config_json["vpu_vector_length_bits"] |
26 | 39 |
|
27 | | - # Tile size config |
28 | | - if name == "CONFIG_TORCHSIM_DIR": |
29 | | - return os.environ.get('TORCHSIM_DIR', default='/workspace/PyTorchSim') |
| 40 | + if name == "pytorchsim_functional_mode": |
| 41 | + return config_json['pytorchsim_functional_mode'] |
| 42 | + if name == "pytorchsim_timing_mode": |
| 43 | + return config_json['pytorchsim_timing_mode'] |
30 | 44 |
|
31 | | - if name == "CONFIG_TORCHSIM_DUMP_PATH": |
32 | | - return os.environ.get('TORCHSIM_DUMP_PATH', default = __getattr__('CONFIG_TORCHSIM_DIR')) |
33 | | - if name == "CONFIG_TORCHSIM_LOG_PATH": |
34 | | - return os.environ.get('TORCHSIM_DUMP_LOG_PATH', default = os.path.join(__getattr__("CONFIG_TORCHSIM_DIR"), "outputs", datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))) |
35 | | - if name == "CONFIG_TORCHSIM_FUNCTIONAL_MODE": |
36 | | - return int(os.environ.get('TORCHSIM_FUNCTIONAL_MODE', default=True)) |
37 | | - if name == "CONFIG_TORCHSIM_TIMING_MODE": |
38 | | - return int(os.environ.get("TORCHSIM_TIMING_MODE", True)) |
39 | | - if name == "CONFIG_CLEANUP_DUMP_ARGS": |
40 | | - return int(os.environ.get('CLEANUP_DUMP_ARGS', default=False)) |
41 | | - |
42 | | - # LLVM PATH |
43 | | - if name == "CONFIG_TORCHSIM_LLVM_PATH": |
44 | | - return os.environ.get('TORCHSIM_LLVM_PATH', default="/usr/bin") |
45 | | - if name == "CONFIG_TORCHSIM_DUMP_MLIR_IR": |
46 | | - return int(os.environ.get("TORCHSIM_DUMP_MLIR_IR", default=False)) |
47 | | - if name == "CONFIG_TORCHSIM_DUMP_LLVM_IR": |
48 | | - return int(os.environ.get("TORCHSIM_DUMP_LLVM_IR", default=False)) |
| 45 | + # Mapping strategy |
| 46 | + if name == "codegen_mapping_strategy": |
| 47 | + codegen_mapping_strategy = config_json["codegen_mapping_strategy"] |
| 48 | + assert(codegen_mapping_strategy in ["heuristic", "autotune", "external-then-heuristic", "external-then-autotune"]), "Invalid mapping strategy!" |
| 49 | + return codegen_mapping_strategy |
49 | 50 |
|
50 | | - # TOGSim config |
51 | | - if name == "CONFIG_TOGSIM_CONFIG": |
52 | | - return os.environ.get('TOGSIM_CONFIG', |
53 | | - default=f"{__getattr__('CONFIG_TORCHSIM_DIR')}/configs/systolic_ws_128x128_c1_simple_noc_tpuv3.json") |
54 | | - if name == "CONFIG_TOGSIM_EAGER_MODE": |
55 | | - return int(os.environ.get("TOGSIM_EAGER_MODE", default=False)) |
56 | | - if name == "CONFIG_TOGSIM_DEBUG_LEVEL": |
57 | | - return os.environ.get("TOGSIM_DEBUG_LEVEL", "") |
58 | | - |
59 | | - # GEM5 config |
60 | | - if name == "CONFIG_GEM5_PATH": |
61 | | - return os.environ.get('GEM5_PATH', default="/workspace/gem5/build/RISCV/gem5.opt") |
62 | | - |
63 | | - # Mapping Policy |
64 | | - if name == "CONFIG_MAPPING_POLICY": |
65 | | - return os.environ.get('TORCHSIM_MAPPING_POLICY', default="heuristic") # heuristic, manual, autotune |
66 | | - |
67 | | - # Manual Tile Size |
68 | | - if name == "CONFIG_TILE_M": |
69 | | - return int(os.getenv("TORCHSIM_TILE_M", __getattr__("CONFIG_VECTOR_LANE"))) |
70 | | - if name == "CONFIG_TILE_N": |
71 | | - return int(os.getenv("TORCHSIM_TILE_N", __getattr__("CONFIG_VECTOR_LANE"))) |
72 | | - if name == "CONFIG_TILE_K": |
73 | | - return int(os.getenv("TORCHSIM_TILE_K", __getattr__("CONFIG_VECTOR_LANE"))) |
74 | | - |
75 | | - if name == "CONFIG_MANUAL_SUBTILE_SIZE": |
76 | | - return int(os.environ.get('TORCHSIM_MANUAL_SUBTILE_SIZE', default=False)) |
77 | | - if name == "CONFIG_SUBTILE_M": |
78 | | - return int(os.environ.get('TORCHSIM_SUBTILE_M', default=__getattr__("CONFIG_VECTOR_LANE"))) |
79 | | - if name == "CONFIG_SUBTILE_N": |
80 | | - return int(os.environ.get('TORCHSIM_SUBTILE_N', default=__getattr__("CONFIG_VECTOR_LANE"))) |
81 | | - if name == "CONFIG_SUBTILE_K": |
82 | | - return int(os.environ.get('TORCHSIM_SUBTILE_K', default=__getattr__("CONFIG_VECTOR_LANE"))) |
| 51 | + if name == "codegen_external_mapping_file": |
| 52 | + return config_json["codegen_external_mapping_file"] |
83 | 53 |
|
84 | 54 | # Autotune config |
85 | | - if name == "CONFIG_MAX_AUTOTUNE_TRY": |
86 | | - return int(os.environ.get('MAX_AUTOTUNE_TRY', default=10)) |
87 | | - if name == "CONFIG_AUTOTUNE_TEMPLATE_TOPK": |
88 | | - return int(os.environ.get('AUTOTUNE_TEMPLATE_TOPK', default=4)) |
89 | | - |
90 | | - if name == "CONFIG_GEMM_CHEATSHEET_PATH": |
91 | | - return os.environ.get('TORCHSIM_GEMM_CHEATSHEET_PATH', |
92 | | - default=f"{__getattr__('CONFIG_TORCHSIM_DIR')}/validation/gemm_tpuv3_cheatsheet.json") |
| 55 | + if name == "codegen_autotune_max_retry": |
| 56 | + return config_json["codegen_autotune_max_retry"] |
| 57 | + if name == "codegen_autotune_template_topk": |
| 58 | + return config_json["codegen_autotune_template_topk"] |
| 59 | + |
93 | 60 | # Compiler Optimization |
94 | | - if name == "CONFIG_COMPILER_OPTIMIZATION": |
95 | | - return os.environ.get('TORCHSIM_COMPILER_OPTIMIZATION', default="all") # options: all, none, custom |
| 61 | + if name == "codegen_compiler_optimization": |
| 62 | + return config_json["codegen_compiler_optimization"] |
96 | 63 |
|
97 | 64 | # Advanced fusion options |
98 | 65 | if name == "CONFIG_FUSION": |
99 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "fusion" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 66 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "fusion" in __getattr__("codegen_compiler_optimization")) else False |
100 | 67 | if name == "CONFIG_FUSION_REDUCTION_EPILOGUE": |
101 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "reduction_epliogue" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 68 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "reduction_epliogue" in __getattr__("codegen_compiler_optimization")) else False |
102 | 69 | if name == "CONFIG_FUSION_REDUCTION_REDUCTION": |
103 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "reduction_reduction" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 70 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "reduction_reduction" in __getattr__("codegen_compiler_optimization")) else False |
104 | 71 | if name == "CONFIG_FUSION_PROLOGUE": |
105 | | - return True if ((__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all") or ("prologue" in __getattr__("CONFIG_COMPILER_OPTIMIZATION"))) else False |
| 72 | + return True if ((__getattr__("codegen_compiler_optimization") == "all") or ("prologue" in __getattr__("codegen_compiler_optimization"))) else False |
106 | 73 | if name == "CONFIG_SINGLE_BATCH_CONV": |
107 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "single_batch_conv" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 74 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "single_batch_conv" in __getattr__("codegen_compiler_optimization")) else False |
108 | 75 | if name == "CONFIG_MULTI_TILE_CONV": |
109 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "multi_tile_conv" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 76 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "multi_tile_conv" in __getattr__("codegen_compiler_optimization")) else False |
110 | 77 | if name == "CONFIG_SUBTILE": |
111 | | - return True if (__getattr__("CONFIG_COMPILER_OPTIMIZATION") == "all" or "subtile" in __getattr__("CONFIG_COMPILER_OPTIMIZATION")) else False |
| 78 | + return True if (__getattr__("codegen_compiler_optimization") == "all" or "subtile" in __getattr__("codegen_compiler_optimization")) else False |
| 79 | + |
| 80 | + if name == "CONFIG_TOGSIM_DEBUG_LEVEL": |
| 81 | + return os.environ.get("TOGSIM_DEBUG_LEVEL", "") |
| 82 | + if name == "CONFIG_TORCHSIM_DUMP_PATH": |
| 83 | + return os.environ.get('TORCHSIM_DUMP_PATH', default = CONFIG_TORCHSIM_DIR) |
| 84 | + if name == "CONFIG_TORCHSIM_LOG_PATH": |
| 85 | + return os.environ.get('TORCHSIM_DUMP_LOG_PATH', default = os.path.join(CONFIG_TORCHSIM_DIR, "outputs", datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))) |
| 86 | + |
| 87 | + if name == "CONFIG_TOGSIM_EAGER_MODE": |
| 88 | + return int(os.environ.get("TOGSIM_EAGER_MODE", default=False)) |
112 | 89 |
|
113 | 90 | # SRAM Buffer allocation plan |
114 | 91 | def load_plan_from_module(module_path): |
|
0 commit comments