-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_all_tests.py
More file actions
121 lines (108 loc) · 3.89 KB
/
run_all_tests.py
File metadata and controls
121 lines (108 loc) · 3.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import sys
import traceback
# Outcome of every test executed so far: True for pass/skip, False for fail.
results = []

def run_test(name, fn):
    """Invoke one test callable and record its outcome in ``results``.

    A return value of ``None`` is treated as a pass (many tests only print
    and return nothing).  An ``ImportError`` raised by the test is treated
    as a skipped test and recorded as a pass; any other exception is
    reported with a traceback and recorded as a failure.
    """
    print(f"[TEST] {name} ...", end=" ")
    try:
        outcome = fn()
    except ImportError as e:
        # Missing optional dependency: skip, but do not count as a failure.
        print(f"SKIPPED (missing dependency: {e})")
        results.append(True)
        return
    except Exception as e:
        print(f"FAIL ({e})")
        traceback.print_exc()
        results.append(False)
        return
    # Some tests just print and don't return anything meaningful.
    passed = True if outcome is None else outcome
    print("PASS" if passed else "FAIL")
    results.append(passed)
# ---------------------------------------------------------------------------
# Test sections.
#
# Every section imports one project module and runs its tests.  The sections
# previously duplicated the same try / print-FAIL / traceback / append
# boilerplate; it now lives once in _section().  Failure policy:
#   * An ImportError escaping a section (the project module or one of its
#     dependencies is missing) counts as a SKIP, consistent with how
#     run_test() treats a missing dependency.  (Previously a failed import
#     was counted as FAIL, disagreeing with run_test's own handling.)
#   * Any other exception counts as a FAIL with a traceback -- unless the
#     section is marked optional, in which case it is skipped.
# ---------------------------------------------------------------------------
def _section(label, body, optional=False):
    """Run one test section and record its outcome in ``results``.

    ``body`` is a zero-argument callable that performs the imports and test
    calls and appends its own successes to ``results``; only exceptions that
    escape it are recorded here.  ``optional`` sections never fail the run.
    """
    try:
        body()
    except ImportError as e:
        print(f"[{label}] SKIPPED (missing dependency: {e})")
        results.append(True)
    except Exception as e:
        if optional:
            print(f"[{label}] SKIPPED ({e})")
            results.append(True)
        else:
            print(f"[{label}] FAIL ({e})")
            traceback.print_exc()
            results.append(False)

def _attr_test(module_name, attr):
    """Return a section body that imports *module_name* and runs its test
    function *attr* through run_test()."""
    def body():
        import importlib  # local import keeps this block self-contained
        mod = importlib.import_module(module_name)
        run_test(f"{module_name}.{attr}", getattr(mod, attr))
    return body

# Data pipeline tests
def _data_tests():
    from data.run_tests import main as data_main
    print("[TEST] data.run_tests ...")
    data_main()
    results.append(True)

_section("data.run_tests", _data_tests)

# Model tests (classic unittest suite)
def _model_colorfield_tests():
    import unittest
    print("[TEST] model/test_model_colorfield.py ...")
    r = unittest.TextTestRunner().run(
        unittest.defaultTestLoader.loadTestsFromName('model.test_model_colorfield'))
    results.append(r.wasSuccessful())

_section("model/test_model_colorfield.py", _model_colorfield_tests)

# Visualization/logging, gradient accumulation, LR scheduler
for _mod, _fn in [
    ("train.visualization", "test_visualization_manager"),
    ("train.gradient_accumulation", "test_gradient_accumulation"),
    ("train.lr_scheduler_utils", "test_get_scheduler"),
]:
    _section(f"{_mod}.{_fn}", _attr_test(_mod, _fn))

# Losses/metrics: run every test_* function exposed by model.losses
def _losses_tests():
    from model import losses
    for name in dir(losses):
        if name.startswith('test_'):
            run_test(f"model.losses.{name}", getattr(losses, name))

_section("model.losses", _losses_tests)

# Marching cubes, texture atlas, mesh simplify
for _mod, _fn in [
    ("model.marching_cubes", "test_marching_cubes_extract"),
    ("model.texture_atlas", "test_generate_texture_atlas"),
    ("model.mesh_simplify", "test_simplify_mesh"),
]:
    _section(f"{_mod}.{_fn}", _attr_test(_mod, _fn))

# Renderer minimal test (optional: the call requests renderer='nvdiffrast'
# and device='cuda', so any failure here is reported as a skip)
def _renderer_test():
    from model.renderer_pipeline import RendererPipeline
    run_test("model.renderer_pipeline.RendererPipeline.test_minimal_render",
             lambda: RendererPipeline.test_minimal_render(renderer='nvdiffrast', device='cuda'))

_section("model.renderer_pipeline.RendererPipeline.test_minimal_render",
         _renderer_test, optional=True)
# Final summary: report the pass count and exit non-zero if anything failed.
print("\nSummary:")
n_passed = sum(1 for outcome in results if outcome)
print(f"Passed {n_passed}/{len(results)} tests.")
sys.exit(0 if all(results) else 1)