Skip to content

Commit c86bb2b

Browse files
committed
skip unimplemented error; update workflow
1 parent 1e0aeaa commit c86bb2b

9 files changed

Lines changed: 40 additions & 6 deletions

File tree

.github/workflows/cpu-inference.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,4 +63,7 @@ jobs:
6363
unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
6464
if [[ -d ./torch-extensions ]]; then rm -rf ./torch-extensions; fi
6565
cd tests
66-
TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'inference' unit/inference/test_inference_config.py
66+
pytest -v -s unit/autotuning/ unit/checkpoint/ unit/comm/ unit/compression/ unit/elasticity/ unit/launcher/ unit/profiling/
67+
TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'seq_inference' unit/
68+
TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'inference_ops' unit/
69+
TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest --forked -n 4 -m 'inference' unit/

setup.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -153,6 +153,7 @@ def op_enabled(op_name):
153153
for op_name, builder in ALL_OPS.items():
154154
op_compatible = builder.is_compatible()
155155
compatible_ops[op_name] = op_compatible
156+
compatible_ops["deepspeed_not_implemented"] = False
156157

157158
# If op is requested but not available, throw an error.
158159
if op_enabled(op_name) and not op_compatible:

tests/unit/checkpoint/test_latest_checkpoint.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,15 @@
55

66
import deepspeed
77

8+
import pytest
89
from unit.common import DistributedTest
910
from unit.simple_model import *
1011

1112
from unit.checkpoint.common import checkpoint_correctness_verification
13+
from deepspeed.ops.op_builder import FusedAdamBuilder
14+
15+
if not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]:
16+
pytest.skip("This op has not been implemented on this system backend.", allow_module_level=True)
1217

1318

1419
class TestLatestCheckpoint(DistributedTest):

tests/unit/inference/test_inference.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,10 @@
2020
from deepspeed.model_implementations import DeepSpeedTransformerInference
2121
from torch import nn
2222
from deepspeed.accelerator import get_accelerator
23+
from deepspeed.ops.op_builder import InferenceBuilder
24+
25+
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
26+
pytest.skip("This op has not been implemented on this system.", allow_module_level=True)
2327

2428
rocm_version = OpBuilder.installed_rocm_version()
2529
if rocm_version != (0, 0):

tests/unit/inference/test_model_profiling.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,10 @@
1111
from transformers import pipeline
1212
from unit.common import DistributedTest
1313
from deepspeed.accelerator import get_accelerator
14+
from deepspeed.ops.op_builder import InferenceBuilder
15+
16+
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
17+
pytest.skip("This op has not been implemented on this system.", allow_module_level=True)
1418

1519

1620
@pytest.fixture

tests/unit/ops/quantizer/test_fake_quantization.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,12 @@
55

66
import torch
77
import pytest
8+
import deepspeed
89
from deepspeed.accelerator import get_accelerator
9-
from deepspeed.ops import op_builder
10+
from deepspeed.ops.op_builder import QuantizerBuilder
11+
12+
if not deepspeed.ops.__compatible_ops__[QuantizerBuilder.NAME]:
13+
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
1014

1115
quantizer_cuda_module = None
1216

@@ -36,7 +40,7 @@ def run_quant_dequant(inputs, groups, bits):
3640
global quantizer_cuda_module
3741

3842
if quantizer_cuda_module is None:
39-
quantizer_cuda_module = op_builder.QuantizerBuilder().load()
43+
quantizer_cuda_module = QuantizerBuilder().load()
4044
return quantizer_cuda_module.ds_quantize_fp16(inputs, groups, bits)
4145

4246

tests/unit/ops/quantizer/test_quantize.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,20 @@
55

66
import pytest
77
import torch
8-
from deepspeed.ops import op_builder
8+
import deepspeed
9+
from deepspeed.ops.op_builder import QuantizerBuilder
910
from deepspeed.accelerator import get_accelerator
1011

12+
if not deepspeed.ops.__compatible_ops__[QuantizerBuilder.NAME]:
13+
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
14+
1115
inference_module = None
1216

1317

1418
def run_quantize_ds(activations, num_groups, q_bits, is_symmetric_quant):
1519
global inference_module
1620
if inference_module is None:
17-
inference_module = op_builder.QuantizerBuilder().load()
21+
inference_module = QuantizerBuilder().load()
1822

1923
return inference_module.quantize(activations, num_groups, q_bits,
2024
inference_module.Symmetric if is_symmetric_quant else inference_module.Asymmetric)
@@ -23,7 +27,7 @@ def run_quantize_ds(activations, num_groups, q_bits, is_symmetric_quant):
2327
def run_dequantize_ds(activations, params, num_groups, q_bits, is_symmetric_quant):
2428
global inference_module
2529
if inference_module is None:
26-
inference_module = op_builder.QuantizerBuilder().load()
30+
inference_module = QuantizerBuilder().load()
2731
return inference_module.dequantize(
2832
activations,
2933
params,

tests/unit/ops/spatial/test_nhwc_bias_add.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,14 @@
55

66
import pytest
77
import torch
8+
import deepspeed
9+
from deepspeed.ops.op_builder import SpatialInferenceBuilder
810
from deepspeed.ops.transformer.inference.bias_add import nhwc_bias_add
911
from deepspeed.accelerator import get_accelerator
1012

13+
if not deepspeed.ops.__compatible_ops__[SpatialInferenceBuilder.NAME]:
14+
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
15+
1116

1217
def allclose(x, y):
1318
assert x.dtype == y.dtype

tests/unit/profiling/flops_profiler/test_flops_profiler.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,10 @@
1010
from unit.simple_model import SimpleModel, random_dataloader
1111
from unit.common import DistributedTest
1212
from unit.util import required_minimum_torch_version
13+
from deepspeed.ops.op_builder import FusedAdamBuilder
14+
15+
if not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]:
16+
pytest.skip("This op has not been implemented on this system.", allow_module_level=True)
1317

1418
pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=3),
1519
reason='requires Pytorch version 1.3 or above')

0 commit comments

Comments
 (0)