diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index ed210a331..e2eaa3ad4 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -441,6 +441,17 @@ jobs:
           cd ${DEEPLINK_PATH}/${GITHUB_RUN_NUMBER}/Build-Ascend-910b/dipu
           source scripts/ci/ascend/ci_ascend_env.sh
           bash tests/run_ascend_tests.sh
+
+      - name: Run dicp op test
+        run: |
+          set -ex
+          cd ${DEEPLINK_PATH}/${GITHUB_RUN_NUMBER}/Build-Ascend-910b/dipu
+          source scripts/ci/ascend/ci_ascend_env.sh
+          cd ${DEEPLINK_PATH}/${GITHUB_RUN_NUMBER}/Build-Ascend-910b/dicp
+          pip uninstall dicp -y
+          python setup.py clean && python setup.py install --user
+          export TEST_DIR=$(pwd)/test
+          bash ${TEST_DIR}/ascend_scripts/ops/run_test_ops.sh false
 
   Test-One-Iter-Ascend-910b:
     name: Test-one-iter-ascend-910b
diff --git a/dicp/dicp/dynamo_bridge/utils.py b/dicp/dicp/dynamo_bridge/utils.py
index 2df493450..ff65385c3 100644
--- a/dicp/dicp/dynamo_bridge/utils.py
+++ b/dicp/dicp/dynamo_bridge/utils.py
@@ -1,4 +1,5 @@
 import copy
+from pathlib import Path
 from typing import Any, Dict, Tuple
 
 import torch.fx
@@ -7,6 +8,7 @@
 
 
 def save_cpu_gm(gm: torch.fx.GraphModule, folder: str):
+    Path(folder).mkdir(exist_ok=True)
     cpu_gm = copy_gm_to_cpu(gm)
     grap_code = cpu_gm.code
     graph_key = code_hash(grap_code)
diff --git a/dicp/dicp/vendor/AscendGraph/compile_job.py b/dicp/dicp/vendor/AscendGraph/compile_job.py
index 439d4d1e9..9c0cc8f1c 100644
--- a/dicp/dicp/vendor/AscendGraph/compile_job.py
+++ b/dicp/dicp/vendor/AscendGraph/compile_job.py
@@ -3,7 +3,7 @@
 import time
 
 from dicp.dynamo_bridge.compile import DeviceCompileJob
-from torch._inductor.codecache import pick_vec_isa, cpp_compile_command, write, get_hash
+from torch._inductor.codecache import pick_vec_isa, cpp_compile_command, write, code_hash
 from torch._inductor import exc
 
 
@@ -24,7 +24,7 @@ def __init__(self, source_code) -> None:
             source_code.strip(),
             "json",
             extra=cpp_compile_command("i", "o", vec_isa=picked_vec_isa) +
-            'local_rank' + str(self._local_rank) + get_hash(compile_file_code, 'code')
+            'local_rank' + str(self._local_rank) + code_hash(compile_file_code)
         )
         self._output_graph_path = self._input_path[:-5] + '/graph'
         print('output_path: ', self._output_graph_path)
diff --git a/dicp/dicp/vendor/AscendGraph/opset_convert.py b/dicp/dicp/vendor/AscendGraph/opset_convert.py
index 9913055e7..2d2a33e26 100644
--- a/dicp/dicp/vendor/AscendGraph/opset_convert.py
+++ b/dicp/dicp/vendor/AscendGraph/opset_convert.py
@@ -1,12 +1,15 @@
 import torch
-from dicp.dynamo_bridge.op_transformer import BackendPatternMatcherTransformer
+from dicp.dynamo_bridge.compile_fx import is_torch_210
 from dicp.vendor.AscendGraph.ascend_op import MatMul, CastToCpu, IdentityInp
 from dicp.vendor.AscendGraph.conversion import AtenToAscendTransformer
-from dicp.vendor.AscendGraph.pattern_replacement import (
-    ascend_pattern_matcher,
-    aten_patterns_cls_list,
-    ascend_patterns_cls_list
-)
+
+if is_torch_210:
+    from dicp.dynamo_bridge.op_transformer import BackendPatternMatcherTransformer
+    from dicp.vendor.AscendGraph.pattern_replacement import (
+        ascend_pattern_matcher,
+        aten_patterns_cls_list,
+        ascend_patterns_cls_list
+    )
 
 
 # 该pass需要在FuseTransposeMatmul之后
@@ -74,13 +77,14 @@ def symint_in_inputs(nodes):
 def ascendgraph_opset_convert(
     gm: torch.fx.GraphModule,
 ):
-    gm = BackendPatternMatcherTransformer(
-        ascend_pattern_matcher, aten_patterns_cls_list).transform(gm)
+    if is_torch_210:
+        gm = BackendPatternMatcherTransformer(
+            ascend_pattern_matcher, aten_patterns_cls_list).transform(gm)
     gm = AtenToAscendTransformer(gm).transform()
 
     # For bug in pytorch
     # Avoid for dynamic shape
-    if not symint_in_inputs(list(gm.graph.nodes)):
+    if is_torch_210 and not symint_in_inputs(list(gm.graph.nodes)):
         gm = BackendPatternMatcherTransformer(
             ascend_pattern_matcher, ascend_patterns_cls_list).transform(gm)
     gm = OutputMarkPass().transform(gm)
diff --git a/dicp/test/common/utils.py b/dicp/test/common/utils.py
index ad416ab28..b89947674 100644
--- a/dicp/test/common/utils.py
+++ b/dicp/test/common/utils.py
@@ -5,6 +5,7 @@
 import torch._dynamo as dynamo
 import torch_dipu
 from dicp.dynamo_bridge import pt_patch  # noqa F401
+from dicp.dynamo_bridge.compile_fx import is_torch_210
 
 torch.manual_seed(1)
 random.seed(1)
@@ -22,12 +23,9 @@ def __init__(self, static_size, dynamic_size):
 
 
 def update_dynamo_config(dynamic=False):
-    if dynamic:
-        dynamo.config.dynamic_shapes = True
-        dynamo.config.assume_static_by_default = False
-    else:
-        dynamo.config.dynamic_shapes = False
-        dynamo.config.assume_static_by_default = True
+    dynamo.config.dynamic_shapes = dynamic
+    if is_torch_210:
+        dynamo.config.assume_static_by_default = not dynamic
 
 
 def get_device():
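Note: `is_torch_210`, which the changes above import from `dicp.dynamo_bridge.compile_fx`, is not defined in this diff. A minimal sketch of the kind of version gate it presumably is (the actual definition lives in `compile_fx.py` and may differ):

```python
# Hypothetical sketch of the is_torch_210 flag used above to gate the
# pattern-matcher passes and assume_static_by_default; the real definition
# in dicp.dynamo_bridge.compile_fx may differ.
import torch
from packaging import version

# True when the installed torch is 2.1.0 or newer (strip any local suffix
# such as "+cpu" before comparing).
is_torch_210 = version.parse(torch.__version__.split("+")[0]) >= version.parse("2.1.0")
```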