From 7d62254a1947a77909020272cb5e29619a797499 Mon Sep 17 00:00:00 2001
From: z00621985
Date: Wed, 1 May 2024 18:16:07 +0800
Subject: [PATCH] fix matmul bias error

---
 .../device/ascend/optimizer/CMakeLists.txt    |  1 -
 .../optimizer/ge_backend_optimization.cc      |  2 -
 .../optimizer/ir_fission/matmul_add_bias.cc   | 58 -------------------
 .../optimizer/ir_fission/matmul_add_bias.h    | 38 ------------
 .../matrix_calculation_ops_declare.cc         |  6 +-
 .../optimizer/graph/attr_to_args_pass.cc      | 23 ++++----
 6 files changed, 15 insertions(+), 113 deletions(-)
 delete mode 100644 mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.cc
 delete mode 100644 mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h

diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/CMakeLists.txt b/mindspore/ccsrc/plugin/device/ascend/optimizer/CMakeLists.txt
index 0d451c127510..a5d7fe2117be 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/CMakeLists.txt
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/CMakeLists.txt
@@ -30,7 +30,6 @@ file(GLOB_RECURSE MS_OPTIMIZER_910B RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
     "./ir_fission/bn_split.cc"
     "./ir_fission/bn_grad_split.cc"
     "./ir_fission/seed_adapter.cc"
-    "./ir_fission/matmul_add_bias.cc"
     "./ir_fission/tensor_scatter_fission.cc"
     "./ir_fission/adam_weight_decay_fission.cc"
     "./ir_fission/batch_norm_grad_infer_fission.cc"
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge_backend_optimization.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge_backend_optimization.cc
index a9c882e734ee..898c96e6dddb 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ge_backend_optimization.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ge_backend_optimization.cc
@@ -52,7 +52,6 @@
 #include "plugin/device/ascend/optimizer/ge/scalar_unify_mindir.h"
 #include "plugin/device/ascend/optimizer/ge/tuple_unify_mindir.h"
 #include "plugin/device/ascend/optimizer/ir_fission/seed_adapter.h"
-#include "plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h"
 #include "plugin/device/ascend/optimizer/ir_fission/ascend_convert_tuple_input_to_dynamic_input.h"
 #include "plugin/device/ascend/optimizer/backend_common_unify_mindir.h"
 #include "plugin/device/ascend/optimizer/ge/remove_tensor_to_scalar_or_tuple_ops.h"
@@ -94,7 +93,6 @@ void GEBackendOptimization(const KernelGraphPtr &kernel_graph) {
   opt_ge_pm->AddPass(std::make_shared());
   opt_ge_pm->AddPass(std::make_shared());
   opt_ge_pm->AddPass(std::make_shared());
-  opt_ge_pm->AddPass(std::make_shared<MatMulAddBias>());
   opt_ge_pm->AddPass(std::make_shared());
   opt_ge_pm->AddPass(std::make_shared(true, true));
   opt_ge_pm->AddPass(std::make_shared("unfold_nested_output"));
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.cc
deleted file mode 100644
index 4af94a8d1b6b..000000000000
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Copyright 2024 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h"
-
-#include <memory>
-#include <vector>
-
-#include "ops/framework_op_name.h"
-#include "ops/array_ops.h"
-#include "ops/math_op_name.h"
-#include "include/backend/anf_runtime_algorithm.h"
-#include "include/common/utils/utils.h"
-#include "include/backend/optimizer/helper.h"
-#include "utils/trace_base.h"
-
-namespace mindspore {
-namespace opt {
-namespace {
-constexpr size_t kMatMulInputSize = 4;
-}  // namespace
-const AnfNodePtr MatMulAddBias::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &) const {
-  MS_EXCEPTION_IF_NULL(graph);
-  MS_EXCEPTION_IF_NULL(node);
-  auto cnode = CheckAnfNodeIfCNodeAndInputSize(node, kMatMulInputSize);
-  auto none_value = std::make_shared<None>();
-  auto none_node = NewValueNode(none_value);
-  none_node->set_abstract(none_value->ToAbstract());
-  auto new_inputs =
-    std::vector<AnfNodePtr>{cnode->input(kIndex0), cnode->input(kIndex1), cnode->input(kIndex2), none_node,
-                            cnode->input(kIndex3), cnode->input(kIndex4)};
-  auto new_cnode = NewCNode(new_inputs, graph);
-  new_cnode->set_scope(node->scope());
-  new_cnode->set_abstract(cnode->abstract());
-  return new_cnode;
-}
-
-const BaseRef MatMulAddBias::DefinePattern() const {
-  VarPtr X1 = std::make_shared<Var>();
-  VarPtr X2 = std::make_shared<Var>();
-  VarPtr X3 = std::make_shared<Var>();
-  VarPtr X4 = std::make_shared<Var>();
-  return VectorRef({prim::kPrimMatMul, X1, X2, X3, X4});
-}
-}  // namespace opt
-}  // namespace mindspore
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h
deleted file mode 100644
index cd63ac76d0d1..000000000000
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fission/matmul_add_bias.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Copyright 2024 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_MATMUL_TO_MATMULV2_H_
-#define MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_MATMUL_TO_MATMULV2_H_
-#include <memory>
-#include <string>
-
-#include "include/backend/optimizer/pass.h"
-#include "ir/func_graph.h"
-#include "ir/anf.h"
-#include "include/backend/optimizer/optimizer.h"
-
-namespace mindspore {
-namespace opt {
-class MatMulAddBias : public PatternProcessPass {
- public:
-  explicit MatMulAddBias(bool multigraph = true, const string &name = "matmul_add_bias")
-      : PatternProcessPass(name, multigraph) {}
-  ~MatMulAddBias() override = default;
-  const BaseRef DefinePattern() const override;
-  const AnfNodePtr Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &) const override;
-};
-}  // namespace opt
-}  // namespace mindspore
-#endif  // MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_MATMUL_TO_MATMULV2_H_
diff --git a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
index d79f9cfcd043..c2a83ee70712 100644
--- a/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
+++ b/mindspore/ccsrc/transform/graph_ir/op_declare/matrix_calculation_ops_declare.cc
@@ -102,10 +102,10 @@ OUTPUT_MAP(ScatterNdSub) = {{0, OUTPUT_DESC(var)}};
 REG_ADPT_DESC(ScatterNdSub, kNameScatterNdSub, ADPT_DESC(ScatterNdSub))
 
 // MatMulV2
-INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(bias)}};
+INPUT_MAP(MatMulV2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {5, INPUT_DESC(bias)}};
 ATTR_MAP(MatMulV2) = EMPTY_ATTR_MAP;
-INPUT_ATTR_MAP(MatMulV2) = {{kIndex4, ATTR_DESC(transpose_x1, AnyTraits<bool>())},
-                            {kIndex5, ATTR_DESC(transpose_x2, AnyTraits<bool>())}};
+INPUT_ATTR_MAP(MatMulV2) = {{kIndex3, ATTR_DESC(transpose_x1, AnyTraits<bool>())},
+                            {kIndex4, ATTR_DESC(transpose_x2, AnyTraits<bool>())}};
 OUTPUT_MAP(MatMulV2) = {{0, OUTPUT_DESC(y)}};
 REG_ADPT_DESC(MatMulV2, prim::kPrimMatMul->name(), ADPT_DESC(MatMulV2))
 REG_ADPT_DESC(MatMulV2Duplicate, prim::kPrimMatMulV2->name(), ADPT_DESC(MatMulV2))
diff --git a/mindspore/lite/tools/optimizer/graph/attr_to_args_pass.cc b/mindspore/lite/tools/optimizer/graph/attr_to_args_pass.cc
index 7ab7b035b76c..efe119532beb 100644
--- a/mindspore/lite/tools/optimizer/graph/attr_to_args_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/attr_to_args_pass.cc
@@ -21,6 +21,7 @@
 #include "src/common/log_util.h"
 #include "ops/primitive_c.h"
 #include "ops/base_operator.h"
+#include "utils/anf_utils.h"
 
 namespace mindspore {
 namespace opt {
@@ -207,15 +208,15 @@ static const std::map> kAttrMapNeedAdjust = {
   }},
 };
 
-constexpr size_t kMatMulInputSizeWithoutBias = 3;  // primitive, x1, x2
+constexpr size_t kMatMulInputSizeWithBias = 6;  // primitive, x1, x2, bias, transpose_a, transpose_b
 constexpr auto kMatMulOpName = "MatMul";
 constexpr auto kMatMulV2OpName = "MatMulV2";
 
-void AddBiasForMatMul(const FuncGraphManagerPtr &manager, const CNodePtr &cnode) {
-  auto none_value = std::make_shared<None>();
-  auto none_node = NewValueNode(none_value);
-  none_node->set_abstract(none_value->ToAbstract());
-  manager->AddEdge(cnode, none_node);
+void RearrangeBiasForMatMul(const FuncGraphManagerPtr &manager, const CNodePtr &cnode) {
+  auto node_inputs = cnode->inputs();
+  auto bias_add_node_it = node_inputs.begin() + kIndexThree;
+  std::rotate(bias_add_node_it, bias_add_node_it + 1, node_inputs.end());
+  cnode->set_inputs(node_inputs);
 }
 
 int ConvertAttrToArgsForNode(const AnfNodePtr &node, const FuncGraphManagerPtr &manager) {
@@ -230,11 +231,6 @@ int ConvertAttrToArgsForNode(const AnfNodePtr &node, const FuncGraphManagerPtr &
   // Create new primitive and inherit the origin attributes.
   MS_LOG(INFO) << "Begin to convert Primitive to Primitive_Func for node: " << node->DebugString()
                << "new name: " << prim_name;
-  if ((prim_name == kMatMulOpName || prim_name == kMatMulV2OpName) &&
-      cnode->inputs().size() == kMatMulInputSizeWithoutBias) {
-    AddBiasForMatMul(manager, cnode);
-  }
-
   for (const auto &attr : attrs_adjust) {
     if (origin_attrs.count(attr) == 0) {
       MS_LOG(INFO) << "Origin primitive: " << prim_name << " has no attribute : " << attr;
@@ -248,6 +244,11 @@ int ConvertAttrToArgsForNode(const AnfNodePtr &node, const FuncGraphManagerPtr &
       manager->AddEdge(cnode, new_value_node);
     }
   }
+
+  if ((prim_name == kMatMulOpName || prim_name == kMatMulV2OpName) &&
+      cnode->inputs().size() == kMatMulInputSizeWithBias) {
+    RearrangeBiasForMatMul(manager, cnode);
+  }
   MS_LOG(INFO) << "End, new node: " << node->DebugString();
   return RET_OK;
 }
-- 
Gitee