/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.h"

#include "arm_compute/core/Validate.h"

#include <algorithm>
#include <iterator>

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
namespace
{
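// Collect the ids of a list of tensor infos, for use as tensor ids in the dependency graph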
std::vector<DependencyGraph::TensorId> get_tensor_ids(const std::vector<const ITensorInfo *> tensors)
{
    std::vector<DependencyGraph::TensorId> tensor_ids{};
    std::transform(
        std::begin(tensors), std::end(tensors),
        std::back_inserter(tensor_ids),
        [](const auto & t)
        {
            return t->id();
        });
    return tensor_ids;
}

} // namespace

Operator::Operator(OperatorId id, GpuOperatorType operator_type, const ArgumentPack<ITensorInfo> &tensors)
    : _id{ id }, _operator_type{ operator_type }, _tensors{ tensors }
{
}

OperatorId Operator::id() const
{
    return _id;
}

GpuOperatorType Operator::operator_type() const
{
    return _operator_type;
}

ArgumentPack<ITensorInfo> Operator::tensors() const
{
    return _tensors;
}

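// Check whether op could be added to the group without violating any fusion constraint.
// This is a dry run: the group itself is left unmodified.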
bool GpuOperatorGroup::try_add_operator(const Operator &op, bool is_output) const
{
    const auto src_tensor_ids = get_tensor_ids(op.tensors().get_const_src_tensors());
    const auto dst_tensor_ids = get_tensor_ids(op.tensors().get_const_dst_tensors());
    // Constraint 1
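    // The new operator must attach to the existing operators as a linear (unbranching) sequence in the dependency graph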
    if(!_graph.try_add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids, is_output))
    {
        return false;
    }
    // Constraint 2
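    // The group cannot grow beyond the maximum number of fused operators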
    if(_operators.size() >= max_fused_operators)
    {
        return false;
    }
    // Constraint 3.1: Pattern: (Unfusable)
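    // Nothing can be fused after an Unfusable root operator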
    if(_operators.size() > 0 && get_root_operator()->operator_type() == GpuOperatorType::Unfusable)
    {
        return false;
    }
    // Constraint 3.2
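    // Only Simple operators can be fused after the root; any other operator type must itself be the root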
    if(_operators.size() > 0 && (op.operator_type() != GpuOperatorType::Simple))
    {
        return false;
    }
    // Constraint 4
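    // A fusable (non-Unfusable) operator must have exactly one destination tensor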
    if(op.operator_type() != GpuOperatorType::Unfusable && op.tensors().get_const_dst_tensors().size() != 1U)
    {
        return false;
    }
    // Constraint 5
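    // All destination tensors in the group must share the shape of the root operator's first destination tensor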
    if(_operators.size() > 0)
    {
        const auto root_dst_tensors = get_root_operator()->tensors().get_const_dst_tensors();
        ARM_COMPUTE_ERROR_ON(root_dst_tensors.empty());
        const auto first_dst_tensor = root_dst_tensors[0];
        const auto dst_tensors      = op.tensors().get_const_dst_tensors();
        for(const auto &t : root_dst_tensors)
        {
            if(detail::have_different_dimensions(t->tensor_shape(), first_dst_tensor->tensor_shape(), 0))
            {
                return false;
            }
        }
        for(const auto &t : dst_tensors)
        {
            if(detail::have_different_dimensions(t->tensor_shape(), first_dst_tensor->tensor_shape(), 0))
            {
                return false;
            }
        }
    }
    // Constraint 6
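    // All destination tensors in the group must have the same data layout as the root operator's first destination tensor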
    if(_operators.size() > 0)
    {
        const auto root_dst_tensors = get_root_operator()->tensors().get_const_dst_tensors();
        ARM_COMPUTE_ERROR_ON(root_dst_tensors.empty());
        const auto first_dst_tensor_layout = root_dst_tensors[0]->data_layout();
        const auto dst_tensors             = op.tensors().get_const_dst_tensors();
        for(const auto &t : root_dst_tensors)
        {
            if(t->data_layout() != first_dst_tensor_layout)
            {
                return false;
            }
        }
        for(const auto &t : dst_tensors)
        {
            if(t->data_layout() != first_dst_tensor_layout)
            {
                return false;
            }
        }
    }
    return true;
}
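// Add op to the group and record it in the dependency graph; the caller is expected to have
// validated it with try_add_operator first (asserted in debug builds)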
void GpuOperatorGroup::add_operator(const Operator &op, bool is_output)
{
    ARM_COMPUTE_ERROR_ON(!try_add_operator(op, is_output));
    const auto src_tensor_ids = get_tensor_ids(op.tensors().get_const_src_tensors());
    const auto dst_tensor_ids = get_tensor_ids(op.tensors().get_const_dst_tensors());
    _graph.add_operator_as_linear(op.id(), src_tensor_ids, dst_tensor_ids, is_output);
    _operators[op.id()] = op;
}
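// Create a new operator with the next sequential id; the operator is not added to the group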
Operator GpuOperatorGroup::new_operator(const GpuOperatorType &operator_type, const ArgumentPack<ITensorInfo> &tensors) const
{
    auto new_id = static_cast<OperatorId>(_operators.size());
    return Operator{ new_id, operator_type, tensors };
}
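// Return the root (first) operator of the linear group, or nullptr if the group is empty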
const Operator *GpuOperatorGroup::get_root_operator() const
{
    const auto roots = _graph.get_root_ops();
    ARM_COMPUTE_ERROR_ON(roots.size() > 1);
    if(roots.empty())
    {
        return nullptr;
    }
    return &_operators.at(roots[0]);
}

} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute