/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_UTILS_H
#define ARM_COMPUTE_GRAPH_UTILS_H

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/PassManager.h"

namespace arm_compute
{
namespace graph
{
// Forward Declaration
class GraphContext;

/** Checks if a node is a utility node
 *
 * @param[in] node Node to check
 *
 * @return True if the node is a utility node (e.g. a print layer) else false
 */
inline bool is_utility_node(INode *node)
{
    std::set<NodeType> utility_node_types = { NodeType::PrintLayer };
    return utility_node_types.find(node->type()) != utility_node_types.end();
}

/** Returns the tensor descriptor of a given tensor
 *
 * @param[in] g   Graph that the tensor belongs to
 * @param[in] tid Tensor ID
 *
 * @return Tensor descriptor if tensor was found else empty descriptor
 */
inline TensorDescriptor get_tensor_descriptor(const Graph &g, TensorID tid)
{
    const Tensor *tensor = g.tensor(tid);
    return (tensor != nullptr) ? tensor->desc() : TensorDescriptor();
}
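/* Usage sketch (illustrative, not part of this header's API surface): querying the
 * descriptor of a graph tensor and reading its shape. Assumes a populated Graph 'g'
 * and a valid TensorID 'tid' obtained elsewhere; an invalid ID yields an empty descriptor.
 *
 *   const TensorDescriptor desc  = get_tensor_descriptor(g, tid);
 *   const TensorShape     &shape = desc.shape; // 'shape' is a TensorDescriptor member
 */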
/** Sets an accessor on a given tensor
 *
 * @param[in] tensor   Tensor to set the accessor to
 * @param[in] accessor Accessor to set
 *
 * @return Status containing an error if the tensor is nullptr, else an empty Status
 */
inline Status set_tensor_accessor(Tensor *tensor, std::unique_ptr<ITensorAccessor> accessor)
{
    ARM_COMPUTE_RETURN_ERROR_ON(tensor == nullptr);
    tensor->set_accessor(std::move(accessor));

    return Status{};
}
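/* Usage sketch (illustrative): attaching an accessor to a graph tensor. 'MyAccessor'
 * is a hypothetical user-defined ITensorAccessor implementation; std::make_unique
 * requires <memory>.
 *
 *   Tensor      *t = g.tensor(tid); // non-const access to the graph's tensor
 *   const Status s = set_tensor_accessor(t, std::make_unique<MyAccessor>());
 *   // Inspect 's' before continuing; it carries an error if 't' was nullptr.
 */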
/** Checks if a specific target is supported
 *
 * @param[in] target Target to check
 *
 * @return True if target is supported else false
 */
bool is_target_supported(Target target);
/** Returns default target for execution
 *
 * @note If an OpenCL backend exists then OpenCL is returned,
 * else if the CPU backend exists returns @ref Target::NEON as target.
 * If no backends are registered an error is raised.
 *
 * @return Default target
 */
Target get_default_target();
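/* Usage sketch (illustrative): picking an execution target, preferring OpenCL when
 * it is available and falling back to the library's default choice otherwise.
 *
 *   const Target target = is_target_supported(Target::CL) ? Target::CL : get_default_target();
 */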
/** Forces a single target to all graph constructs
 *
 * @param[in] g      Graph to force target on
 * @param[in] target Target to force
 */
void force_target_to_graph(Graph &g, Target target);
/** Creates a default @ref PassManager
 *
 * @param[in] target Target to create the pass manager for
 * @param[in] cfg    Graph configuration meta-data
 *
 * @return A PassManager with default mutating passes
 */
PassManager create_default_pass_manager(Target target, const GraphConfig &cfg);
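/* Usage sketch (illustrative): building the default pass manager for a target and
 * forcing that target onto every construct of a graph before mutation. Assumes a
 * GraphConfig 'cfg' set up by the caller.
 *
 *   force_target_to_graph(g, target);
 *   PassManager pm = create_default_pass_manager(target, cfg);
 *   // The passes held by 'pm' can then be run over 'g' (e.g. by the graph manager).
 */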
/** Sets up the requested backend context if it exists, is supported and hasn't been initialized already.
 *
 * @param[in,out] ctx    Graph Context.
 * @param[in]     target Target to set up the backend for.
 */
void setup_requested_backend_context(GraphContext &ctx, Target target);
/** Releases the graph context if it has not been released manually
 *
 * @param[in,out] ctx Graph Context
 */
void release_default_graph_context(GraphContext &ctx);
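/* Usage sketch (illustrative): preparing and tearing down a backend context around
 * graph execution. Assumes a GraphContext 'ctx' owned by the caller.
 *
 *   setup_requested_backend_context(ctx, target); // no-op if already initialized or unsupported
 *   // ... configure and execute the graph ...
 *   release_default_graph_context(ctx);
 */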
/** Synchronize kernels execution on the backends. On GPU, this results in a blocking call waiting for all kernels to be completed. */
void sync_backends();
/** Get size of a tensor's given dimension depending on its layout
 *
 * @param[in] descriptor            Descriptor
 * @param[in] data_layout_dimension Tensor data layout dimension
 *
 * @return Size of requested dimension
 */
size_t get_dimension_size(const TensorDescriptor &descriptor, const DataLayoutDimension data_layout_dimension);
/** Get index of a tensor's given dimension depending on its layout
 *
 * @param[in] data_layout           Data layout of the tensor
 * @param[in] data_layout_dimension Tensor data layout dimension
 *
 * @return Index of the given dimension
 */
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension);
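/* Usage sketch (illustrative): layout-aware dimension queries. For an NHWC tensor the
 * channel dimension is the innermost shape index (0), while for NCHW it is index 2;
 * these helpers hide that difference.
 *
 *   const TensorDescriptor desc     = get_tensor_descriptor(g, tid);
 *   const size_t           channels = get_dimension_size(desc, DataLayoutDimension::CHANNEL);
 *   const size_t           c_idx    = get_dimension_idx(desc.layout, DataLayoutDimension::CHANNEL);
 */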
/** Get the list of driving nodes of a given node
 *
 * @param[in] node Node to find the driving nodes of
 *
 * @return A list with the driving nodes of a given node
 */
std::vector<NodeIdxPair> get_driving_nodes(const INode &node);
/** Get the list of driver nodes of a given node
 *
 * @param[in] node Node to find the driver nodes of
 *
 * @return A list with the driver nodes of a given node
 */
std::vector<NodeIdxPair> get_driver_nodes(const INode &node);
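/* Usage sketch (illustrative): walking a node's consumers. Driving nodes are the nodes
 * fed by a node's outputs; driver nodes are the nodes feeding its inputs. A NodeIdxPair
 * pairs the neighbouring node's ID with the index of the connecting tensor.
 *
 *   for(const NodeIdxPair &consumer : get_driving_nodes(node))
 *   {
 *       INode *next = g.node(consumer.node_id); // look the consumer up in the graph
 *       // ... inspect 'next' and 'consumer.index' ...
 *   }
 */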
/** Configures tensor
 *
 * @param[in, out] tensor Tensor to configure
 */
void configure_tensor(Tensor *tensor);
} // namespace graph
} // namespace arm_compute
#endif /* ARM_COMPUTE_GRAPH_UTILS_H */