// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>
18
create_leaky_relu_operator(const struct xnn_node * node,const struct xnn_value * values,size_t num_values,struct xnn_operator_data * opdata,const struct xnn_caches * caches)19 static enum xnn_status create_leaky_relu_operator(
20 const struct xnn_node* node,
21 const struct xnn_value* values,
22 size_t num_values,
23 struct xnn_operator_data* opdata,
24 const struct xnn_caches* caches)
25 {
26 assert(node->num_inputs == 1);
27 const uint32_t input_id = node->inputs[0];
28 assert(input_id != XNN_INVALID_VALUE_ID);
29 assert(input_id < num_values);
30
31 assert(node->num_outputs == 1);
32 const uint32_t output_id = node->outputs[0];
33 assert(output_id != XNN_INVALID_VALUE_ID);
34 assert(output_id < num_values);
35
36 const size_t num_input_dims = values[input_id].shape.num_dims;
37 const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];
38
39 enum xnn_status status;
40 switch (node->compute_type) {
41 #ifndef XNN_NO_F16_OPERATORS
42 case xnn_compute_type_fp16:
43 status = xnn_create_leaky_relu_nc_f16(
44 channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
45 node->params.leaky_relu.negative_slope,
46 node->flags,
47 &opdata->operator_objects[0]);
48 break;
49 #endif // XNN_NO_F16_OPERATORS
50 case xnn_compute_type_fp32:
51 status = xnn_create_leaky_relu_nc_f32(
52 channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
53 node->params.leaky_relu.negative_slope,
54 node->flags,
55 &opdata->operator_objects[0]);
56 break;
57 #ifndef XNN_NO_QS8_OPERATORS
58 case xnn_compute_type_qs8:
59 status = xnn_create_leaky_relu_nc_qs8(
60 channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
61 node->params.leaky_relu.negative_slope,
62 (int8_t) values[input_id].quantization.zero_point, values[input_id].quantization.scale,
63 (int8_t) values[output_id].quantization.zero_point, values[output_id].quantization.scale,
64 node->flags,
65 &opdata->operator_objects[0]);
66 break;
67 #endif // !defined(XNN_NO_QS8_OPERATORS)
68 #ifndef XNN_NO_QU8_OPERATORS
69 case xnn_compute_type_qu8:
70 status = xnn_create_leaky_relu_nc_qu8(
71 channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
72 node->params.leaky_relu.negative_slope,
73 (uint8_t) values[input_id].quantization.zero_point, values[input_id].quantization.scale,
74 (uint8_t) values[output_id].quantization.zero_point, values[output_id].quantization.scale,
75 node->flags,
76 &opdata->operator_objects[0]);
77 break;
78 #endif // !defined(XNN_NO_QU8_OPERATORS)
79 default:
80 XNN_UNREACHABLE;
81 }
82 if (status == xnn_status_success) {
83 opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
84 opdata->inputs[0] = input_id;
85 opdata->outputs[0] = output_id;
86 }
87 return status;
88 }
// Binds the input/output blob pointers to the operator created by
// create_leaky_relu_operator, dispatching on the operator's recorded type.
static enum xnn_status setup_leaky_relu_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_blob* blobs,
  size_t num_blobs,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_blobs);

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_blobs);

  const void* input_data = blobs[input_id].data;
  assert(input_data != NULL);

  void* output_data = blobs[output_id].data;
  assert(output_data != NULL);

  xnn_operator_t op = opdata->operator_objects[0];
  switch (op->type) {
#ifndef XNN_NO_F16_OPERATORS
    case xnn_operator_type_leaky_relu_nc_f16:
      return xnn_setup_leaky_relu_nc_f16(
        op,
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#endif  // XNN_NO_F16_OPERATORS
    case xnn_operator_type_leaky_relu_nc_f32:
      return xnn_setup_leaky_relu_nc_f32(
        op,
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_operator_type_leaky_relu_nc_qs8:
      return xnn_setup_leaky_relu_nc_qs8(
        op,
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_operator_type_leaky_relu_nc_qu8:
      return xnn_setup_leaky_relu_nc_qu8(
        op,
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      XNN_UNREACHABLE;
  }
}
// Validates the parameters and appends a Leaky ReLU node to the subgraph.
//
// negative_slope must be a finite value; input and output must be dense
// values of matching, supported datatype (fp32, qint8, or quint8). For the
// quantized datatypes the input-to-output scale ratios are additionally
// range-checked against the limits of the quantized kernels. Returns
// xnn_status_success, or an error status describing the failed check.
// NOTE(review): fp16 is not accepted here although create_leaky_relu_operator
// handles xnn_compute_type_fp16 — presumably fp16 nodes are produced by a
// graph-level fp16 conversion pass rather than by this entry point; confirm.
enum xnn_status xnn_define_leaky_relu(
  xnn_subgraph_t subgraph,
  float negative_slope,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_leaky_relu)) != xnn_status_success) {
    return status;
  }

  if (!isfinite(negative_slope)) {
    xnn_log_error(
      "failed to define %s operator with %f negative slope: finite number expected",
      xnn_node_type_to_string(xnn_node_type_leaky_relu),
      negative_slope);
    return xnn_status_invalid_parameter;
  }

  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_leaky_relu, input_id, subgraph->num_values)) !=
      xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_leaky_relu, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
#endif  // !defined(XNN_NO_QU8_OPERATORS)
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_leaky_relu), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_leaky_relu, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_leaky_relu, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
#ifndef XNN_NO_QU8_OPERATORS
    case xnn_datatype_quint8:
      compute_type = xnn_compute_type_qu8;
      break;
#endif  // !defined(XNN_NO_QU8_OPERATORS)
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_leaky_relu), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }
  assert(compute_type != xnn_compute_type_invalid);

  status = xnn_subgraph_check_datatype_matches(xnn_node_type_leaky_relu, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

#if !defined(XNN_NO_QS8_OPERATORS) || !defined(XNN_NO_QU8_OPERATORS)
  // Bug fix: compute_type must be compared against xnn_compute_type_* values.
  // The previous code compared it against xnn_datatype_qint8/quint8, which
  // belong to a different enum, so these range checks were silently skipped
  // for quantized graphs. (The guard macros were likewise corrected from
  // XNN_NO_U8/S8_OPERATORS to the QS8/QU8 macros used throughout this file.)
  if (compute_type == xnn_compute_type_qs8 || compute_type == xnn_compute_type_qu8) {
    // The quantized kernels only support scale ratios in a limited range;
    // reject nodes whose quantization parameters fall outside it.
    const float positive_input_output_scale = input_value->quantization.scale / output_value->quantization.scale;
    if (positive_input_output_scale < 0x1.0p-8f || positive_input_output_scale > 0x1.0p+7f) {
      xnn_log_error(
        "failed to define %s operator with %.7g positive-input-to-output scale ratio: scale ratio must be in [2**-8, 2**7] range",
        xnn_node_type_to_string(xnn_node_type_leaky_relu), positive_input_output_scale);
      return xnn_status_invalid_parameter;
    }

    const float negative_input_output_scale = positive_input_output_scale * negative_slope;
    if (negative_input_output_scale < -0x1.FFFC00p+6f || negative_input_output_scale > 0x1.0p+7f) {
      xnn_log_error(
        "failed to define %s operator with %.7g negative-input-to-output scale ratio: scale ratio must be in (-2**7, 2**7] range",
        xnn_node_type_to_string(xnn_node_type_leaky_relu), negative_input_output_scale);
      return xnn_status_invalid_parameter;
    }

    if (fabsf(negative_input_output_scale) < 0x1.0p-8f) {
      xnn_log_error(
        "failed to define %s operator with %.7g negative-input-to-output scale ratio: scale ratio must be at least 2**-8 in absolute value",
        xnn_node_type_to_string(xnn_node_type_leaky_relu), negative_input_output_scale);
      return xnn_status_invalid_parameter;
    }
  }
#endif  // !defined(XNN_NO_QS8_OPERATORS) || !defined(XNN_NO_QU8_OPERATORS)

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_leaky_relu;
  node->compute_type = compute_type;
  node->params.leaky_relu.negative_slope = negative_slope;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_leaky_relu_operator;
  node->setup = setup_leaky_relu_operator;

  return xnn_status_success;
}