// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>

#include <xnnpack.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
#include <xnnpack/subgraph-validation.h>


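// Creates the backing ELU operator for a subgraph node: the last input
// dimension is used as the channel count, and the operator variant (f16,
// f32, or qs8) is selected from the node's compute type.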
static enum xnn_status create_elu_operator(
  const struct xnn_node* node,
  const struct xnn_value* values,
  size_t num_values,
  struct xnn_operator_data* opdata,
  const struct xnn_caches* caches)
{
  assert(node->num_inputs == 1);
  const uint32_t input_id = node->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_values);

  assert(node->num_outputs == 1);
  const uint32_t output_id = node->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_values);

  // Treat the last input dimension as channels; a zero-dimensional (scalar)
  // input is handled as a single channel.
  const size_t num_input_dims = values[input_id].shape.num_dims;
  const size_t channel_dim = num_input_dims == 0 ? 1 : values[input_id].shape.dim[num_input_dims - 1];

  enum xnn_status status;
  switch (node->compute_type) {
#ifndef XNN_NO_F16_OPERATORS
    case xnn_compute_type_fp16:
      status = xnn_create_elu_nc_f16(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        node->flags,
        &opdata->operator_objects[0]);
      break;
#endif  // XNN_NO_F16_OPERATORS
    case xnn_compute_type_fp32:
      status = xnn_create_elu_nc_f32(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        node->flags,
        &opdata->operator_objects[0]);
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_compute_type_qs8:
      status = xnn_create_elu_nc_qs8(
        channel_dim /* channels */, channel_dim /* input stride */, channel_dim /* output stride */,
        node->params.elu.alpha,
        (int8_t) values[input_id].quantization.zero_point,
        values[input_id].quantization.scale,
        (int8_t) values[output_id].quantization.zero_point,
        values[output_id].quantization.scale,
        INT8_MIN, INT8_MAX,
        node->flags,
        &opdata->operator_objects[0]);
      break;
#endif  // XNN_NO_QS8_OPERATORS
    default:
      XNN_UNREACHABLE;
  }
  if (status == xnn_status_success) {
    opdata->batch_size = xnn_shape_multiply_non_channel_dims(&values[input_id].shape);
    opdata->inputs[0] = input_id;
    opdata->outputs[0] = output_id;
  }
  return status;
}

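// Binds the input and output blobs to the previously created ELU operator
// and dispatches to the setup routine that matches the operator type.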
static enum xnn_status setup_elu_operator(
  const struct xnn_operator_data* opdata,
  const struct xnn_blob* blobs,
  size_t num_blobs,
  pthreadpool_t threadpool)
{
  const uint32_t input_id = opdata->inputs[0];
  assert(input_id != XNN_INVALID_VALUE_ID);
  assert(input_id < num_blobs);

  const uint32_t output_id = opdata->outputs[0];
  assert(output_id != XNN_INVALID_VALUE_ID);
  assert(output_id < num_blobs);

  const struct xnn_blob* input_blob = blobs + input_id;
  const void* input_data = input_blob->data;
  assert(input_data != NULL);

  const struct xnn_blob* output_blob = blobs + output_id;
  void* output_data = output_blob->data;
  assert(output_data != NULL);

  switch (opdata->operator_objects[0]->type) {
#ifndef XNN_NO_F16_OPERATORS
    case xnn_operator_type_elu_nc_f16:
      return xnn_setup_elu_nc_f16(
        opdata->operator_objects[0],
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#endif  // XNN_NO_F16_OPERATORS
    case xnn_operator_type_elu_nc_f32:
      return xnn_setup_elu_nc_f32(
        opdata->operator_objects[0],
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_operator_type_elu_nc_qs8:
      return xnn_setup_elu_nc_qs8(
        opdata->operator_objects[0],
        opdata->batch_size,
        input_data,
        output_data,
        threadpool);
#endif  // XNN_NO_QS8_OPERATORS
    default:
      XNN_UNREACHABLE;
  }
}

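// Public entry point that adds an ELU node to a subgraph. A minimal usage
// sketch is shown below for orientation; it is illustrative only, and the
// tensor shapes, value IDs, and omitted error handling are assumptions
// rather than anything defined in this file:
//
//   xnn_subgraph_t subgraph = NULL;
//   xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, &subgraph);
//   const size_t dims[2] = {1, 64};
//   uint32_t input_id = XNN_INVALID_VALUE_ID;
//   uint32_t output_id = XNN_INVALID_VALUE_ID;
//   xnn_define_tensor_value(subgraph, xnn_datatype_fp32, 2, dims, NULL,
//     /*external_id=*/0, XNN_VALUE_FLAG_EXTERNAL_INPUT, &input_id);
//   xnn_define_tensor_value(subgraph, xnn_datatype_fp32, 2, dims, NULL,
//     /*external_id=*/1, XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &output_id);
//   xnn_define_elu(subgraph, /*alpha=*/1.0f, input_id, output_id, /*flags=*/0);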
enum xnn_status xnn_define_elu(
  xnn_subgraph_t subgraph,
  float alpha,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags)
{
  enum xnn_status status;
  if ((status = xnn_subgraph_check_xnnpack_initialized(xnn_node_type_elu)) != xnn_status_success) {
    return status;
  }

  if (alpha <= 0.0f || !isnormal(alpha)) {
    xnn_log_error(
      "failed to define %s operator with %.7g alpha parameter: alpha must be finite, normalized, and positive",
      xnn_node_type_to_string(xnn_node_type_elu), alpha);
    return xnn_status_invalid_parameter;
  }

  if ((status = xnn_subgraph_check_input_node_id(xnn_node_type_elu, input_id, subgraph->num_values)) != xnn_status_success) {
    return status;
  }

  const struct xnn_value* input_value = &subgraph->values[input_id];
  status = xnn_subgraph_check_input_type_dense(xnn_node_type_elu, input_id, input_value);
  if (status != xnn_status_success) {
    return status;
  }

  switch (input_value->datatype) {
    case xnn_datatype_fp32:
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
#endif  // !defined(XNN_NO_QS8_OPERATORS)
      break;
    default:
      xnn_log_error(
        "failed to define %s operator with input ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_elu), input_id,
        xnn_datatype_to_string(input_value->datatype), input_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_output_node_id(xnn_node_type_elu, output_id, subgraph->num_values);
  if (status != xnn_status_success) {
    return status;
  }

  const struct xnn_value* output_value = &subgraph->values[output_id];
  status = xnn_subgraph_check_output_type_dense(xnn_node_type_elu, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  enum xnn_compute_type compute_type = xnn_compute_type_invalid;
  switch (output_value->datatype) {
    case xnn_datatype_fp32:
      compute_type = xnn_compute_type_fp32;
      break;
#ifndef XNN_NO_QS8_OPERATORS
    case xnn_datatype_qint8:
      compute_type = xnn_compute_type_qs8;
      break;
#endif  // !defined(XNN_NO_QS8_OPERATORS)
    default:
      xnn_log_error(
        "failed to define %s operator with output ID #%" PRIu32 ": unsupported Value datatype %s (%d)",
        xnn_node_type_to_string(xnn_node_type_elu), output_id,
        xnn_datatype_to_string(output_value->datatype), output_value->datatype);
      return xnn_status_invalid_parameter;
  }

  status = xnn_subgraph_check_datatype_matches(xnn_node_type_elu, input_id, input_value, output_id, output_value);
  if (status != xnn_status_success) {
    return status;
  }

  struct xnn_node* node = xnn_subgraph_new_node(subgraph);
  if (node == NULL) {
    return xnn_status_out_of_memory;
  }

  node->type = xnn_node_type_elu;
  node->compute_type = compute_type;
  node->params.elu.alpha = alpha;
  node->num_inputs = 1;
  node->inputs[0] = input_id;
  node->num_outputs = 1;
  node->outputs[0] = output_id;
  node->flags = flags;

  node->create = create_elu_operator;
  node->setup = setup_elu_operator;

  return xnn_status_success;
}