// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <fp16.h>

#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/operator.h>
#include <xnnpack/pack.h>
#include <xnnpack/params.h>

static enum xnn_status create_fully_connected_nc(
    size_t input_channels,
    size_t output_channels,
    size_t input_stride,
    size_t output_stride,
    const void* kernel,
    const void* bias,
    uint32_t flags,
    uint32_t log2_filter_element_size,
    uint32_t bias_element_size,
    xnn_pack_gemm_io_w_function pack_gemm_io_w,
    xnn_pack_gemm_goi_w_function pack_gemm_goi_w,
    const void* packing_params,
    int packed_weights_padding_byte,
    const void* params,
    size_t params_size,
    const struct gemm_parameters* gemm_parameters,
    const struct gemm_fused_ukernels* gemm_ukernels,
    uint32_t datatype_init_flags,
    enum xnn_operator_type operator_type,
    xnn_caches_t caches,
    xnn_operator_t* fully_connected_op_out)
{
  xnn_operator_t fully_connected_op = NULL;
  enum xnn_status status = xnn_status_uninitialized;

  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(operator_type));
    goto error;
  }

  status = xnn_status_unsupported_hardware;

  if ((xnn_params.init_flags & datatype_init_flags) != datatype_init_flags) {
    xnn_log_error(
      "failed to create %s operator: operations on data type are not supported",
      xnn_operator_type_to_string(operator_type));
    goto error;
  }

  status = xnn_status_invalid_parameter;

  if (input_channels == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu input channels: number of channels must be non-zero",
      xnn_operator_type_to_string(operator_type), input_channels);
    goto error;
  }

  if (output_channels == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu output channels: number of channels must be non-zero",
      xnn_operator_type_to_string(operator_type), output_channels);
    goto error;
  }

  if (input_stride < input_channels) {
    xnn_log_error(
      "failed to create %s operator with input element stride of %zu: "
      "stride must be at least as large as the number of input channels (%zu)",
      xnn_operator_type_to_string(operator_type), input_stride, input_channels);
    goto error;
  }

  if (output_stride < output_channels) {
    xnn_log_error(
      "failed to create %s operator with output element stride of %zu: "
      "stride must be at least as large as the number of output channels (%zu)",
      xnn_operator_type_to_string(operator_type), output_stride, output_channels);
    goto error;
  }

  status = xnn_status_out_of_memory;

  fully_connected_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
  if (fully_connected_op == NULL) {
    xnn_log_error(
      "failed to allocate %zu bytes for %s operator descriptor",
      sizeof(struct xnn_operator), xnn_operator_type_to_string(operator_type));
    goto error;
  }

  if (caches != NULL) {
    fully_connected_op->weights_cache = caches->weights_cache;
  }

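  // GEMM microkernel tile sizes: nr is the output-channel tile, while kr and
  // sr are stored as log2 values in gemm_parameters and expanded here.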
  const uint32_t nr = gemm_parameters->nr;
  const uint32_t kr = UINT32_C(1) << gemm_parameters->log2_kr;
  const uint32_t sr = UINT32_C(1) << gemm_parameters->log2_sr;

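  // Packed weights layout: output channels are rounded up to a multiple of nr
  // and input channels to a multiple of kr * sr; each output channel then
  // occupies one bias element plus k_stride filter elements.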
  const size_t n_stride = round_up(output_channels, nr);
  const size_t k_stride = round_up_po2(input_channels, kr * sr);

  const size_t packed_weights_size = n_stride * (bias_element_size + (k_stride << log2_filter_element_size));
  size_t aligned_total_weights_size = round_up_po2(packed_weights_size, XNN_ALLOCATION_ALIGNMENT);
  void* weights_ptr = xnn_get_pointer_to_write_weights(
      fully_connected_op, aligned_total_weights_size, packed_weights_padding_byte);
  if (weights_ptr == NULL) {
    xnn_log_error(
      "failed to allocate %zu bytes for %s operator packed weights",
      packed_weights_size, xnn_operator_type_to_string(operator_type));
    goto error;
  }

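  // XNN_FLAG_TRANSPOSE_WEIGHTS selects the io packing path for kernels stored
  // as [input_channels, output_channels]; the default goi path expects
  // [output_channels, input_channels].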
  if (flags & XNN_FLAG_TRANSPOSE_WEIGHTS) {
    pack_gemm_io_w(
      output_channels, input_channels,
      nr, kr, sr,
      kernel, bias,
      weights_ptr,
      packing_params);
  } else {
    pack_gemm_goi_w(
      1, output_channels, input_channels,
      nr, kr, sr,
      kernel, bias,
      weights_ptr,
      0 /* extra bytes */,
      packing_params);
  }

  if (use_weights_cache(fully_connected_op)) {
    fully_connected_op->packed_weights.offset = xnn_get_or_insert_weights_cache(
        fully_connected_op->weights_cache, weights_ptr, aligned_total_weights_size);
  }

  fully_connected_op->group_input_channels = input_channels;
  fully_connected_op->group_output_channels = output_channels;
  fully_connected_op->input_pixel_stride = input_stride;
  fully_connected_op->output_pixel_stride = output_stride;

  memcpy(&fully_connected_op->params, params, params_size);
  fully_connected_op->type = operator_type;
  fully_connected_op->flags = flags;

  const size_t mr = gemm_parameters->mr;
  fully_connected_op->ukernel.type = xnn_ukernel_type_gemm;
  fully_connected_op->ukernel.gemm = (struct xnn_ukernel_gemm) {
    .mr = mr,
    .nr = nr,
    .kr = kr,
    .sr = sr,
  };

  assert(XNN_MAX_MR >= mr);
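  // gemm_cases[0] holds the mr=1 microkernel (used at setup time when
  // batch_size == 1); the remaining slots all get the full-mr microkernel.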
  fully_connected_op->ukernel.gemm.gemm_cases[0] = gemm_ukernels->gemm[0];
  for (size_t i = 1; i < mr; i++) {
    fully_connected_op->ukernel.gemm.gemm_cases[i] = gemm_ukernels->gemm[mr-1];
  }

  fully_connected_op->state = xnn_run_state_invalid;

  *fully_connected_op_out = fully_connected_op;
  return xnn_status_success;

error:
  xnn_delete_operator(fully_connected_op);
  return status;
}

static enum xnn_status setup_fully_connected_nc(
  xnn_operator_t fully_connected_op,
  enum xnn_operator_type expected_operator_type,
  size_t batch_size,
  const void* input,
  void* output,
  uint32_t datatype_init_flags,
  uint32_t log2_input_element_size,
  uint32_t log2_filter_element_size,
  uint32_t bias_element_size,
  uint32_t log2_output_element_size,
  const void* params,
  size_t params_size,
  size_t num_threads)
{
  if (fully_connected_op->type != expected_operator_type) {
    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
      xnn_operator_type_to_string(expected_operator_type),
      xnn_operator_type_to_string(fully_connected_op->type));
    return xnn_status_invalid_parameter;
  }
  fully_connected_op->state = xnn_run_state_invalid;

  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(fully_connected_op->type));
    return xnn_status_uninitialized;
  }

  if (batch_size == 0) {
    fully_connected_op->state = xnn_run_state_skip;
    return xnn_status_success;
  }

  if (fully_connected_op->weights_cache != NULL &&
      !xnn_weights_cache_is_finalized(fully_connected_op->weights_cache)) {
    xnn_log_error("failed to setup %s operator: weights cache is not finalized",
      xnn_operator_type_to_string(fully_connected_op->type));
    return xnn_status_invalid_state;
  }

  fully_connected_op->batch_size = 1;
  fully_connected_op->input_height = batch_size;
  fully_connected_op->input_width = 1;
  fully_connected_op->input = input;

  fully_connected_op->output_height = batch_size;
  fully_connected_op->output_width = 1;
  fully_connected_op->output = output;

  const size_t input_channels = fully_connected_op->group_input_channels;
  const size_t output_channels = fully_connected_op->group_output_channels;

  uint32_t mr = fully_connected_op->ukernel.gemm.mr;
  const uint32_t nr = fully_connected_op->ukernel.gemm.nr;

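  // Prefer the mr=1 microkernel for a single-row GEMM when one is available;
  // otherwise use the full-mr microkernel selected at creation time.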
  struct xnn_hmp_gemm_ukernel gemm_ukernel = fully_connected_op->ukernel.gemm.gemm_cases[mr-1];
  if (batch_size == 1 && fully_connected_op->ukernel.gemm.gemm_cases[0].function[XNN_UARCH_DEFAULT] != NULL) {
    gemm_ukernel = fully_connected_op->ukernel.gemm.gemm_cases[0];
    mr = 1;
  }

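  // The operator runs as a [batch_size x input_channels] *
  // [input_channels x output_channels] GEMM; w_stride is the stride between
  // consecutive output channels in the packed weights (one bias element plus
  // one padded filter row).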
  fully_connected_op->context.gemm = (struct gemm_context) {
    .k_scaled = input_channels << log2_input_element_size,
    .w_stride = bias_element_size +
        (round_up_po2(input_channels, fully_connected_op->ukernel.gemm.kr * fully_connected_op->ukernel.gemm.sr) << log2_input_element_size),
    .a = input,
    .a_stride = fully_connected_op->input_pixel_stride << log2_input_element_size,
    .packed_w = packed_weights(fully_connected_op),
    .c = output,
    .cm_stride = fully_connected_op->output_pixel_stride << log2_output_element_size,
    .cn_stride = nr << log2_output_element_size,
    .log2_csize = log2_output_element_size,
    .ukernel = gemm_ukernel,
  };
  memcpy(&fully_connected_op->context.gemm.params, params, params_size);
  fully_connected_op->context.gemm.fused_params = &fully_connected_op->context.gemm.params;

  #if XNN_TEST_MODE
    const size_t nc = nr;
  #else
    size_t nc = output_channels;
    if (num_threads > 1) {
      const size_t num_other_tiles = divide_round_up(batch_size, mr);
      const size_t target_tiles_per_thread = 5;
      const size_t max_nc = divide_round_up(output_channels * num_other_tiles, num_threads * target_tiles_per_thread);
      if (max_nc < nc) {
        nc = min(nc, divide_round_up(nc, max_nc * nr) * nr);
      }
    }
  #endif
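  // On heterogeneous multi-processor (HMP) builds, dispatch through the
  // microarchitecture-aware task so each core type runs a matching microkernel.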
  #if XNN_MAX_UARCH_TYPES > 1
    if (xnn_is_hmp_gemm_ukernel(gemm_ukernel)) {
      fully_connected_op->compute.type = xnn_parallelization_type_2d_tile_2d_with_uarch;
      fully_connected_op->compute.task_2d_tile_2d_with_id = (pthreadpool_task_2d_tile_2d_with_id_t) xnn_compute_hmp_gemm;
    } else {
      fully_connected_op->compute.type = xnn_parallelization_type_2d_tile_2d;
      fully_connected_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_gemm;
    }
  #else
    fully_connected_op->compute.type = xnn_parallelization_type_2d_tile_2d;
    fully_connected_op->compute.task_2d_tile_2d = (pthreadpool_task_2d_tile_2d_t) xnn_compute_gemm;
  #endif
  fully_connected_op->compute.range[0] = batch_size;
  fully_connected_op->compute.range[1] = output_channels;
  fully_connected_op->compute.tile[0] = mr;
  fully_connected_op->compute.tile[1] = nc;
  fully_connected_op->state = xnn_run_state_ready;

  return xnn_status_success;
}

enum xnn_status xnn_create_fully_connected_nc_f16(
    size_t input_channels,
    size_t output_channels,
    size_t input_stride,
    size_t output_stride,
    const void* kernel,
    const void* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_caches_t caches,
    xnn_operator_t* fully_connected_op_out)
{
  if (isnan(output_min)) {
    xnn_log_error(
      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f16));
    return xnn_status_invalid_parameter;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f16));
    return xnn_status_invalid_parameter;
  }

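  // The clamping bounds are applied in fp16 arithmetic, so validate the range
  // after rounding both bounds to fp16.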
  const uint16_t fp16_output_min = fp16_ieee_from_fp32_value(output_min);
  const uint16_t fp16_output_max = fp16_ieee_from_fp32_value(output_max);
  const float rounded_output_min = fp16_ieee_to_fp32_value(fp16_output_min);
  const float rounded_output_max = fp16_ieee_to_fp32_value(fp16_output_max);
  if (rounded_output_min >= rounded_output_max) {
    xnn_log_error(
      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f16), rounded_output_min, rounded_output_max);
    return xnn_status_invalid_parameter;
  }

  union xnn_f16_minmax_params params;
  if XNN_LIKELY(xnn_params.f16.gemm.init.f16 != NULL) {
    xnn_params.f16.gemm.init.f16(&params, fp16_output_min, fp16_output_max);
  }
  xnn_pack_gemm_io_w_function pack_gemm_io_w = (xnn_pack_gemm_io_w_function) xnn_pack_f16_gemm_io_w;
  xnn_pack_gemm_goi_w_function pack_gemm_goi_w = (xnn_pack_gemm_goi_w_function) xnn_pack_f16_gemm_goi_w;
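  // With XNN_FLAG_FP32_STATIC_WEIGHTS the caller supplies fp32 weights and
  // bias, which are converted to fp16 as they are packed.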
  if (flags & XNN_FLAG_FP32_STATIC_WEIGHTS) {
    pack_gemm_io_w = (xnn_pack_gemm_io_w_function) xnn_pack_f32_to_f16_gemm_io_w;
    pack_gemm_goi_w = (xnn_pack_gemm_goi_w_function) xnn_pack_f32_to_f16_gemm_goi_w;
  }
  return create_fully_connected_nc(
    input_channels, output_channels,
    input_stride, output_stride,
    kernel, bias, flags,
    1 /* log2(sizeof(filter element)) = log2(sizeof(uint16_t)) */,
    sizeof(uint16_t) /* sizeof(bias element) */,
    pack_gemm_io_w,
    pack_gemm_goi_w,
    NULL /* packing params */, 0 /* packed weights padding byte */,
    &params, sizeof(params),
    &xnn_params.f16.gemm, &xnn_params.f16.gemm.minmax,
    XNN_INIT_FLAG_F16,
    xnn_operator_type_fully_connected_nc_f16,
    caches,
    fully_connected_op_out);
}

enum xnn_status xnn_create_fully_connected_nc_f32(
    size_t input_channels,
    size_t output_channels,
    size_t input_stride,
    size_t output_stride,
    const float* kernel,
    const float* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_caches_t caches,
    xnn_operator_t* fully_connected_op_out)
{
  if (isnan(output_min)) {
    xnn_log_error(
      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
    return xnn_status_invalid_parameter;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32));
    return xnn_status_invalid_parameter;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_f32), output_min, output_max);
    return xnn_status_invalid_parameter;
  }

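  // A [-inf, +inf] output range makes clamping a no-op, so prefer the linear
  // (non-clamping) GEMM microkernels when they are available.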
  const struct gemm_fused_ukernels* gemm_ukernels = &xnn_params.f32.gemm.minmax;
  const bool linear_activation = (output_max == INFINITY) && (output_min == -output_max);
  if (linear_activation && xnn_params.f32.gemm.linear.gemm[xnn_params.f32.gemm.mr-1].function[XNN_UARCH_DEFAULT] != NULL) {
    gemm_ukernels = &xnn_params.f32.gemm.linear;
  }

  union xnn_f32_minmax_params params;
  if XNN_LIKELY(xnn_params.f32.gemm.init.f32 != NULL) {
    xnn_params.f32.gemm.init.f32(&params, output_min, output_max);
  }
  return create_fully_connected_nc(
    input_channels, output_channels,
    input_stride, output_stride,
    kernel, bias, flags,
    2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
    sizeof(float) /* sizeof(bias element) */,
    (xnn_pack_gemm_io_w_function) xnn_pack_f32_gemm_io_w,
    (xnn_pack_gemm_goi_w_function) xnn_pack_f32_gemm_goi_w,
    NULL /* packing params */, 0 /* packed weights padding byte */,
    &params, sizeof(params),
    &xnn_params.f32.gemm, gemm_ukernels,
    XNN_INIT_FLAG_F32,
    xnn_operator_type_fully_connected_nc_f32,
    caches,
    fully_connected_op_out);
}

enum xnn_status xnn_create_fully_connected_nc_qs8(
    size_t input_channels,
    size_t output_channels,
    size_t input_stride,
    size_t output_stride,
    int8_t input_zero_point,
    float input_scale,
    float kernel_scale,
    const int8_t* kernel,
    const int32_t* bias,
    int8_t output_zero_point,
    float output_scale,
    int8_t output_min,
    int8_t output_max,
    uint32_t flags,
    xnn_caches_t caches,
    xnn_operator_t* fully_connected_op_out)
{
  if (input_scale <= 0.0f || !isnormal(input_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qs8), input_scale);
    return xnn_status_invalid_parameter;
  }

  if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qs8), kernel_scale);
    return xnn_status_invalid_parameter;
  }

  if (output_scale <= 0.0f || !isnormal(output_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qs8), output_scale);
    return xnn_status_invalid_parameter;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create %s operator with [%" PRId8 ", %" PRId8 "] output range: range min must be below range max",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qs8), output_min, output_max);
    return xnn_status_invalid_parameter;
  }

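  // The QS8 microkernels requantize with a single combined scale of
  // input_scale * kernel_scale / output_scale, which must stay below 256.0.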
  const float requantization_scale = input_scale * kernel_scale / output_scale;
  if (requantization_scale >= 256.0f) {
    xnn_log_error(
      "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
      "requantization scale %.7g is greater than or equal to 256.0",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qs8),
      input_scale, kernel_scale, output_scale, requantization_scale);
    return xnn_status_unsupported_parameter;
  }

  union xnn_qs8_conv_minmax_params params;
  if XNN_LIKELY(xnn_params.qs8.gemm.init.qs8 != NULL) {
    xnn_params.qs8.gemm.init.qs8(&params, requantization_scale, output_zero_point, output_min, output_max);
  }
  const struct xnn_qs8_packing_params packing_params = {
    .input_zero_point = input_zero_point,
  };
  return create_fully_connected_nc(
    input_channels, output_channels,
    input_stride, output_stride,
    kernel, bias, flags,
    0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
    sizeof(int32_t) /* sizeof(bias element) */,
    (xnn_pack_gemm_io_w_function) xnn_pack_qs8_gemm_io_w,
    (xnn_pack_gemm_goi_w_function) xnn_pack_qs8_gemm_goi_w,
    &packing_params, 0 /* packed weights padding byte */,
    &params, sizeof(params),
    &xnn_params.qs8.gemm, &xnn_params.qs8.gemm.minmax,
    XNN_INIT_FLAG_QS8,
    xnn_operator_type_fully_connected_nc_qs8,
    caches,
    fully_connected_op_out);
}

enum xnn_status xnn_create_fully_connected_nc_qu8(
    size_t input_channels,
    size_t output_channels,
    size_t input_stride,
    size_t output_stride,
    uint8_t input_zero_point,
    float input_scale,
    uint8_t kernel_zero_point,
    float kernel_scale,
    const uint8_t* kernel,
    const int32_t* bias,
    uint8_t output_zero_point,
    float output_scale,
    uint8_t output_min,
    uint8_t output_max,
    uint32_t flags,
    xnn_caches_t caches,
    xnn_operator_t* fully_connected_op_out)
{
  if (input_scale <= 0.0f || !isnormal(input_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g input scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qu8), input_scale);
    return xnn_status_invalid_parameter;
  }

  if (kernel_scale <= 0.0f || !isnormal(kernel_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g kernel scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qu8), kernel_scale);
    return xnn_status_invalid_parameter;
  }

  if (output_scale <= 0.0f || !isnormal(output_scale)) {
    xnn_log_error(
      "failed to create %s operator with %.7g output scale: scale must be finite, normalized, and positive",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qu8), output_scale);
    return xnn_status_invalid_parameter;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create %s operator with [%" PRIu8 ", %" PRIu8 "] output range: range min must be below range max",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qu8), output_min, output_max);
    return xnn_status_invalid_parameter;
  }

  const float requantization_scale = input_scale * kernel_scale / output_scale;
  if (requantization_scale >= 256.0f) {
    xnn_log_error(
      "failed to create %s operator with %.7g input scale, %.7g kernel scale, and %.7g output scale: "
      "requantization scale %.7g is greater than or equal to 256.0",
      xnn_operator_type_to_string(xnn_operator_type_fully_connected_nc_qu8),
      input_scale, kernel_scale, output_scale, requantization_scale);
    return xnn_status_unsupported_parameter;
  }

  union xnn_qu8_conv_minmax_params params;
  if XNN_LIKELY(xnn_params.qu8.gemm.init.qu8 != NULL) {
    xnn_params.qu8.gemm.init.qu8(&params,
      kernel_zero_point, requantization_scale, output_zero_point, output_min, output_max);
  }
  const struct xnn_qu8_packing_params packing_params = {
    .input_zero_point = input_zero_point,
    .kernel_zero_point = kernel_zero_point,
  };
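  // Pad the packed weights with kernel_zero_point so padded filter taps
  // contribute zero after zero-point subtraction.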
  return create_fully_connected_nc(
    input_channels, output_channels,
    input_stride, output_stride,
    kernel, bias, flags,
    0 /* log2(sizeof(filter element)) = log2(sizeof(uint8_t)) */,
    sizeof(int32_t) /* sizeof(bias element) */,
    (xnn_pack_gemm_io_w_function) xnn_pack_qu8_gemm_io_w,
    (xnn_pack_gemm_goi_w_function) xnn_pack_qu8_gemm_goi_w,
    &packing_params, kernel_zero_point /* packed weights padding byte */,
    &params, sizeof(params),
    &xnn_params.qu8.gemm, &xnn_params.qu8.gemm.minmax,
    XNN_INIT_FLAG_QU8,
    xnn_operator_type_fully_connected_nc_qu8,
    caches,
    fully_connected_op_out);
}

enum xnn_status xnn_setup_fully_connected_nc_f16(
    xnn_operator_t fully_connected_op,
    size_t batch_size,
    const void* input,
    void* output,
    pthreadpool_t threadpool)
{
  return setup_fully_connected_nc(
    fully_connected_op, xnn_operator_type_fully_connected_nc_f16,
    batch_size,
    input, output,
    XNN_INIT_FLAG_F16,
    1 /* log2(sizeof(input element)) = log2(sizeof(uint16_t)) */,
    1 /* log2(sizeof(filter element)) = log2(sizeof(uint16_t)) */,
    sizeof(uint16_t) /* sizeof(bias element) */,
    1 /* log2(sizeof(output element)) = log2(sizeof(uint16_t)) */,
    &fully_connected_op->params.f16_minmax,
    sizeof(fully_connected_op->params.f16_minmax),
    pthreadpool_get_threads_count(threadpool));
}

enum xnn_status xnn_setup_fully_connected_nc_f32(
    xnn_operator_t fully_connected_op,
    size_t batch_size,
    const float* input,
    float* output,
    pthreadpool_t threadpool)
{
  return setup_fully_connected_nc(
    fully_connected_op, xnn_operator_type_fully_connected_nc_f32,
    batch_size,
    input, output,
    XNN_INIT_FLAG_F32,
    2 /* log2(sizeof(input element)) = log2(sizeof(float)) */,
    2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
    sizeof(float) /* sizeof(bias element) */,
    2 /* log2(sizeof(output element)) = log2(sizeof(float)) */,
    &fully_connected_op->params.f32_minmax,
    sizeof(fully_connected_op->params.f32_minmax),
    pthreadpool_get_threads_count(threadpool));
}

enum xnn_status xnn_setup_fully_connected_nc_qs8(
    xnn_operator_t fully_connected_op,
    size_t batch_size,
    const int8_t* input,
    int8_t* output,
    pthreadpool_t threadpool)
{
  return setup_fully_connected_nc(
    fully_connected_op, xnn_operator_type_fully_connected_nc_qs8,
    batch_size,
    input, output,
    XNN_INIT_FLAG_QS8,
    0 /* log2(sizeof(input element)) = log2(sizeof(int8_t)) */,
    0 /* log2(sizeof(filter element)) = log2(sizeof(int8_t)) */,
    sizeof(int32_t) /* sizeof(bias element) */,
    0 /* log2(sizeof(output element)) = log2(sizeof(int8_t)) */,
    &fully_connected_op->params.qs8_conv_minmax,
    sizeof(fully_connected_op->params.qs8_conv_minmax),
    pthreadpool_get_threads_count(threadpool));
}

enum xnn_status xnn_setup_fully_connected_nc_qu8(
    xnn_operator_t fully_connected_op,
    size_t batch_size,
    const uint8_t* input,
    uint8_t* output,
    pthreadpool_t threadpool)
{
  return setup_fully_connected_nc(
    fully_connected_op, xnn_operator_type_fully_connected_nc_qu8,
    batch_size,
    input, output,
    XNN_INIT_FLAG_QU8,
    0 /* log2(sizeof(input element)) = log2(sizeof(uint8_t)) */,
    0 /* log2(sizeof(filter element)) = log2(sizeof(uint8_t)) */,
    sizeof(int32_t) /* sizeof(bias element) */,
    0 /* log2(sizeof(output element)) = log2(sizeof(uint8_t)) */,
    &fully_connected_op->params.qu8_conv_minmax,
    sizeof(fully_connected_op->params.qu8_conv_minmax),
    pthreadpool_get_threads_count(threadpool));
}
672