Searched defs:n_batch (Results 1 – 25 of 93) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
neon_tensor_utils.h:29 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
38 int n_batch, in MatrixBatchVectorMultiplyAccumulate()
48 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate()
58 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
69 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
76 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
86 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
100 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
108 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
118 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
sse_tensor_utils.h:39 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
52 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
61 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
72 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
82 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
91 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
104 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
113 const float* __restrict__ scaling_factors, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
122 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
132 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
neon_tensor_utils.cc:227 int n_batch, float* result) { in NeonMatrixBatchVectorMultiplyAccumulate()
276 const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch, in ShuffleVectors()
331 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
459 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
589 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
622 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
694 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
705 const float* scaling_factors, int n_batch, float* __restrict__ result) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
769 int32_t n_batch, int32_t n_input, in NeonMatrixBatchVectorMultiplyImpl()
879 int32_t multiplier, int32_t shift, int32_t n_batch, int32_t n_output, in NeonMatrixBatchVectorAccumulateImpl()
[all …]
sse_tensor_utils.cc:161 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in Avx2MatrixBatchVectorMultiplyAccumulateImpl()
202 const float* __restrict__ scaling_factors, int n_batch, in Avx2MatrixBatchVectorMultiplyAccumulateImpl()
294 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulateImpl()
379 const int8_t* input_to_gate_weights, int32_t n_batch, in SseCpuBackendGemm()
413 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
424 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch, in SseMatrixBatchVectorMultiplyAccumulate()
475 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
597 const float* __restrict__ scaling_factors, int n_batch, in SseSparseMatrixBatchVectorMultiplyAccumulate()
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h:60 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
69 int n_batch, in MatrixBatchVectorMultiplyAccumulate()
78 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
91 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate()
101 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
108 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
118 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
132 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
141 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
151 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
portable_tensor_utils.cc:122 int n_batch, float* result) { in PortableMatrixBatchVectorMultiplyAccumulate()
141 int n_batch, float* __restrict__ result) { in PortableMatrixBatchVectorMultiplyAccumulate()
166 int n_batch, float* __restrict__ result, const float* per_channel_scale, in PortableMatrixBatchVectorMultiplyAccumulate()
209 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4()
234 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in PortableSparseMatrixBatchVectorMultiplyAccumulate1x16()
267 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in PortableSparseMatrixBatchVectorMultiplyAccumulate()
297 const float* scaling_factors, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
331 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulateImpl()
360 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate()
370 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate()
[all …]
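
The recurring signature in these hits, (matrix, m_rows, m_cols, vector, n_batch, result), is TFLite's batched matrix-vector multiply-accumulate: n_batch counts how many input vectors share one weight matrix per call. A minimal sketch of the semantics the overloads above share, modeled on the portable reference path (row-major weights and batch-major activations are assumptions from the signatures, not code copied from the source):

    // Sketch: result[b][r] += dot(matrix.row(r), vector[b]) for every batch b.
    void MatrixBatchVectorMultiplyAccumulateSketch(
        const float* matrix, int m_rows, int m_cols,
        const float* vector, int n_batch, float* result) {
      for (int b = 0; b < n_batch; ++b) {
        const float* v = vector + b * m_cols;      // b-th input vector
        float* out = result + b * m_rows;          // b-th output vector
        for (int r = 0; r < m_rows; ++r) {
          const float* row = matrix + r * m_cols;  // r-th weight row
          float dot = 0.0f;
          for (int c = 0; c < m_cols; ++c) dot += row[c] * v[c];
          out[r] += dot;                           // accumulate, not assign
        }
      }
    }

The int8_t overloads listed above carry extra per-batch scaling_factors (and input_offset/output_zp in the fully quantized paths) but accumulate the same per-batch dot products; the NEON, SSE, and AVX2 files are vectorized versions of this loop.
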
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_lstm_test.cc:36 int n_batch, int n_input, int n_cell, int n_output, int sequence_length, in HybridUnidirectionalLSTMOpModel()
248 const int n_batch = 1; in TEST_F() local
310 const int n_batch = 1; in TEST_F() local
377 const int n_batch = 1; in TEST_P() local
440 const int n_batch = 1; in TEST_P() local
551 const int n_batch = 1; in TEST_F() local
613 const int n_batch = 1; in TEST_P() local
675 const int n_batch = 1; in TEST_P() local
1338 const int n_batch = 2; in TEST_F() local
1405 const int n_batch = 2; in TEST_P() local
[all …]
bidirectional_sequence_lstm_test.cc:32 BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in BidirectionalLSTMOpModel()
430 const int n_batch = 1; in TEST_P() local
596 const int n_batch = 2; in TEST_P() local
762 const int n_batch = 1; in TEST() local
923 const int n_batch = 1; in TEST() local
1074 const int n_batch = 1; in TEST() local
1226 const int n_batch = 2; in TEST() local
1929 const int n_batch = 2; in TEST() local
2630 const int n_batch = 1; in TEST_P() local
2804 const int n_batch = 1; in TEST_P() local
lstm_eval.cc:47 float* output, int m_rows, int m_cols, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
187 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat()
255 void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state, in UpdateLstmCellFloat()
302 void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output, in CalculateLstmOutputFloat()
363 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateHybrid()
475 int n_batch, int n_cell, int n_output, const float* cell_state, in CalculateLstmOutputHybrid()
548 const int n_batch, const int n_input, const int n_output, const int n_cell, in CalculateLstmGateInteger8x8_16()
613 void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state, in UpdateLstmCellInteger()
659 int n_batch, int n_cell, int n_output, const int16_t* cell_state, in CalculateLstmOutputInteger8x8_16()
713 const int n_batch, const int n_input, const int n_output, const int n_cell, in CalculateLstmGateInteger8x8_8()
[all …]
lstm_test.cc:39 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
413 const int n_batch = 1; in TEST_P() local
490 const int n_batch = 1; in TEST_P() local
561 const int n_batch = 2; in TEST_P() local
1144 const int n_batch = 2; in TEST_P() local
1239 const int n_batch = 2; in TEST_P() local
1314 LSTMIntegerOpModel(int n_batch, int n_input, int n_cell, int n_output, in LSTMIntegerOpModel()
1598 const int n_batch = 2; in TEST() local
1760 const int n_batch = 2; in TEST() local
1933 const int n_batch = 2; in TEST() local
[all …]
unidirectional_sequence_gru_test.cc:34 explicit GRUOpModel(int n_batch, int n_input, int n_output, in GRUOpModel()
101 const int n_batch = 2; in TEST() local
optional_tensor_test.cc:31 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
229 const int n_batch = 1; in TEST() local
unidirectional_sequence_gru.cc:40 const int n_batch = input->dims->data[1]; in GruImpl() local
142 const int n_batch = input->dims->data[1]; in Prepare() local
/aosp_15_r20/packages/modules/NeuralNetworks/common/
QuantUtils.cpp:12 int n_batch, int n_input, int16_t* output) { in ApplyLayerNorm()
86 void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, int16_t* output) { in ApplySigmoid()
99 void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, int n_input, int shift, in CwiseMul()
113 int32_t n_batch, int32_t n_input, int32_t output_zp, int8_t* output) { in CwiseMul()
138 void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, int n_input, in CwiseAdd()
150 void CwiseClipping(int16_t* input, const int16_t clipping_value, int32_t n_batch, int32_t n_input) { in CwiseClipping()
164 void CwiseClipping(int8_t* input, const int8_t clipping_value, int32_t n_batch, int32_t n_input) { in CwiseClipping()
179 const int16_t* batch_vector, int n_batch, in VectorBatchVectorCwiseProductAccumulate()
QuantUtils.h:36 int32_t shift, int32_t n_batch, int32_t n_input, in MatrixBatchVectorMultiplyAccumulate()
154 void ApplyTanh(const int16_t* input, int32_t n_batch, int32_t n_input, int16_t* output) { in ApplyTanh()
167 inline void ApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch, int32_t n_input, in ApplyTanh()
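
The QuantUtils hits above are NNAPI's quantized-LSTM element-wise helpers; in each of them n_batch * n_input is the flat element count of the activation buffer. As one hedged example, the CwiseClipping overloads plausibly reduce to a symmetric in-place clamp; this is a sketch written from the signature, not the actual source:

    #include <algorithm>
    #include <cstdint>

    // Sketch: clamp every element of an n_batch x n_input buffer to
    // [-clipping_value, clipping_value], in place.
    void CwiseClippingSketch(int16_t* input, int16_t clipping_value,
                             int32_t n_batch, int32_t n_input) {
      for (int32_t i = 0; i < n_batch * n_input; ++i) {
        input[i] = std::clamp<int16_t>(input[i], -clipping_value, clipping_value);
      }
    }

The int8_t overload listed above would mirror this at 8-bit width.
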
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/
portable_tensor_utils.h:51 void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch, in VectorBatchVectorAdd()
73 const T* batch_vector, int n_batch, in VectorBatchVectorCwiseProduct()
101 int n_batch, T* result) { in VectorBatchVectorCwiseProductAccumulate()
112 void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch, in VectorBatchVectorAssign()
153 inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch, in BatchQuantizeFloats()
419 int v_size, int n_batch, in BatchVectorBatchVectorDotProduct()
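
These templated VectorBatchVector* helpers all broadcast one length-v_size vector across the n_batch rows of a batch buffer. A sketch of the pattern, using the cwise-product-accumulate variant (illustrative only; batch-major layout assumed, as in the matmul sketch above):

    // Sketch: result[b][i] += vector[i] * batch_vector[b][i] for each row b.
    template <typename T>
    void VectorBatchVectorCwiseProductAccumulateSketch(
        const T* vector, int v_size, const T* batch_vector, int n_batch,
        T* result) {
      for (int b = 0; b < n_batch; ++b) {
        for (int i = 0; i < v_size; ++i) {
          result[b * v_size + i] += vector[i] * batch_vector[b * v_size + i];
        }
      }
    }

The Add, Assign, and DotProduct siblings in the same header follow the same batch-broadcast loop with a different inner operation.
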
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc:35 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
265 const int n_batch = 1; in TEST_P() local
339 const int n_batch = 1; in TEST_P() local
407 const int n_batch = 1; in TEST_P() local
970 const int n_batch = 1; in TEST_F() local
1056 const int n_batch = 1; in TEST_F() local
1120 const int n_batch = 1; in TEST_F() local
/aosp_15_r20/packages/modules/NeuralNetworks/common/cpu_operations/
LSTMTest.cpp:78 LSTMOpModel(uint32_t n_batch, uint32_t n_input, uint32_t n_cell, uint32_t n_output, in LSTMOpModel()
273 const int n_batch = 1; in TEST() local
377 const int n_batch = 1; in TEST() local
474 const int n_batch = 2; in TEST() local
LayerNormLSTMTest.cpp:87 LayerNormLSTMOpModel(uint32_t n_batch, uint32_t n_input, uint32_t n_cell, uint32_t n_output, in LayerNormLSTMOpModel()
287 const int n_batch = 2; in TEST() local
/aosp_15_r20/external/tensorflow/tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/
lstm.cc:46 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat()
100 void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state, in UpdateLstmCellFloat()
124 int n_batch, int n_cell, int n_output, const float* cell_state, in CalculateLstmOutputCalibration()
181 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepCalibration()
294 int max_time, n_batch; in EvalCalibration() local
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
bidirectional_sequence_lstm.mod.py:22 n_batch, argument
455 n_batch = 1 variable
503 n_batch = 1 variable
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
quantized_lstm.mod.py:20 n_batch = 2 variable
107 n_batch = 1 variable
layer_norm_lstm.mod.py:22 n_batch = 2 variable
194 n_batch = 2 variable
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
batch_norm_kernel.cpp:80 int64_t n_batch = input.size(0); in batch_norm_cpu_contiguous_impl() local
132 int64_t n_batch = input.size(0); in batch_norm_cpu_channels_last_impl() local
183 int64_t n_batch = input.size(0); in batch_norm_cpu_collect_stats_contiguous_impl() local
414 int64_t n_batch = input.size(0); in batch_norm_cpu_backward_contiguous_impl() local
716 int64_t n_batch = input.size(0); in batch_norm_cpu_contiguous_impl() local
780 int64_t n_batch = input.size(0); in batch_norm_cpu_channels_last_impl() local
835 int64_t n_batch = input.size(0); in batch_norm_cpu_collect_stats_contiguous_internal() local
1004 int64_t n_batch = input.size(0); in batch_norm_cpu_backward_contiguous_internal() local
/aosp_15_r20/external/pytorch/aten/src/ATen/
SparseCsrTensorUtils.h:335 int64_t n_batch = numBatchDimensions(self); in getBlockSize() local
341 int64_t n_batch = numBatchDimensions(self); in getSymIntBlockSize() local
