
Searched refs:cpu_backend_context (Results 1 – 25 of 363) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/optimized/sparse_ops/
fully_connected.h
85 int thread_end, const CpuBackendContext& cpu_backend_context) { in FullyConnectedSparseWeight1x16Impl() argument
121 int thread_end, const CpuBackendContext& cpu_backend_context) { in FullyConnectedSparseWeight1x4Impl() argument
174 cpu_backend_context(cpu_backend_context_x) {} in FullyConnectedSparseWeight1x4Task()
180 thread_end, cpu_backend_context); in Run()
196 const CpuBackendContext& cpu_backend_context; member
205 CpuBackendContext* cpu_backend_context) { in FullyConnectedSparseWeight1x16() argument
216 *cpu_backend_context); in FullyConnectedSparseWeight1x16()
229 CpuBackendContext* cpu_backend_context) { in FullyConnectedSparseWeight1x4() argument
233 const int max_threads = cpu_backend_context->max_num_threads(); in FullyConnectedSparseWeight1x4()
241 *cpu_backend_context); in FullyConnectedSparseWeight1x4()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_test.cc
260 CpuBackendContext* cpu_backend_context) { in PerformGemmThenCompareResultsThenAgainWithClamping() argument
263 dst_data->data(), params, cpu_backend_context); in PerformGemmThenCompareResultsThenAgainWithClamping()
276 dst_data->data(), params_with_clamp, cpu_backend_context); in PerformGemmThenCompareResultsThenAgainWithClamping()
285 dst_data->data(), params_with_clamp, cpu_backend_context); in PerformGemmThenCompareResultsThenAgainWithClamping()
317 CpuBackendContext* cpu_backend_context) { in BisectReasonableMultiplierExponent() argument
332 dst_data->data(), params_copy, cpu_backend_context); in BisectReasonableMultiplierExponent()
337 dst_params, dst_data, params_copy, cpu_backend_context); in BisectReasonableMultiplierExponent()
341 dst_params, dst_data, params_copy, cpu_backend_context); in BisectReasonableMultiplierExponent()
370 CpuBackendContext cpu_backend_context; in TestSomeGemm() local
372 cpu_backend_context.SetMaxNumThreads(1 + (random_engine() % 8)); in TestSomeGemm()
[all …]
cpu_backend_threadpool.h
38 CpuBackendContext* cpu_backend_context) { in Execute() argument
39 TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads()); in Execute()
40 cpu_backend_context->ruy_context()->mutable_thread_pool()->Execute( in Execute()
50 CpuBackendContext* cpu_backend_context) {
51 TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
52 cpu_backend_context->gemmlowp_context()->workers_pool()->Execute(tasks_count,
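For orientation, here is a minimal sketch (not taken from the indexed files; the task and function names are hypothetical) of the pattern these cpu_backend_threadpool.h hits expose: a kernel defines tasks deriving from cpu_backend_threadpool::Task, caps the task count by max_num_threads() as the DCHECK above requires, and dispatches them through cpu_backend_threadpool::Execute(), which runs them on the ruy or gemmlowp pool owned by the CpuBackendContext.

#include <algorithm>
#include <vector>

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"

namespace {

// Hypothetical task: sums one slice of a buffer. Real kernels split GEMM rows
// or convolution output ranges the same way.
struct PartialSumTask : tflite::cpu_backend_threadpool::Task {
  PartialSumTask(const float* data, int begin, int end, float* out)
      : data(data), begin(begin), end(end), out(out) {}
  void Run() override {
    float acc = 0.0f;
    for (int i = begin; i < end; ++i) acc += data[i];
    *out = acc;
  }
  const float* data;
  int begin;
  int end;
  float* out;
};

float ParallelSum(const float* data, int size,
                  tflite::CpuBackendContext* cpu_backend_context) {
  // Never schedule more tasks than the context allows.
  const int thread_count =
      std::min(4, cpu_backend_context->max_num_threads());
  std::vector<float> partials(thread_count, 0.0f);
  std::vector<PartialSumTask> tasks;
  tasks.reserve(thread_count);
  for (int i = 0; i < thread_count; ++i) {
    tasks.emplace_back(data, size * i / thread_count,
                       size * (i + 1) / thread_count, &partials[i]);
  }
  // Runs all tasks on the thread pool owned by the CpuBackendContext.
  tflite::cpu_backend_threadpool::Execute(thread_count, tasks.data(),
                                          cpu_backend_context);
  float total = 0.0f;
  for (float p : partials) total += p;
  return total;
}

}  // namespace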
cpu_backend_context.cc
112 auto* cpu_backend_context = static_cast<CpuBackendContext*>( in GetFromContext() local
114 if (cpu_backend_context == nullptr) { in GetFromContext()
117 cpu_backend_context = new CpuBackendContext(); in GetFromContext()
118 cpu_backend_context->SetMaxNumThreads(context->recommended_num_threads); in GetFromContext()
120 std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context)); in GetFromContext()
123 return cpu_backend_context; in GetFromContext()
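A hedged sketch of the usage pattern behind these GetFromContext() hits (and the add_n.cc and mirror_pad.cc hits below); the Prepare function shown is hypothetical. A kernel asks the TfLiteContext for the shared CpuBackendContext, which is created lazily on first use and sized from recommended_num_threads, then uses its thread budget to size per-thread work.

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"

// Hypothetical kernel Prepare(): fetch the shared, lazily created backend
// context and use its thread budget, e.g. to size per-thread scratch buffers.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  tflite::CpuBackendContext* cpu_backend_context =
      tflite::CpuBackendContext::GetFromContext(context);
  const int thread_count = cpu_backend_context->max_num_threads();
  // ... allocate thread_count scratch slices here, as add_n.cc does below ...
  (void)thread_count;
  (void)node;
  return kTfLiteOk;
}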
add_n.cc
72 CpuBackendContext* cpu_backend_context = in Prepare() local
81 cpu_backend_context->max_num_threads()); in Prepare()
114 CpuBackendContext* cpu_backend_context = in EvalAddN() local
121 GetTensorData<T>(scratch_tensor), cpu_backend_context); in EvalAddN()
gru_cell.cc
44 tflite::CpuBackendContext* cpu_backend_context) { in GruCell() argument
65 activation, cpu_backend_context); in GruCell()
82 output_shape, output, cpu_backend_context); in GruCell()
unidirectional_sequence_gru.cc
38 tflite::CpuBackendContext* cpu_backend_context) { in GruImpl() argument
80 cpu_backend_context); in GruImpl()
261 auto cpu_backend_context = CpuBackendContext::GetFromContext(context); in Eval() local
266 cpu_backend_context); in Eval()
mirror_pad.cc
201 CpuBackendContext* cpu_backend_context = in Eval() local
203 const int thread_count = cpu_backend_context->max_num_threads(); in Eval()
226 cpu_backend_context); in Eval()
BUILD
352 name = "cpu_backend_context",
354 "cpu_backend_context.cc",
357 "cpu_backend_context.h",
397 ":cpu_backend_context",
414 ":cpu_backend_context",
442 ":cpu_backend_context",
464 ":cpu_backend_context",
682 ":cpu_backend_context",
799 "//tensorflow/lite/kernels:cpu_backend_context",
827 "//tensorflow/lite/kernels:cpu_backend_context",
[all …]
fully_connected.cc
725 CpuBackendContext* cpu_backend_context = in EvalHybrid() local
746 const int max_threads = cpu_backend_context->max_num_threads(); in EvalHybrid()
786 cpu_backend_context); in EvalHybrid()
795 CpuBackendContext* cpu_backend_context) { in FullyConnectedInt8() argument
818 cpu_backend_context); in FullyConnectedInt8()
852 CpuBackendContext* cpu_backend_context) { in FullyConnectedPerChannelInt8() argument
878 GetTensorData<int8_t>(output), cpu_backend_context); in FullyConnectedPerChannelInt8()
gru_cell.h
39 tflite::CpuBackendContext* cpu_backend_context);
reduce.cc
664 CpuBackendContext* cpu_backend_context = in ReduceAllDims() local
666 int thread_count = cpu_backend_context->max_num_threads(); in ReduceAllDims()
690 cpu_backend_context); in ReduceAllDims()
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h
37 DstScalar* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnectedPerChannel() argument
63 (cpu_backend_context != nullptr) && cpu_backend_context->use_caching(); in FullyConnectedPerChannel()
97 cpu_backend_context); in FullyConnectedPerChannel()
106 DstScalar* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnected() argument
135 (cpu_backend_context != nullptr) && cpu_backend_context->use_caching(); in FullyConnected()
166 cpu_backend_context); in FullyConnected()
mean.h
185 float output_scale, CpuBackendContext* cpu_backend_context) { in Mean() argument
221 std::min(thread_count, cpu_backend_context->max_num_threads()); in Mean()
244 cpu_backend_context); in Mean()
transpose_conv.h
36 CpuBackendContext* cpu_backend_context) { in TransposeConvV2() argument
93 col2im_data, gemm_params, cpu_backend_context); in TransposeConvV2()
conv.h
39 InputScalar* im2col_data, CpuBackendContext* cpu_backend_context) { in ConvPerChannel() argument
124 cpu_backend_context); in ConvPerChannel()
depthwise_conv.h
1811 const CpuBackendContext& cpu_backend_context) {
1895 const CpuBackendContext& cpu_backend_context) {
1899 output_data, thread_start, thread_end, thread_dim, cpu_backend_context);
1928 cpu_backend_context(cpu_backend_context_x) {}
1934 thread_end_, thread_dim_, cpu_backend_context);
1952 const CpuBackendContext& cpu_backend_context;
1975 CpuBackendContext* cpu_backend_context) {
1996 const int max_threads = cpu_backend_context->max_num_threads();
2004 *cpu_backend_context);
2017 thread_end, thread_dim, *cpu_backend_context);
[all …]
depthwise_conv_hybrid.h
401 CpuBackendContext* cpu_backend_context) { in DepthwiseConvHybridPerChannel() argument
422 const int max_threads = cpu_backend_context->max_num_threads(); in DepthwiseConvHybridPerChannel()
449 cpu_backend_context); in DepthwiseConvHybridPerChannel()
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
depthwiseconv_multithread.h
127 CpuBackendContext* cpu_backend_context) { in DepthwiseConv() argument
135 const int max_threads = cpu_backend_context->max_num_threads(); in DepthwiseConv()
183 cpu_backend_context); in DepthwiseConv()
optimized_ops.h
273 float* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnected() argument
301 cpu_backend_context); in FullyConnected()
309 uint8* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnected() argument
364 cpu_backend_context); in FullyConnected()
372 int16* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnected() argument
425 cpu_backend_context); in FullyConnected()
753 CpuBackendContext* cpu_backend_context) { in ShuffledFullyConnected() argument
841 LegacyHowManyThreads<kKernelRows>(cpu_backend_context->max_num_threads(), in ShuffledFullyConnected()
873 cpu_backend_context); in ShuffledFullyConnected()
912 float* im2col_data, CpuBackendContext* cpu_backend_context) { in Conv() argument
[all …]
reduce.h
198 float output_scale, CpuBackendContext* cpu_backend_context) { in Mean() argument
234 std::min(thread_count, cpu_backend_context->max_num_threads()); in Mean()
257 cpu_backend_context); in Mean()
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/
BUILD
319 "//tensorflow/lite/kernels:cpu_backend_context",
356 "//tensorflow/lite/kernels:cpu_backend_context",
793 "//tensorflow/lite/kernels:cpu_backend_context",
816 "//tensorflow/lite/kernels:cpu_backend_context",
940 "//tensorflow/lite/kernels:cpu_backend_context",
975 "//tensorflow/lite/kernels:cpu_backend_context",
/aosp_15_r20/external/tensorflow/tensorflow/lite/tools/benchmark/
benchmark_tflite_model.cc
678 std::unique_ptr<tflite::CpuBackendContext> cpu_backend_context( in InitInterpreter() local
680 cpu_backend_context->SetUseCaching(true); in InitInterpreter()
681 cpu_backend_context->SetMaxNumThreads(num_threads); in InitInterpreter()
683 std::move(cpu_backend_context)); in InitInterpreter()
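The benchmark hit above configures its own context before handing it to the interpreter. A sketch of that wiring follows, assuming the caller keeps the ExternalCpuBackendContext alive for the interpreter's lifetime; the function name is illustrative, not from the indexed source.

#include <memory>
#include <utility>

#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"

// Returns the external context; the interpreter does not take ownership, so
// the returned object must outlive it.
std::unique_ptr<tflite::ExternalCpuBackendContext> UseCachingCpuBackend(
    tflite::Interpreter* interpreter, int num_threads) {
  auto cpu_backend_context = std::make_unique<tflite::CpuBackendContext>();
  cpu_backend_context->SetUseCaching(true);          // reuse prepacked weights across runs
  cpu_backend_context->SetMaxNumThreads(num_threads);

  auto external_context =
      std::make_unique<tflite::ExternalCpuBackendContext>();
  external_context->set_internal_backend_context(
      std::move(cpu_backend_context));
  interpreter->SetExternalContext(kTfLiteCpuBackendContext,
                                  external_context.get());
  return external_context;
}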
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/internal/reference/
conv.h
115 uint8_t* im2col_data, void* cpu_backend_context) { in Conv() argument
116 (void)cpu_backend_context; // only used in optimized code. in Conv()
/aosp_15_r20/external/tensorflow/tensorflow/lite/
Android.bp
80 "kernels/cpu_backend_context.cc",
