/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/
  copy_tensor.cc
     55: void CopyHostToDevice(const Tensor* input, Allocator* cpu_allocator,   [in CopyHostToDevice(), argument]
     61: Tensor copy(cpu_allocator, DT_VARIANT, input->shape());   [in CopyHostToDevice()]
     70: cpu_allocator, edge_name, sync_dst_compute,   [in CopyHostToDevice()]
     75: CopyHostToDevice(&from, cpu_allocator, out_allocator, edge_name, dst,   [in CopyHostToDevice()]
    123: Allocator* cpu_allocator, Allocator* out_allocator,   [in CopyDeviceToDevice(), argument]
    131: Tensor copy(cpu_allocator, DT_VARIANT, input->shape());   [in CopyDeviceToDevice()]
    139: auto copier = [copy_function, cpu_allocator, src, dst, src_alloc_attr,   [in CopyDeviceToDevice()]
    147: CopyDeviceToDevice(copy_function, cpu_allocator, out_allocator,   [in CopyDeviceToDevice()]
    225: Allocator* cpu_allocator = src->GetAllocator(host_alloc_attrs);   [in ViaDMA(), local]
    241: CopyDeviceToDevice(ri.copy_function, cpu_allocator, out_allocator,   [in ViaDMA()]
    [all …]
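A minimal sketch, not the real copy path: variant tensors cannot be DMA-ed bit for bit, so the copy routines first build a host-side DT_VARIANT tensor of the same shape from the CPU allocator (the hits at lines 61 and 131 above) and then copy each contained element. `MakeVariantStagingCopy` is an illustrative name.

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

Tensor MakeVariantStagingCopy(const Tensor& input, Allocator* cpu_allocator) {
  // The Variant wrappers live in host memory; only the tensors nested
  // inside them go through the device copy function.
  return Tensor(cpu_allocator, DT_VARIANT, input.shape());
}

}  // namespace tensorflow
```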
  single_threaded_cpu_device.cc
     65: if (!parsed.FromProto(cpu_allocator(), tensor_proto)) {   [in MakeTensorFromProto()]
     86: return cpu_allocator();   [in GetAllocator()]
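Both hits follow the device-interface contract: GetAllocator() hands out the process-wide CPU allocator, and MakeTensorFromProto() materializes incoming protos into host memory. A sketch of the parsing half, assuming a TensorFlow build (`TensorFromProtoOnHost` is an illustrative helper):

```cpp
#include <utility>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/errors.h"

namespace tensorflow {

Status TensorFromProtoOnHost(const TensorProto& tensor_proto, Tensor* out) {
  Tensor parsed(tensor_proto.dtype());
  // FromProto allocates the backing buffer from the given allocator.
  if (!parsed.FromProto(cpu_allocator(), tensor_proto)) {
    return errors::InvalidArgument("Cannot parse tensor from proto: ",
                                   tensor_proto.DebugString());
  }
  *out = std::move(parsed);
  return OkStatus();
}

}  // namespace tensorflow
```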
  mkl_cpu_allocator.h
    274: return cpu_allocator()->AllocateRaw(kAlignment, size);   [in MallocHook()]
    279: cpu_allocator()->DeallocateRaw(ptr);   [in FreeHook()]
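These two hits are the whole trick: MKL's internal malloc/free callbacks are routed through TensorFlow's CPU allocator so library allocations are tracked like any other TF allocation. A sketch of the hook shape; `kAlignment` here is an assumed stand-in for the constant the real header defines.

```cpp
#include <cstddef>

#include "tensorflow/core/framework/allocator.h"

namespace {

constexpr size_t kAlignment = 64;  // assumption; see mkl_cpu_allocator.h

// Registered with MKL as its small-allocation malloc callback.
void* MallocHook(size_t size) {
  return tensorflow::cpu_allocator()->AllocateRaw(kAlignment, size);
}

// Registered with MKL as the matching free callback.
void FreeHook(void* ptr) {
  tensorflow::cpu_allocator()->DeallocateRaw(ptr);
}

}  // namespace
```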
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/flex/
  buffer_map_util.cc
     51: proto->set_allocator_name(tensorflow::cpu_allocator()->Name());   [in FillAllocationDescription()]
     59: data(), tensorflow::cpu_allocator());   [in LogAllocation()]
     67: tensorflow::cpu_allocator(), false);   [in LogDeallocation()]
     76: return tensorflow::cpu_allocator()->AllocateRaw(EIGEN_MAX_ALIGN_BYTES,   [in MaybeAllocateTensorflowBuffer()]
     99: tensorflow::cpu_allocator()->DeallocateRaw(data());   [in ~TfLiteTensorBuffer()]
    110: tensorflow::cpu_allocator(), static_cast<tensorflow::tstring*>(data()),   [in ~StringTfLiteTensorBuffer()]
    119: tensorflow::cpu_allocator(), num_strings,   [in StringTfLiteTensorBuffer()]
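On the TF Lite flex path, buffers that must be shareable with TensorFlow are carved directly out of tensorflow::cpu_allocator() with Eigen's maximum alignment, and returned to the same allocator on destruction. A simplified sketch (`AlignedHostBuffer` is an illustrative name, not the real TfLiteTensorBuffer):

```cpp
#include <cstddef>

#include "Eigen/Core"  // provides EIGEN_MAX_ALIGN_BYTES
#include "tensorflow/core/framework/allocator.h"

class AlignedHostBuffer {
 public:
  // Eigen-aligned so TensorFlow can adopt the buffer without copying.
  explicit AlignedHostBuffer(size_t num_bytes)
      : data_(tensorflow::cpu_allocator()->AllocateRaw(EIGEN_MAX_ALIGN_BYTES,
                                                       num_bytes)) {}
  ~AlignedHostBuffer() {
    if (data_ != nullptr) tensorflow::cpu_allocator()->DeallocateRaw(data_);
  }
  void* data() const { return data_; }

 private:
  void* data_ = nullptr;
};
```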
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
  unary_ops_composition_test.cc
     79: Allocator* cpu_allocator = device_->GetAllocator(host_alloc_attrs);   [in RunComposedOp(), local]
     83: Tensor input_on_host(cpu_allocator, dtype, shape);   [in RunComposedOp()]
     96: Tensor expected_tensor(cpu_allocator, dtype, shape);   [in RunComposedOp()]
    100: Tensor output_on_host(cpu_allocator, output->dtype(), output->shape());   [in RunComposedOp()]
/aosp_15_r20/external/tensorflow/tensorflow/core/framework/
  allocator_test.cc
    143: Allocator* a = cpu_allocator();   [in TEST()]
    182: Allocator* a = cpu_allocator();   [in TEST()]
    193: Allocator* a = cpu_allocator();   [in TEST()]
    205: Allocator* a = cpu_allocator();   [in TEST()]
    222: Allocator* a = cpu_allocator();   [in TEST()]
    331: Allocator* a = cpu_allocator();   [in BM_Allocation()]
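Each test grabs the same singleton and exercises it directly. A minimal test in that style, assuming TF's gtest setup (`CpuAllocatorSketch` is an illustrative test name):

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

TEST(CpuAllocatorSketch, AllocateAndFree) {
  Allocator* a = cpu_allocator();
  float* buf = a->Allocate<float>(1024);  // typed convenience wrapper
  ASSERT_NE(buf, nullptr);
  buf[0] = 1.0f;  // the memory is immediately usable
  a->Deallocate<float>(buf, 1024);
}

}  // namespace tensorflow
```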
  op_kernel_test.cc
    172: std::move(device_type), &device_, cpu_allocator(),   [in ExpectSuccess()]
    188: CreateOpKernel(std::move(device_type), &device_, cpu_allocator(),   [in ExpectFailure()]
    356: return cpu_allocator();   [in GetAllocator()]
    367: CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(),   [in TEST_F()]
    401: return cpu_allocator();   [in GetAllocator()]
    412: return cpu_allocator();   [in GetScopedAllocator()]
    452: DEVICE_CPU, params.device, cpu_allocator(),   [in TEST_F()]
    623: DEVICE_CPU, params.device, cpu_allocator(),   [in TEST_F()]
    909: cpu_allocator(), node_def,   [in BM_InputRangeHelper()]
    984: cpu_allocator(), node_def,   [in BM_TraceString()]
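In these tests cpu_allocator() plays two roles: it is the allocator passed to CreateOpKernel() and the allocator the fake device returns from GetAllocator(). A sketch of the construction call (`MakeCpuKernel` is an illustrative wrapper; `node_def` must name a registered op):

```cpp
#include <memory>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/public/version.h"

namespace tensorflow {

std::unique_ptr<OpKernel> MakeCpuKernel(DeviceBase* device,
                                        const NodeDef& node_def,
                                        Status* status) {
  // The kernel under test will draw its allocations from cpu_allocator().
  return CreateOpKernel(DEVICE_CPU, device, cpu_allocator(), node_def,
                        TF_GRAPH_DEF_VERSION, status);
}

}  // namespace tensorflow
```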
  op_kernel_test_base.h
     87: cpu_allocator(), def,
    121: cpu_allocator(), def,   [in ExpectFailure()]
  cpu_allocator_impl.cc
    193: explicit CPUSubAllocator(CPUAllocator* cpu_allocator)   [in CPUSubAllocator(), argument]
    194: : SubAllocator({}, {}), cpu_allocator_(cpu_allocator) {}   [in CPUSubAllocator()]
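CPUSubAllocator adapts the CPU allocator to the SubAllocator interface, which pooling/BFC-style allocators use as their backing store. A sketch of the same forwarding idea, assuming the SubAllocator interface in tensorflow/core/framework/allocator.h (`ForwardingSubAllocator` is an illustrative name):

```cpp
#include <cstddef>

#include "tensorflow/core/framework/allocator.h"

namespace tensorflow {

class ForwardingSubAllocator : public SubAllocator {
 public:
  explicit ForwardingSubAllocator(Allocator* wrapped)
      : SubAllocator({}, {}), wrapped_(wrapped) {}  // no alloc/free visitors

  void* Alloc(size_t alignment, size_t num_bytes,
              size_t* bytes_received) override {
    *bytes_received = num_bytes;  // no over-allocation in this sketch
    return wrapped_->AllocateRaw(alignment, num_bytes);
  }

  void Free(void* ptr, size_t num_bytes) override {
    wrapped_->DeallocateRaw(ptr);
  }

  bool SupportsCoalescing() const override { return false; }

 private:
  Allocator* const wrapped_;
};

}  // namespace tensorflow
```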
  tensor_test.cc
    1503: Allocator* allocator = cpu_allocator();   [in BM_CreateAndDestroyWithBuf()]
    1513: Allocator* allocator = cpu_allocator();   [in BM_CreateAndCopyCtrWithBuf()]
    1524: Allocator* allocator = cpu_allocator();   [in BM_CreateAndMoveCtrWithBuf()]
    1537: Allocator* allocator = cpu_allocator();   [in BM_CreateAndDestroyHostScalarNonOptimized()]
    1559: Allocator* allocator = cpu_allocator();   [in BM_FromProto()]
    1575: Allocator* allocator = cpu_allocator();   [in BM_FromProtoCompressed()]
    1592: Allocator* allocator = cpu_allocator();   [in BM_FromProtoCompressedZero()]
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/gpu/
  gpu_device_factory.cc
     34: Allocator* gpu_allocator, Allocator* cpu_allocator)   [in GPUDevice(), argument]
     36: physical_device_desc, gpu_allocator, cpu_allocator,   [in GPUDevice()]
     68: Allocator* cpu_allocator) override {   [in CreateGPUDevice(), argument]
     71: gpu_allocator, cpu_allocator);   [in CreateGPUDevice()]
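A GPUDevice is constructed with both allocators because some of its tensors (shapes, control scalars, staging buffers) must live in host memory. A sketch of the selection logic (`DualAllocatorDevice` is illustrative, not the real GPUDevice):

```cpp
#include "tensorflow/core/framework/allocator.h"

namespace tensorflow {

class DualAllocatorDevice {
 public:
  DualAllocatorDevice(Allocator* gpu_allocator, Allocator* cpu_allocator)
      : gpu_allocator_(gpu_allocator), cpu_allocator_(cpu_allocator) {}

  Allocator* GetAllocator(AllocatorAttributes attr) const {
    // Requests flagged on_host are served from CPU memory even on a
    // GPU device.
    return attr.on_host() ? cpu_allocator_ : gpu_allocator_;
  }

 private:
  Allocator* gpu_allocator_;
  Allocator* cpu_allocator_;
};

}  // namespace tensorflow
```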
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/sparse/
  dense_to_csr_sparse_matrix_op.cc
     89: Tensor dense_shape(cpu_allocator(), DT_INT64, TensorShape({rank}));   [in Compute()]
    103: Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1}));   [in Compute()]
    104: Tensor csr_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz}));   [in Compute()]
    105: Tensor csr_row_ptr(cpu_allocator(), DT_INT32,   [in Compute()]
    249: Tensor dense_shape_t(cpu_allocator(), DT_INT64, TensorShape({rank}));   [in ComputeAsync()]
    257: Tensor batch_ptr_t(cpu_allocator(), DT_INT32,   [in ComputeAsync()]
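This file and the sparse kernels listed below share one convention: CSR metadata (dense shape, batch pointers, row pointers, column indices) is small and index-like, so it is allocated on the host with cpu_allocator() even when the values live on a device. A sketch of that allocation step, with sizes following the lines above (`AllocateCsrComponents` is an illustrative name):

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {

void AllocateCsrComponents(int64_t rank, int64_t batch_size,
                           int64_t num_rows, int64_t total_nnz) {
  Tensor dense_shape(cpu_allocator(), DT_INT64, TensorShape({rank}));
  Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1}));
  Tensor csr_row_ptr(cpu_allocator(), DT_INT32,
                     TensorShape({batch_size * (num_rows + 1)}));
  Tensor csr_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz}));
  // ... fill the buffers, then assemble a CSRSparseMatrix from them ...
}

}  // namespace tensorflow
```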
  sparse_mat_mul_op.cc
    154: Tensor output_shape(cpu_allocator(), DT_INT64, TensorShape({rank}));   [in Compute()]
    161: Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1}));   [in Compute()]
    215: Tensor output_row_ptr(cpu_allocator(), DT_INT32,   [in Compute()]
    217: Tensor output_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz}));   [in Compute()]
    218: Tensor output_values(cpu_allocator(), DataTypeToEnum<T>::value,   [in Compute()]
    359: Tensor c_dense_shape_t(cpu_allocator(), DT_INT64, TensorShape({rank}));   [in Compute()]
    379: Tensor c_batch_ptr_t(cpu_allocator(), DT_INT32,   [in Compute()]
    501: Tensor c_t(cpu_allocator(), DT_VARIANT, TensorShape({}));   [in Compute()]
  sparse_cholesky_op.cc
     91: Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1}));   [in Compute()]
    177: Tensor output_row_ptr(cpu_allocator(), DT_INT32,   [in Compute()]
    179: Tensor output_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz}));   [in Compute()]
    180: Tensor output_values(cpu_allocator(), DataTypeToEnum<T>::value,   [in Compute()]
  sparse_tensor_to_csr_sparse_matrix_op.cc
     89: Tensor batch_ptr(cpu_allocator(), DT_INT32, batch_ptr_shape);   [in Compute()]
     93: Tensor csr_col_ind(cpu_allocator(), DT_INT32, csr_col_ind_shape);   [in Compute()]
     97: Tensor csr_row_ptr(cpu_allocator(), DT_INT32, csr_row_ind_shape);   [in Compute()]
    204: Tensor batch_ptr_t(cpu_allocator(), DT_INT32,   [in ComputeAsync()]
  softmax_op.cc
     81: Tensor output_t(cpu_allocator(), DT_VARIANT, TensorShape({}));   [in Compute()]
    189: Tensor gradient_t(cpu_allocator(), DT_VARIANT, TensorShape({}));   [in Compute()]
  transpose_op.cc
    115: Tensor output_t(cpu_allocator(), DT_VARIANT, TensorShape({}));   [in Compute()]
    151: Tensor output_dense_shape_t(cpu_allocator(), DT_INT64, TensorShape({rank}));   [in operator()()]
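softmax_op.cc and transpose_op.cc above show the complementary convention: the kernel's output is a Variant payload (a CSRSparseMatrix) wrapped in a scalar DT_VARIANT tensor, and that wrapper is always host-allocated. A minimal sketch (`WrapInVariantScalar` is an illustrative helper):

```cpp
#include <utility>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant.h"

namespace tensorflow {

template <typename T>
Tensor WrapInVariantScalar(T payload) {
  // The Variant wrapper is host metadata, regardless of compute device.
  Tensor output_t(cpu_allocator(), DT_VARIANT, TensorShape({}));
  output_t.scalar<Variant>()() = std::move(payload);
  return output_t;
}

}  // namespace tensorflow
```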
/aosp_15_r20/external/tensorflow/tensorflow/c/
  tf_tensor.cc
     50: return allocate_tensor(operation, len, cpu_allocator());   [in allocate_tensor()]
     56: allocator = cpu_allocator();   [in deallocate_buffer()]
     93: tensorflow::cpu_allocator());   [in TF_AllocateTensor()]
     96: tensorflow::cpu_allocator(), /*owns_memory=*/true);   [in TF_AllocateTensor()]
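In the C API, TF_AllocateTensor's backing buffer comes from the same CPU allocator and is returned to it on deletion. A simplified sketch of the allocate/deallocate pair (the real helpers also record the allocating operation and an owns_memory flag):

```cpp
#include <cstddef>

#include "tensorflow/core/framework/allocator.h"

static void* AllocateBuffer(size_t len) {
  tensorflow::Allocator* a = tensorflow::cpu_allocator();
  return a->AllocateRaw(tensorflow::Allocator::kAllocatorAlignment, len);
}

static void DeallocateBuffer(void* data) {
  tensorflow::cpu_allocator()->DeallocateRaw(data);
}
```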
/aosp_15_r20/external/tensorflow/tensorflow/compiler/jit/kernels/
  xla_ops.cc
    538: Allocator* cpu_allocator = ctx->device()->GetAllocator(host_alloc_attrs);   [in Compute(), local]
    543: Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({}));   [in Compute()]
    545: Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({}));   [in Compute()]
    547: ctx->set_output(0, Tensor(cpu_allocator, DT_STRING, TensorShape({})));   [in Compute()]
    561: Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({}));   [in Compute()]
    564: Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({}));   [in Compute()]
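The XlaCompile kernel emits its compilation key and success flag as tiny tensors that must be host-resident, so it fetches the device's host allocator first. A sketch of that output path (`EmitHostOutputs` is an illustrative helper):

```cpp
#include <string>

#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

void EmitHostOutputs(OpKernelContext* ctx, const std::string& key, bool ok) {
  AllocatorAttributes host_alloc_attrs;
  host_alloc_attrs.set_on_host(true);
  Allocator* cpu_allocator = ctx->device()->GetAllocator(host_alloc_attrs);

  Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({}));
  compilation_key.scalar<tstring>()() = key;

  Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({}));
  compilation_successful.scalar<bool>()() = ok;

  ctx->set_output(0, compilation_key);
  ctx->set_output(1, compilation_successful);
}

}  // namespace tensorflow
```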
/aosp_15_r20/external/tensorflow/tensorflow/core/tpu/
  virtual_device.cc
     76: return cpu_allocator();   [in GetAllocator()]
     83: Allocator* allocator = cpu_allocator();   [in MakeTensorFromProto()]
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/pluggable_device/
  pluggable_device.cc
    144: Allocator* cpu_allocator, bool sync_every_op)   [in PluggableDevice(), argument]
    149: cpu_allocator_(cpu_allocator),   [in PluggableDevice()]
    378: Tensor copy(cpu_allocator(numa_node), DT_VARIANT, parsed.shape());   [in MakeTensorFromProto()]
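Note the third hit: cpu_allocator() also accepts a NUMA node, letting a device pinned to one socket request host memory local to that socket. A minimal sketch of that overload in use (`MakeHostScratch` is illustrative):

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

Tensor MakeHostScratch(int numa_node, DataType dtype,
                       const TensorShape& shape) {
  // numa_node may be port::kNUMANoAffinity when placement is unknown.
  return Tensor(cpu_allocator(numa_node), dtype, shape);
}

}  // namespace tensorflow
```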
/aosp_15_r20/external/tensorflow/tensorflow/core/transforms/utils/
  eval_utils.cc
     62: return tensorflow::cpu_allocator();   [in GetAllocator()]
     70: if (!parsed.FromProto(tensorflow::cpu_allocator(), tensor_proto)) {   [in MakeTensorFromProto()]
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
  immutable_constant_op_test.cc
     44: : memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)),   [in TestReadOnlyMemoryRegion()]
     47: cpu_allocator()->DeallocateRaw(memptr_);   [in ~TestReadOnlyMemoryRegion()]
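The fixture backs a ReadOnlyMemoryRegion with an aligned raw allocation and frees it in the destructor. A sketch of the same shape, assuming ReadOnlyMemoryRegion from tensorflow/core/platform/file_system.h (the class name and the value of kTestAlignment here are assumptions):

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/file_system.h"

namespace tensorflow {

class AllocatorBackedRegion : public ReadOnlyMemoryRegion {
 public:
  static constexpr size_t kTestAlignment = 4096;  // assumption

  explicit AllocatorBackedRegion(uint64 length)
      : memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)),
        length_(length) {}
  ~AllocatorBackedRegion() override {
    cpu_allocator()->DeallocateRaw(memptr_);
  }

  const void* data() override { return memptr_; }
  uint64 length() override { return length_; }

 private:
  void* memptr_;
  uint64 length_;
};

}  // namespace tensorflow
```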
  segment_reduction_ops_test.cc
     72: CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),   [in BM_UnsortedSegmentReduction()]
    136: CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(),   [in BM_SegmentReduction()]
  stack.cc
    256: Allocator* cpu_allocator = device->GetAllocator(host_alloc_attrs);   [in ComputeAsync(), local]
    258: new Tensor(cpu_allocator, tensor.dtype(), tensor.shape());   [in ComputeAsync()]
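When a Stack spills a tensor to host memory, the destination is built with the device's host allocator before the asynchronous device-to-host copy is issued. A sketch of that step (`AllocateHostCopy` is an illustrative helper):

```cpp
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

Tensor* AllocateHostCopy(DeviceBase* device, const Tensor& tensor) {
  AllocatorAttributes host_alloc_attrs;
  host_alloc_attrs.set_on_host(true);
  Allocator* cpu_allocator = device->GetAllocator(host_alloc_attrs);
  // The caller owns the result and enqueues the device-to-host copy.
  return new Tensor(cpu_allocator, tensor.dtype(), tensor.shape());
}

}  // namespace tensorflow
```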