/aosp_15_r20/external/tensorflow/tensorflow/compiler/xrt/

xrt_state.cc
      73  XRTMemoryManager* memory_manager, xla::Backend* backend, int device_ordinal,   in AllocateScopedShapedBuffer()
     121  int device_ordinal,   in XRTBufferAllocation()
     148  XRTTupleAllocation::XRTTupleAllocation(int device_ordinal,   in XRTTupleAllocation()
     172  xla::Backend* backend, int device_ordinal, XRTTupleAllocation** allocation,   in CreateAndTransfer()
     199  xla::Backend* backend, int device_ordinal, XRTTupleAllocation** allocation,   in CreateUninitialized()
     223  int device_ordinal, XRTTupleAllocation** allocation,   in CreateFromBuffer()
     235  int device_ordinal, XRTTupleAllocation** allocation,   in CreateFromBuffer()
     357  int XRTTupleAllocation::device_ordinal() const { return device_ordinal_; }   in device_ordinal()   function in tensorflow::XRTTupleAllocation
     415  const xla::ShapeTree<ExpandedTupleInput>& elements, int device_ordinal,   in ExpandTreeOfTuples()
     456  XRTMemoryManager* memory_manager, xla::Backend* backend, int device_ordinal,   in MakeTuple()
     [all …]

xrt_memory_manager.cc
      37  int64_t MakeDeviceHandle(int64_t device_ordinal, int64_t rnd_value) {   in MakeDeviceHandle()
     229  int device_ordinal = GetDeviceFromHandle(handle);   in Lookup()   local
     243  int device_ordinal = GetDeviceFromHandle(handle);   in Release()   local
     253  xla::Backend* backend, int device_ordinal,   in CompactAllocations()
     272  xla::Backend* backend, int device_ordinal, size_t size,   in Allocate()
     309  int device_ordinal, bool create_if_missing) {   in GetDeviceContext()

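MakeDeviceHandle(device_ordinal, rnd_value) above pairs a device ordinal with a random value in a single 64-bit handle, and Lookup()/Release() later recover the ordinal via GetDeviceFromHandle(handle). Below is a self-contained sketch of that pattern; the 16/48-bit split and the helper bodies are assumptions made for illustration, not the actual XRT encoding.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative only: the bit split below is an assumption, not the layout
// used by xrt_memory_manager.cc.
constexpr int kOrdinalBits = 16;
constexpr int kValueBits = 64 - kOrdinalBits;
constexpr int64_t kValueMask = (int64_t{1} << kValueBits) - 1;

// Pack the device ordinal into the upper bits so it can be recovered later.
int64_t MakeDeviceHandle(int64_t device_ordinal, int64_t rnd_value) {
  return (device_ordinal << kValueBits) | (rnd_value & kValueMask);
}

// Recover the ordinal of the device that owns the allocation behind `handle`.
int GetDeviceFromHandle(int64_t handle) {
  return static_cast<int>((handle >> kValueBits) & ((1 << kOrdinalBits) - 1));
}

int main() {
  const int64_t handle = MakeDeviceHandle(/*device_ordinal=*/3, /*rnd_value=*/0x1234abcd);
  std::cout << GetDeviceFromHandle(handle) << "\n";  // prints 3
}
```
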
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/cpu/

cpu_runtime.cc
      63  XfeedManager* GetXfeedManager(int device_ordinal) {   in GetXfeedManager()
     179  int64_t device_ordinal;   member
     204  int64_t device_ordinal;   member
     303  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_AcquireInfeedBufferForDequeue()   local
     326  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue()   local
     344  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_AcquireOutfeedBufferForPopulation()   local
     367  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation()   local
     699  int device_ordinal = GetDeviceOrdinal(run_options);   in GetRendezvousKey()   local
     723  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_AllToAll()   local
     766  int device_ordinal = GetDeviceOrdinal(run_options);   in __xla_cpu_runtime_AllReduce()   local
     [all …]

cpu_xfeed.cc
     103  Status TransferBufferToInfeed(int device_ordinal, int64_t size,   in TransferBufferToInfeed()
     116  int device_ordinal, absl::Span<const std::pair<void*, int64_t>> buffer_data,   in TransferBuffersFromOutfeedInternal()
     161  StatusOr<Shape> TransferArrayBufferFromOutfeed(int device_ordinal,   in TransferArrayBufferFromOutfeed()
     169  int device_ordinal,   in TransferTupleBuffersFromOutfeed()
     176  Status TransferLiteralToInfeedOnCpu(int device_ordinal,   in TransferLiteralToInfeedOnCpu()
     222  Status TransferLiteralFromOutfeedOnCpu(int device_ordinal,   in TransferLiteralFromOutfeedOnCpu()

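GetXfeedManager(device_ordinal) in cpu_runtime.cc hands each CPU device its own infeed/outfeed manager, and the runtime entry points above all start by resolving the ordinal from run_options. Below is a minimal sketch of a per-ordinal registry of that shape; the lazily-built map and locking are assumptions for the sketch, not the actual cpu_runtime.cc implementation.

```cpp
#include <map>
#include <mutex>

// Toy stand-in for the per-device xfeed state; the real manager holds the
// infeed and outfeed buffer queues for one CPU device.
struct XfeedManager {};

// Return the manager for `device_ordinal`, creating it on first use.
// Intentionally leaked so it stays valid for the life of the process.
XfeedManager* GetXfeedManager(int device_ordinal) {
  static auto* managers = new std::map<int, XfeedManager>();
  static auto* mu = new std::mutex();
  std::lock_guard<std::mutex> lock(*mu);
  return &(*managers)[device_ordinal];
}
```
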
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/tests/

multiple_devices_on_host_test.cc
      37  LocalExecutable* executable, int device_ordinal, LocalClient* client,   in CompileAndExecute()
      76  for (int device_ordinal = 0; device_ordinal < device_count;   in TestWithDeviceCount()   local
      87  for (int device_ordinal = 0; device_ordinal < device_count;   in TestWithDeviceCount()   local
      93  for (int device_ordinal = 0; device_ordinal < device_count;   in TestWithDeviceCount()   local
     101  for (int device_ordinal = 0; device_ordinal < device_count;   in TestWithDeviceCount()   local
     106  for (int device_ordinal = 0; device_ordinal < device_count;   in TestWithDeviceCount()   local

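The loops in TestWithDeviceCount above walk device_ordinal from 0 to device_count, running the same computation on every local device. The following is a rough sketch of that driver shape, with the compile-and-execute step replaced by a placeholder; the thread-per-ordinal detail is an assumption for this sketch rather than a claim about the test's exact structure.

```cpp
#include <cstdio>
#include <thread>
#include <vector>

// Placeholder for the per-device work; in the real test this compiles and
// executes a computation through a LocalClient bound to `device_ordinal`.
void CompileAndExecuteOn(int device_ordinal) {
  std::printf("running on device %d\n", device_ordinal);
}

// Run the same work once per device ordinal and wait for all of them.
void TestWithDeviceCount(int device_count) {
  std::vector<std::thread> threads;
  threads.reserve(device_count);
  for (int device_ordinal = 0; device_ordinal < device_count; ++device_ordinal) {
    threads.emplace_back(CompileAndExecuteOn, device_ordinal);
  }
  for (std::thread& t : threads) {
    t.join();
  }
}

int main() { TestWithDeviceCount(4); }
```
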
local_client_test_base.cc
      42  StatusOr<se::OwningDeviceMemory> TestAllocator::Allocate(int device_ordinal,   in Allocate()
      56  Status TestAllocator::Deallocate(int device_ordinal, se::DeviceMemoryBase mem) {   in Deallocate()
     199  auto device_ordinal =   in ExecuteLocally()   local

/aosp_15_r20/external/tensorflow/tensorflow/core/tpu/kernels/

transfer_ops.cc
      77  OpKernelContext* ctx, int device_ordinal) {   in RunTransferWithOrdinal()
     123  const int device_ordinal = device_ordinal_tensor.scalar<int32>()();   in RunTransfer()   local
     153  int device_ordinal,   in TransferBuffersToInfeed()
     160  int device_ordinal, const xla::LiteralSlice& literal) {   in TransferLiteralToInfeed()
     166  int device_ordinal, xla::MutableBorrowingLiteral literal) {   in TransferLiteralFromOutfeed()
     172  StreamExecutorTransferOpImpl::GetStreamExecutor(int device_ordinal) {   in GetStreamExecutor()

host_compute_ops.cc
      39  int device_ordinal = 0;   in RecvAtHostOp()   local
      84  const int device_ordinal = device_ordinal_tensor.flat<int64_t>()(0);   in ComputeAsync()   local
     190  int device_ordinal = 0;   in SendFromHostOp()   local
     240  const int device_ordinal = device_ordinal_tensor.flat<int64_t>()(0);   in Compute()   local

tpu_functional_ops.cc
     113  int device_ordinal;   member
     222  Status UpdateTPUDeviceOrdinal(int device_ordinal, string* device_name,   in UpdateTPUDeviceOrdinal()
    1255  int32_t device_ordinal = 0;   in ComputeAsync()   local
    1364  int device_ordinal = device_ordinal_t->scalar<int>()();   in GetTpuCoreOrdinal()   local
    1375  int device_ordinal, bool fast_mem) {   in InitializeVarOnTPU()
    1635  Graph* graph, OpKernelContext* ctx, int device_ordinal,   in ReplaceResourceArgsWithVarHandleOps()
    1798  Graph* graph, OpKernelContext* ctx, int device_ordinal,   in ReplaceAndPartitionXLAShardingVariable()
    2292  Graph* graph, int device_ordinal, bool* use_spmd_for_xla_partitioning,   in GetGraphFromFunction()
    2481  int device_ordinal, Graph* graph,   in SetDeviceOrdinal()
    2564  int device_ordinal = replica_id;   in InstantiateFunctionsFromSubgraphs()   local
    [all …]

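The UpdateTPUDeviceOrdinal(device_ordinal, device_name, ...) signature above suggests rewriting a TPU device string so its trailing index matches the requested ordinal. Below is a hedged sketch of what such a rewrite could look like; the name-format handling and behavior are assumptions inferred from the signature, not the tpu_functional_ops.cc logic.

```cpp
#include <iostream>
#include <string>

// Assumed behavior for illustration: replace the index after the last ':' in
// a "/job:.../device:TPU:N" style name with the given device ordinal.
void UpdateTPUDeviceOrdinal(int device_ordinal, std::string* device_name) {
  const std::size_t pos = device_name->rfind(':');
  if (pos == std::string::npos) return;  // not an ordinal-suffixed device name
  device_name->replace(pos + 1, std::string::npos, std::to_string(device_ordinal));
}

int main() {
  std::string name = "/job:worker/replica:0/task:0/device:TPU:0";
  UpdateTPUDeviceOrdinal(3, &name);
  std::cout << name << "\n";  // .../device:TPU:3
}
```
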
tpu_reshard_variables_op_util.cc
      50  Status FlushProgramMemory(se::Platform* platform, int device_ordinal) {   in FlushProgramMemory()
     101  int device_ordinal, se::Stream* stream) {   in BuildInputBuffers()
     227  xla::Backend* backend, se::Stream* stream, int device_ordinal,   in UpdateOutputVariables()

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/stream_executor/

tf_allocator_adapter.h
      82  int device_ordinal = p.second->parent()->device_ordinal();   in MultiDeviceAdapter()   local
      93  port::StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64_t size,   in Allocate()
     101  port::Status Deallocate(int device_ordinal, DeviceMemoryBase mem) override {   in Deallocate()
     116  port::StatusOr<Stream *> GetStream(int device_ordinal) override {   in GetStream()

device_memory_allocator.h
      61  ScopedDeviceMemory(DeviceMemoryBase mem, int device_ordinal,   in ScopedDeviceMemory()
     140  int device_ordinal() const { return device_ordinal_; }   in device_ordinal()   function
     188  port::StatusOr<OwningDeviceMemory> Allocate(int device_ordinal,   in Allocate()
     195  port::StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64_t size,   in Allocate()

tf_allocator_adapter.cc
      39  int device_ordinal, uint64_t size, bool retry_on_failure,   in Allocate()
      56  port::Status TfAllocatorAdapter::Deallocate(int device_ordinal,   in Deallocate()
      62  port::StatusOr<Stream *> TfAllocatorAdapter::GetStream(int device_ordinal) {   in GetStream()

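The allocator entries above (tf_allocator_adapter, device_memory_allocator, and the TestAllocator entries elsewhere in this listing) share one shape: Allocate and Deallocate are keyed by device_ordinal so a multi-device adapter can route each call to the backing allocator for the right device. Below is a toy, self-contained sketch of that routing; it is not the se::DeviceMemoryAllocator interface, just the same pattern.

```cpp
#include <cstdint>
#include <cstdlib>
#include <unordered_map>
#include <vector>

// Toy multi-device allocator: one backing pool per device ordinal, with both
// Allocate and Deallocate taking the ordinal so the call is routed to the
// pool that actually owns the memory. Not the StreamExecutor API.
class MultiDeviceAllocator {
 public:
  void* Allocate(int device_ordinal, uint64_t size) {
    void* ptr = std::malloc(size);
    if (ptr != nullptr) owned_[device_ordinal].push_back(ptr);
    return ptr;
  }

  bool Deallocate(int device_ordinal, void* mem) {
    std::vector<void*>& pool = owned_[device_ordinal];
    for (auto it = pool.begin(); it != pool.end(); ++it) {
      if (*it == mem) {
        pool.erase(it);
        std::free(mem);
        return true;
      }
    }
    return false;  // wrong ordinal or unknown pointer: refuse to free
  }

 private:
  std::unordered_map<int, std::vector<void*>> owned_;
};
```
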
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/client/

local_client.cc
      36  StatusOr<StreamPool::Ptr> BorrowStreamForDevice(int device_ordinal,   in BorrowStreamForDevice()
     273  const ShapeTree<MaybeOwningDeviceMemory>& tree, int device_ordinal) {   in MaybeOwningShapeTreeToShapedBuffer()
     447  const LiteralSlice& literal, int device_ordinal,   in LiteralToShapedBuffer()
     476  int device_ordinal) {   in TransferToInfeedLocal()
     483  Status LocalClient::TransferFromOutfeedLocal(int device_ordinal,   in TransferFromOutfeedLocal()
     496  const ::xla::BorrowingLiteral& literal, int device_ordinal) {   in TransferToLocalServer()

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/

nccl_all_reduce_thunk.cc
      46  int device_ordinal = stream.parent()->device_ordinal();   in RunAllReduce()   local
     257  int device_ordinal = stream.parent()->device_ordinal();   in RunNcclCollective()   local
     302  int device_ordinal = async_comms_stream.parent()->device_ordinal();   in RunNcclCollective()   local
     314  StatusOr<se::Event> NcclAllReduceStartThunk::TakeDoneEvent(int device_ordinal) {   in TakeDoneEvent()
     329  int device_ordinal = params.stream->parent()->device_ordinal();   in ExecuteOnStream()   local
     373  int device_ordinal = stream.parent()->device_ordinal();   in RunReduceScatter()   local

buffer_allocations.h
      39  int device_ordinal,   in BufferAllocations()
      53  int device_ordinal() const { return device_ordinal_; }   in device_ordinal()   function

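A recurring pattern in the GPU entries above is that the ordinal is not passed around explicitly; it is read back from the stream's parent executor via stream.parent()->device_ordinal(). Below is a toy sketch of that ownership chain using simplified stand-in types rather than se::Stream and se::StreamExecutor.

```cpp
#include <iostream>

// Stand-in for se::StreamExecutor: knows which device it drives.
class Executor {
 public:
  explicit Executor(int ordinal) : ordinal_(ordinal) {}
  int device_ordinal() const { return ordinal_; }

 private:
  int ordinal_;
};

// Stand-in for se::Stream: a work queue bound to one executor.
class Stream {
 public:
  explicit Stream(Executor* parent) : parent_(parent) {}
  Executor* parent() const { return parent_; }

 private:
  Executor* parent_;
};

int main() {
  Executor exec(/*ordinal=*/1);
  Stream stream(&exec);
  // The collective thunks recover the ordinal the same way before choosing
  // the communicator and buffers for that device.
  std::cout << stream.parent()->device_ordinal() << "\n";  // prints 1
}
```
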
/aosp_15_r20/external/tensorflow/tensorflow/python/tpu/ops/

tpu_ops.py
     299  device_ordinal,   argument
     336  device_ordinal,   argument
     394  device_ordinal,   argument
     474  device_ordinal,   argument
     552  device_ordinal,   argument

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xrt/kernels/

tpu_execute_op.cc
     141  xla::ScopedShapedBuffer output_scoped_buffer, int device_ordinal) {   in AllocateOutputTuple()
     178  xla::ScopedShapedBuffer output_scoped_buffer, int device_ordinal) {   in AllocateOutputTensors()
     194  int device_ordinal, string rendezvous_key_base) {   in RunExecutable()
     236  int device_ordinal, string rendezvous_key_base) {   in ExecuteTPUProgram()
     275  const int device_ordinal = metadata->device_ordinal();   in DoWork()   local
     397  const int device_ordinal = metadata->device_ordinal();   in DoWork()   local

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/

shaped_buffer.cc
      34  ShapedBuffer::ShapedBuffer(Shape on_device_shape, int device_ordinal)   in ShapedBuffer()
      42  int device_ordinal)   in ShapedBuffer()
     118  int device_ordinal)   in ScopedShapedBuffer()
     125  int device_ordinal)   in ScopedShapedBuffer()

shaped_buffer_test.cc
      61  StatusOr<se::OwningDeviceMemory> Allocate(int device_ordinal, uint64_t size,   in Allocate()
      74  Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override {   in Deallocate()
      91  StatusOr<se::Stream*> GetStream(int device_ordinal) override {   in GetStream()

service_executable_run_options.h
      49  int device_ordinal() const { return run_options_.device_ordinal(); }   in device_ordinal()   function
      53  StatusOr<StreamPool::Ptr> BorrowStream(int device_ordinal) const {   in BorrowStream()

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/stream_executor/tpu/

tpu_node_context.cc
      28  int device_ordinal) {   in Create()
      60  Status TpuNodeContext::Initialize(int device_ordinal) {   in Initialize()
      71  int TpuNodeContext::device_ordinal() const { return device_ordinal_; }   in device_ordinal()   function in tensorflow::tpu::TpuNodeContext

/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/

tpu_extract_outside_compilation.cc
     157  Value device_ordinal,   in CreateSendFromHostOp()
     180  Value device_ordinal,   in CreateRecvAtHostOp()
     509  Value device_ordinal, int& communication_key_index) {   in MoveOpsToHost()
     592  Value device_ordinal,   in MoveOpsToHost()
     647  Value compilation_key, Value device_ordinal,   in DecomposeControlFlow()
     743  Value device_ordinal = nullptr;   in CreateParallelExecuteForOutsideCompilation()   local

/aosp_15_r20/external/tensorflow/tensorflow/core/tpu/

tpu_execute.cc
     298  void TPUCancelExecution(Env* env, int device_ordinal) {   in TPUCancelExecution()
     324  int device_ordinal) {   in RegisterCancellation()
     364  se::Stream* stream, int device_ordinal, CancellationToken token,   in UnregisterCancellation()
     488  const int32_t device_ordinal = node_context->device_ordinal();   in TPUExecute()   local

/aosp_15_r20/external/tensorflow/tensorflow/compiler/jit/

xla_device.cc
     115  const xla::Backend* backend, int device_ordinal) {   in GetOrCreateXlaDeviceAllocator()
     136  int device_ordinal) {   in BuildXlaDeviceAttributes()
     146  int device_ordinal, se::Platform* platform, const DeviceType& device_type,   in Metadata()
     157  int XlaDevice::Metadata::device_ordinal() const { return device_ordinal_; }   in device_ordinal()   function in tensorflow::XlaDevice::Metadata