/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/

tuple_util.cc
     49  tuple_elements.reserve(input_shape.tuple_shapes_size());  in AppendSuffix()
     50  for (int i = 0; i < input_shape.tuple_shapes_size(); i++) {  in AppendSuffix()
     91  std::vector<HloInstruction*> tuple_args(tuple_shape.tuple_shapes_size());  in ReplaceTupleWith()
     92  CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]);  in ReplaceTupleWith()
     93  for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) {  in ReplaceTupleWith()
    138  if (shape_index[0] == tuple_shape.tuple_shapes_size()) {  in ReplaceTupleWith()
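The hits at lines 49-50 and 91-93 are XLA's standard explode-a-tuple pattern: reserve space for tuple_shapes_size() elements, then emit one get-tuple-element per element. A minimal sketch of that pattern, assuming a live HloComputation* and a tuple-shaped HloInstruction* are already at hand (the helper name ExplodeTuple is invented for illustration):

    #include <vector>

    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"

    namespace xla {

    // Emits one get-tuple-element instruction per top-level element of `tuple`.
    std::vector<HloInstruction*> ExplodeTuple(HloComputation* computation,
                                              HloInstruction* tuple) {
      const Shape& shape = tuple->shape();
      std::vector<HloInstruction*> elements;
      elements.reserve(shape.tuple_shapes_size());
      for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
        elements.push_back(computation->AddInstruction(
            HloInstruction::CreateGetTupleElement(shape.tuple_shapes(i), tuple, i)));
      }
      return elements;
    }

    }  // namespace xla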
while_util.cc
     55  narrow_shape.tuple_shapes_size());  in WidenWhileCondition()
     79  wide_parameter, narrow_shape.tuple_shapes_size());  in WidenWhileBody()
     85  for (int i = narrow_shape.tuple_shapes_size();  in WidenWhileBody()
     86  i < wide_shape.tuple_shapes_size(); i++) {  in WidenWhileBody()
    106  int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();  in MakeInstructionsLiveIn()
    133  new_while, while_instr->shape().tuple_shapes_size());  in MakeInstructionsLiveIn()
    140  i < new_while_shape.tuple_shapes_size(); i++) {  in MakeInstructionsLiveIn()
    197  for (int i = 1, e = loop_state_shape.tuple_shapes_size(); i < e; i++) {  in MakeCountedLoopBodyComputation()
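WidenWhileCondition/WidenWhileBody grow the loop-state tuple: the newly added elements occupy indices [narrow_shape.tuple_shapes_size(), wide_shape.tuple_shapes_size()). A shape-only sketch of that widening, with no HLO rewiring and an invented helper name:

    #include <vector>

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Appends `extra` element shapes to a narrow loop-state tuple shape.
    // New elements start at index narrow.tuple_shapes_size().
    Shape WidenLoopStateShape(const Shape& narrow, const std::vector<Shape>& extra) {
      std::vector<Shape> elements(narrow.tuple_shapes().begin(),
                                  narrow.tuple_shapes().end());
      elements.insert(elements.end(), extra.begin(), extra.end());
      return ShapeUtil::MakeTupleShape(elements);
    }

    }  // namespace xla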
while_loop_simplifier.cc
    506  for (int index = 0; index < while_init->shape().tuple_shapes_size();  in TryRemoveRepeatedWhileTupleIndicesHelper()
    546  while (index_to_investigate < while_init->shape().tuple_shapes_size()) {  in TryRemoveRepeatedWhileTupleIndices()
    580  i < while_shape.tuple_shapes_size(); ++i) {  in TryRemoveRepeatedWhileTupleIndices()
    647  for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {  in TryRemoveConstantParams()
    664  for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {  in TryRemoveConstantParams()
    686  for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {  in TryRemoveConstantParams()
    701  for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) {  in TryRemoveConstantParams()
    947  for (int i = 0; i < desired_shape.tuple_shapes_size(); ++i) {  in UnflattenTupleInstr()
    985  for (int i = 0; i < shape.tuple_shapes_size(); ++i) {  in GetFlatTupleElems()
   1040  for (int i = 0; i < flat_shape.tuple_shapes_size(); ++i) {  in TryFlattenNestedTuples()
    [all …]
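The TryRemove*/TryFlatten* passes above all reduce to the same bookkeeping: walk the while tuple by index, decide which elements survive, and remember how old indices map to new ones. A standalone sketch of that index remapping, pure shape manipulation with an invented helper name:

    #include <map>
    #include <set>
    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Keeps only the tuple elements whose indices are in `used`, and returns the
    // pruned tuple shape together with an old-index -> new-index map.
    std::pair<Shape, std::map<int, int>> PruneTupleShape(const Shape& shape,
                                                         const std::set<int>& used) {
      std::vector<Shape> kept;
      std::map<int, int> old_to_new;
      for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
        if (used.count(i) > 0) {
          old_to_new[i] = static_cast<int>(kept.size());
          kept.push_back(shape.tuple_shapes(i));
        }
      }
      return {ShapeUtil::MakeTupleShape(kept), old_to_new};
    }

    }  // namespace xla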
while_loop_concat_code_motion.cc
    644  std::vector<bool> result(loop->shape().tuple_shapes_size(), false);  in TupleElementsUsedInCond()
    723  std::vector<HloInstruction*> init_elements(loop->shape().tuple_shapes_size());  in RewriteLoopWithConcatGroups()
    759  loop->shape().tuple_shapes_size());  in RewriteLoopWithConcatGroups()
    957  std::vector<HloInstruction*> gtes(param->shape().tuple_shapes_size(),  in RunOnLoop()
tuple_util.h
     48  return ExtractPrefix(input_tuple, input_tuple->shape().tuple_shapes_size());  in Duplicate()
hlo_verifier.cc
    551  if (buffer_offset_shape.tuple_shapes_size() != buffer_shape.rank()) {  in CheckBufferOffset()
    595  for (int i = 0; i < input_buffer_shape.tuple_shapes_size(); ++i) {  in CheckInplaceCollectivePermute()
    608  for (int i = 0; i < output_buffer_shape.tuple_shapes_size(); ++i) {  in CheckInplaceCollectivePermute()
    639  allowed_seen_count = hlo->operand(2)->shape().tuple_shapes_size();  in CheckDuplicatedSourceOrTarget()
    642  hlo->operand(2)->shape().tuple_shapes(0).tuple_shapes_size();  in CheckDuplicatedSourceOrTarget()
    860  if (hlo->shape().IsTuple() && hlo->shape().tuple_shapes_size() != 2) {  in HandleRngBitGenerator()
   1390  if (!async_shape.IsTuple() || async_shape.tuple_shapes_size() < 2) {  in CheckAsyncOpComputationShapes()
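Several of these verifier checks are pure arity tests on a tuple shape: RngBitGenerator must produce exactly two elements (state, values), async-op wrapper shapes at least two. A hedged sketch of such predicates, not the verifier's real interface:

    #include "tensorflow/compiler/xla/shape.h"

    namespace xla {

    // True iff `shape` is a tuple with exactly `arity` top-level elements.
    bool IsTupleWithArity(const Shape& shape, int arity) {
      return shape.IsTuple() && shape.tuple_shapes_size() == arity;
    }

    // True iff `shape` is a tuple with at least `min_arity` top-level elements.
    bool IsTupleWithMinArity(const Shape& shape, int min_arity) {
      return shape.IsTuple() && shape.tuple_shapes_size() >= min_arity;
    }

    }  // namespace xla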
conditional_simplifier.cc
    214  const int old_tuple_shapes_size = conditional_op->shape().tuple_shapes_size();  in RemoveUnusedTupleElements()
    424  for (int i = 0; i < conditional->shape().tuple_shapes_size(); ++i) {  in MergeDuplicateTupleElements()
dynamic_dimension_inference.cc
     59  wide_parameter, narrow_shape.tuple_shapes_size());  in WidenComputation()
   1452  hlo->operand(operand_index)->shape().tuple_shapes_size();  in HandleConditional()
   1518  int tuple_count = hlo->shape().tuple_shapes_size();  in HandleConditional()
   1596  new_conditional, hlo->shape().tuple_shapes_size());  in HandleConditional()
   1695  const int original_tuple_count = hlo->shape().tuple_shapes_size();  in HandleWhile()
reduce_decomposer.cc
     89  for (int i = 0; i < shape.tuple_shapes_size(); i++) {  in HandleReduce()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/pjrt/

utils.cc
     39  if (sharding.tuple_shardings_size() != shape.tuple_shapes_size()) {  in GetShardedShape()
     43  sharding.tuple_shardings_size(), shape.tuple_shapes_size(),  in GetShardedShape()
     47  const int tuple_shapes_size = shape.tuple_shapes_size();  in GetShardedShape()  [local]
     48  sharded_subshapes.reserve(tuple_shapes_size);  in GetShardedShape()
     49  for (int i = 0; i < tuple_shapes_size; ++i) {  in GetShardedShape()
    220  return input_tuple_shape.tuple_shapes_size();  in ComputeParametersThatMustBeDonated()
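GetShardedShape first checks that a tuple OpSharding carries exactly one sub-sharding per top-level tuple element before zipping the two together. A sketch of that consistency check, with an invented helper name:

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/xla_data.pb.h"

    namespace xla {

    // A tuple sharding must provide one entry per top-level tuple element;
    // non-tuple shardings have nothing to zip against the shape.
    bool ShardingMatchesTupleShape(const OpSharding& sharding, const Shape& shape) {
      if (sharding.type() != OpSharding::TUPLE) return true;
      return sharding.tuple_shardings_size() == shape.tuple_shapes_size();
    }

    }  // namespace xla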
tfrt_cpu_pjrt_client.cc
    315  buffer_indices.reserve(result_shape.tuple_shapes_size());  in FindResultBufferAllocationIndex()
    316  for (int i = 0; i < result_shape.tuple_shapes_size(); ++i) {  in FindResultBufferAllocationIndex()
    599  int num_leaf_buffers = shape.IsTuple() ? shape.tuple_shapes_size() : 1;  in BufferFromHostLiteral()
    629  for (int i = 0; i < shape.tuple_shapes_size(); ++i) {  in BufferFromHostLiteral()
   1201  computation_layout.parameter_shape(0).tuple_shapes_size());  in TfrtCpuExecutable()
   1203  i < computation_layout.parameter_shape(0).tuple_shapes_size(); ++i) {  in TfrtCpuExecutable()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/client/lib/

dynamic_shaped_ops.cc
     44  results.reserve(shapes[0]->tuple_shapes_size());  in FindMaxShape()
     45  for (int i = 0; i < shapes[0]->tuple_shapes_size(); ++i) {  in FindMaxShape()
     75  left_branch_shape.tuple_shapes_size() ==  in ReconsileBranchDifference()
     76  right_branch_shape.tuple_shapes_size());  in ReconsileBranchDifference()
     79  results.reserve(left_branch_shape.tuple_shapes_size());  in ReconsileBranchDifference()
     80  for (int i = 0; i < left_branch_shape.tuple_shapes_size(); ++i) {  in ReconsileBranchDifference()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/tools/

hlo_control_flow_flattening.cc
     41  std::vector<HloInstruction*> tuple_arguments(shape.tuple_shapes_size());  in CreateConstant()
     42  for (int index = 0; index < shape.tuple_shapes_size(); ++index) {  in CreateConstant()
    169  new_tuple, new_tuple->shape().tuple_shapes_size() - 1);  in FlattenWhileLoop()
    255  CHECK_EQ(infeed_hlo->shape().tuple_shapes_size(), 2);  in RemoveInfeed()
    280  CHECK_EQ(recv_done->shape().tuple_shapes_size(), 2);  in RemoveRecvDone()
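CreateConstant recurses element-by-element when asked for a constant of tuple shape. The same traversal, reduced to a sketch that only counts leaf (array) shapes, looks like this (ShapeUtil already offers similar helpers; this is purely illustrative):

    #include "tensorflow/compiler/xla/shape.h"

    namespace xla {

    // Counts leaf (non-tuple) shapes in a possibly nested tuple shape.
    int CountLeafShapes(const Shape& shape) {
      if (!shape.IsTuple()) return 1;
      int leaves = 0;
      for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
        leaves += CountLeafShapes(shape.tuple_shapes(i));
      }
      return leaves;
    }

    }  // namespace xla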
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/

shape_tree.cc
     37  entries_.resize(entries_.size() + shape.tuple_shapes_size());  in CreateEntry()
     38  for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {  in CreateEntry()
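ShapeTree pre-sizes its flat entry array by walking tuple_shapes_size() at every nesting level; callers then get one data slot per subshape, addressable by ShapeIndex. A minimal usage sketch (the shape and payload values are illustrative):

    #include <iostream>

    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    int main() {
      xla::Shape tuple = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {4}),
           xla::ShapeUtil::MakeShape(xla::S32, {})});
      xla::ShapeTree<int> tree(tuple, /*init_value=*/0);  // one slot per subshape
      tree.ForEachElement([](const xla::ShapeIndex& index, int value) {
        std::cout << index.ToString() << " -> " << value << "\n";
      });
      return 0;
    }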
shape_util.cc
    328  result_shapes.reserve(shape.tuple_shapes_size());  in MoveDimToMajor()
    441  CHECK(index < tuple_shape->tuple_shapes_size());  in UpdateTupleShape()
    559  return shape.tuple_shapes_size();  in TupleElementCount()
    845  return pointer_size * shape.tuple_shapes_size();  in ByteSizeOfTupleIndexTable()
    877  if (shape.tuple_shapes_size() > 0) {  in ValidateShapeWithOptionalLayoutInternal()
    963  new_operands.reserve(original.tuple_shapes_size());  in ChangeElementType()
    979  if (!subshape->IsTuple() || i >= subshape->tuple_shapes_size() || i < 0) {  in IndexIsValid()
   1003  i >= return_shape->tuple_shapes_size()) {  in TryGetSubshape()
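Two of these hits are worth spelling out: TupleElementCount (line 559) is literally tuple_shapes_size(), and ByteSizeOfTupleIndexTable (line 845) is the arithmetic "one pointer slot per top-level element". A sketch of the latter; nested tuples get their own index tables, which this deliberately ignores:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape.h"

    namespace xla {

    // Size in bytes of the top-level pointer table backing a tuple value:
    // one pointer per top-level element, zero for array shapes.
    int64_t TupleIndexTableBytes(const Shape& shape, int64_t pointer_size) {
      return shape.IsTuple() ? pointer_size * shape.tuple_shapes_size() : 0;
    }

    }  // namespace xla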
shape.cc
     49  tuple_shapes_.reserve(shape_proto.tuple_shapes_size());  in Shape()
     74  proto.mutable_tuple_shapes()->Reserve(tuple_shapes_size());  in ToProto()
shape.h
    144  int tuple_shapes_size() const { return tuple_shapes_.size(); }  in tuple_shapes_size()  [function]
    276  return H::combine(std::move(h), s.tuple_shapes_size());  in Hash()
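Line 144 is the accessor itself: a thin wrapper over the element vector's size. Basic usage, with illustrative shapes:

    #include <iostream>

    #include "tensorflow/compiler/xla/shape_util.h"

    int main() {
      xla::Shape tuple = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {16}),
           xla::ShapeUtil::MakeShape(xla::S32, {})});
      std::cout << tuple.tuple_shapes_size() << "\n";           // 2
      std::cout << xla::ShapeUtil::HumanString(tuple) << "\n";  // (f32[16], s32[])
      return 0;
    }

The Hash() hit at line 276 folds this same count into the shape hash, so tuples of different arity hash differently.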
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/

all_reduce_blueconnect.cc
     44  outputs.reserve(instruction.shape().tuple_shapes_size());  in GetOutputs()
     47  for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {  in GetOutputs()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

shape_op.cc
    390  list_dynamic_dims.reserve(list_shape.tuple_shapes_size() - 1);  in Compile()
    391  for (int i = 0; i < list_shape.tuple_shapes_size() - 1; ++i) {  in Compile()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/

tpu_driver.cc
    100  if (shape.tuple_shapes_size() > 0) {  in ComputeBytesFromShape()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/aot/

codegen.cc
    228  size_t num_results = ps.result().tuple_shapes_size();  in GenResultMethods()
    238  ps.result().tuple_shapes_size(), ")");  in GenResultMethods()
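GenResultMethods emits one generated accessor per top-level element of the compiled program's result tuple, so the count comes straight from tuple_shapes_size(). A sketch of that count over an xla::ProgramShape; tfcompile actually works on the proto form, and the fallback for a non-tuple result is an assumption here:

    #include <cstddef>

    #include "tensorflow/compiler/xla/shape.h"

    namespace xla {

    // Number of result accessors to generate: one per top-level tuple element,
    // or a single accessor if the result is not a tuple.
    size_t NumGeneratedResultMethods(const ProgramShape& program_shape) {
      const Shape& result = program_shape.result();
      return result.IsTuple() ? result.tuple_shapes_size() : 1;
    }

    }  // namespace xla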
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/xla/

layout_util.cc
     73  for (int i = 0; i < original_shape.tuple_shapes_size(); ++i) {  in ReshapeWithCorrectRepresentationAndSharding()
hlo_utils.h
    100  contents.reserve(shape.tuple_shapes_size());  in ConvertShapeToType()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tf2xla/

layout_util.cc
     88  for (int i = 0; i < original_shape.tuple_shapes_size(); ++i) {  in ReshapeWithCorrectRepresentationAndSharding()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/

tuple_ops.cc
     71  int tuple_size = tuple_shape.tuple_shapes_size();  in EmitTupleAllocasAtFunctionEntry()