
Searched full:tensors (Results 1 – 25 of 5972) sorted by relevance


/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/
external_functions_codegen.cpp
25 std::vector<at::Tensor> tensors = constructTensors( in nnc_aten_abs() local
27 at::Tensor& r = tensors[0]; in nnc_aten_abs()
28 const at::Tensor& self = tensors[1]; in nnc_aten_abs()
43 std::vector<at::Tensor> tensors = constructTensors( in nnc_aten_absolute() local
45 at::Tensor& r = tensors[0]; in nnc_aten_absolute()
46 const at::Tensor& self = tensors[1]; in nnc_aten_absolute()
61 std::vector<at::Tensor> tensors = constructTensors( in nnc_aten_angle() local
63 at::Tensor& r = tensors[0]; in nnc_aten_angle()
64 const at::Tensor& self = tensors[1]; in nnc_aten_angle()
79 std::vector<at::Tensor> tensors = constructTensors( in nnc_aten_sgn() local
[all …]
external_functions.cpp
102 std::vector<at::Tensor> tensors; in constructTensors() local
122 tensors.emplace_back(tensor); in constructTensors()
152 tensors.emplace_back(tensor); in constructTensors()
160 tensors.emplace_back(tensor); in constructTensors()
164 return tensors; in constructTensors()
207 std::vector<at::Tensor> tensors; in constructTensors2() local
211 tensors.emplace_back(und); in constructTensors2()
232 tensors.emplace_back(tensor); in constructTensors2()
262 tensors.emplace_back(tensor); in constructTensors2()
270 tensors.emplace_back(tensor); in constructTensors2()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
ForeachUnaryOp.cu
47 std::vector<Tensor> foreach_unary_op(TensorList tensors) { in foreach_unary_op() argument
50 vec_res.reserve(tensors.size()); in foreach_unary_op()
51 for (const auto& t : tensors) { in foreach_unary_op()
55 tensor_lists.emplace_back(tensors.vec()); in foreach_unary_op()
72 void foreach_unary_op_(TensorList tensors) { in foreach_unary_op_() argument
74 tensor_lists.emplace_back(tensors.vec()); in foreach_unary_op_()
84 increment_version(tensors); in foreach_unary_op_()
88 std::vector<Tensor> floating_complex_half(TensorList tensors) { in floating_complex_half() argument
91 tensors[0].scalar_type(), in floating_complex_half()
93 [&]() { return foreach_unary_op<scalar_t, Op>(tensors); }); in floating_complex_half()
[all …]
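These CUDA kernels back PyTorch's foreach fast path, which applies a single unary op across a whole list of tensors with few kernel launches. A minimal sketch of the corresponding Python-level contract, assuming a PyTorch build that exposes the private torch._foreach_* functions:

    import torch

    xs = [torch.randn(3) for _ in range(4)]
    ys = torch._foreach_abs(xs)   # out-of-place: returns a new list
    torch._foreach_abs_(xs)       # in-place: mutates every tensor in xs
    assert all(torch.equal(a, b) for a, b in zip(xs, ys))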
ForeachBinaryOpScalar.cu
26 TensorList tensors, in foreach_binary_op() argument
30 vec_res.reserve(tensors.size()); in foreach_binary_op()
31 for (const auto& t : tensors) { in foreach_binary_op()
35 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op()
52 void foreach_binary_op_(TensorList tensors, const Scalar& scalar) { in foreach_binary_op_() argument
54 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op_()
66 increment_version(tensors); in foreach_binary_op_()
71 TensorList tensors, in all_types_complex_bool_half_bfloat16() argument
77 tensors[0].scalar_type(), in all_types_complex_bool_half_bfloat16()
79 [&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalar); }); in all_types_complex_bool_half_bfloat16()
[all …]
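The scalar variant applies one binary op with a shared scalar operand across the list, as in foreach_binary_op(tensors, scalar) above. A short sketch under the same private-API assumption:

    import torch

    xs = [torch.ones(2), torch.ones(3)]
    ys = torch._foreach_add(xs, 10.0)   # every tensor plus the same scalar
    torch._foreach_mul_(xs, 2.0)        # in-place variant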
ForeachBinaryOpScalarList.cu
26 TensorList tensors, in foreach_binary_op() argument
30 vec_res.reserve(tensors.size()); in foreach_binary_op()
31 for (const auto& t : tensors) { in foreach_binary_op()
35 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op()
53 void foreach_binary_op_(TensorList tensors, at::ArrayRef<Scalar> scalars) { in foreach_binary_op_() argument
55 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op_()
67 increment_version(tensors); in foreach_binary_op_()
72 TensorList tensors, in all_types_complex_bool_half_bfloat16() argument
78 tensors[0].scalar_type(), in all_types_complex_bool_half_bfloat16()
80 [&]() { return foreach_binary_op<scalar_t, Op>(tensors, scalars); }); in all_types_complex_bool_half_bfloat16()
[all …]
ForeachBinaryOpScalarTensor.cu
22 TensorList tensors, in foreach_binary_op() argument
33 tensors[0].device() == scalar.device(), in foreach_binary_op()
35 tensors[0].device(), in foreach_binary_op()
40 vec_res.reserve(tensors.size()); in foreach_binary_op()
41 for (const auto& t : tensors) { in foreach_binary_op()
45 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op()
64 TensorList tensors, in foreach_binary_op_() argument
75 tensors[0].device() == scalar.device(), in foreach_binary_op_()
77 tensors[0].device(), in foreach_binary_op_()
81 tensor_lists.emplace_back(tensors.vec()); in foreach_binary_op_()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/tools/optimize/
quantize_model_test.cc
98 ASSERT_EQ(graph->tensors.size(), expected_graph->tensors.size()); in ExpectSameModels()
99 for (size_t i = 0; i < graph->tensors.size(); i++) { in ExpectSameModels()
100 const auto tensor = graph->tensors[i].get(); in ExpectSameModels()
101 const auto expected_tensor = expected_graph->tensors[i].get(); in ExpectSameModels()
171 ASSERT_EQ(quantized_graph->tensors.size(), float_graph->tensors()->size()); in TEST_P()
172 for (size_t i = 0; i < quantized_graph->tensors.size(); i++) { in TEST_P()
173 const auto quant_tensor = quantized_graph->tensors[i].get(); in TEST_P()
174 const auto float_tensor = float_graph->tensors()->Get(i); in TEST_P()
194 ASSERT_EQ(quantized_graph->tensors.size(), float_graph->tensors()->size()); in TEST_P()
195 for (size_t i = 0; i < quantized_graph->tensors.size(); i++) { in TEST_P()
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/
Utils.hpp
32 TORCH_API size_t getTensorsNumel(const std::vector<at::Tensor>& tensors);
36 const std::vector<at::Tensor>& tensors);
68 const std::vector<at::Tensor>& tensors) { in assertSameType() argument
69 for (const auto i : c10::irange(tensors.size())) { in assertSameType()
70 if (!tensors[i].options().type_equal(type.options())) { in assertSameType()
72 const std::string actual = tensors[i].toString(); in assertSameType()
191 const std::vector<at::Tensor>& tensors) { in assertSameSizes() argument
192 for (const auto i : c10::irange(tensors.size())) { in assertSameSizes()
193 if (!tensors[i].sizes().equals(sizes)) { in assertSameSizes()
195 const auto actual = toString(tensors[i].sizes()); in assertSameSizes()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
nccl_ops.py
29 def all_sum(tensors): argument
30 """Returns a list of tensors with the all-reduce sum across `tensors`.
33 returned tensors are evaluated then the computation will hang.
36 tensors: The input tensors across which to sum; must be assigned
40 List of tensors, each with the sum of the input tensors, where tensor i has
41 the same device as `tensors[i]`.
43 return _apply_all_reduce('sum', tensors)
76 def all_prod(tensors): argument
77 """Returns a list of tensors with the all-reduce product across `tensors`.
80 returned tensors are evaluated then the computation will hang.
[all …]
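The docstring describes an all-reduce: every input contributes to the elementwise sum, and output i is placed on the device of tensors[i]. A guarded sketch, assuming the internal module path tensorflow.python.ops.nccl_ops is importable and at least two NCCL-capable GPUs are visible (it does nothing otherwise):

    import tensorflow as tf
    from tensorflow.python.ops import nccl_ops

    gpus = tf.config.list_logical_devices("GPU")
    if len(gpus) >= 2:
        inputs = []
        for g in gpus[:2]:
            with tf.device(g.name):
                inputs.append(tf.random.normal([4]))
        sums = nccl_ops.all_sum(inputs)  # sums[i] lives on inputs[i]'s device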
functional_ops.py
53 """foldl on the list of tensors unpacked from `elems` on dimension 0.
56 of elements from first to last. The elements are made of the tensors
57 unpacked from `elems` on dimension 0. The callable fn takes two tensors as
63 Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
67 is a (possibly nested) list or tuple of tensors, then each of these tensors
75 elems: A tensor or (possibly nested) sequence of tensors, each of which will
78 initializer: (optional) A tensor or (possibly nested) sequence of tensors,
84 name: (optional) Name prefix for the returned tensors.
87 A tensor or (possibly nested) sequence of tensors, resulting from applying
88 `fn` consecutively to the list of tensors unpacked from `elems`, from first
[all …]
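This is the docstring of foldl, exposed publicly as tf.foldl: fn is applied cumulatively to the tensors unpacked from elems along dimension 0, threading an accumulator from first to last (the first element seeds the accumulator when no initializer is given). A short sketch:

    import tensorflow as tf

    elems = tf.constant([1, 2, 3, 4, 5, 6])
    total = tf.foldl(lambda acc, x: acc + x, elems)
    print(total.numpy())  # 21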
data_flow_ops.py
120 A queue is a TensorFlow data structure that stores tensors across
122 tensors.
124 Each queue element is a tuple of one or more tensors, where each
145 of tensors in each element.
146 shapes: Constraints on the shapes of tensors in an element:
148 as dtypes. If the shape of any tensors in the element are constrained,
257 The `vals` argument can be a Tensor, a list or tuple of tensors, or a
266 vals: A tensor, a list or tuple of tensors, or a dictionary.
282 # tensors in the dictionary `vals` must be listed.
290 tensors = []
[all …]
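The queue machinery documented here surfaces publicly as tf.queue (for example FIFOQueue); each element is a tuple of tensors whose dtypes, and optionally shapes, are fixed at construction. A minimal sketch with single-tensor scalar elements:

    import tensorflow as tf

    q = tf.queue.FIFOQueue(capacity=3, dtypes=[tf.int32], shapes=[[]])
    q.enqueue([1])              # one element
    q.enqueue_many([[2, 3]])    # two elements at once
    print(q.dequeue().numpy())  # 1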
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model_test.cc
189 return expected_graph.tensors[expected_op->inputs[idx]].get(); in FindMatchingExpectedTensor()
206 const auto& tensor = graph->tensors[op->inputs[idx]]; in ExpectSameModels()
236 auto* input = subgraph->tensors[subgraph->inputs[0]].get(); in QuantizeConvModelTest()
237 auto* output = subgraph->tensors[subgraph->outputs[0]].get(); in QuantizeConvModelTest()
295 for (const auto& tensor : subgraph->tensors) { in TEST_P()
354 // There should be 5 tensors: input, output, split, split/split_dim, split:1. in TEST_F()
356 EXPECT_EQ(subgraph->tensors.size(), 5); in TEST_F()
358 EXPECT_EQ(subgraph->tensors[input_idx]->type, TensorType_INT8); in TEST_F()
359 EXPECT_EQ(subgraph->tensors[input_idx]->name, "input"); in TEST_F()
360 EXPECT_EQ(subgraph->tensors[input_idx]->quantization->scale.size(), 1); in TEST_F()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/training/
input.py
346 A list of tensors, one for each element of `tensor_list`. If the tensor
440 def _as_tensor_list(tensors): argument
441 if isinstance(tensors, dict):
442 return [tensors[k] for k in sorted(tensors, key=str)]
444 return tensors
449 raise ValueError("Expected at least one set of tensors")
452 for tensors in tensors_list[1:]:
453 if set(tensors.keys()) != expected_keys:
456 return [_as_tensor_list(tensors) for tensors in tensors_list]
529 """Conditionally store multiple sparse Tensors."""
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/xnnpack/
xnnpack_delegate.cc
228 // Unpacked data for quasi-static tensors, i.e. tensors produced by
239 // Set of indices of tensors with unpacked static sparse weights.
264 // Exclude quasi-static tensors which may have become subgraph outputs in Create()
291 // Detect which tensors are used as inputs or outputs of any subgraph nodes. in Create()
294 std::vector<int> tensors(context->tensors_size, -1); in Create() local
331 tensors[t] = t; in Create()
339 tensors[t] = t; in Create()
347 tensors[t] = t; in Create()
360 tensors[t] = t; in Create()
367 tensors[t] = t; in Create()
[all …]
/aosp_15_r20/external/pytorch/docs/source/
torch.compiler_fake_tensor.rst
9 … are, without actually running those operations (or trashing preexisting tensors), which would be …
16 …tensors don't model devices, and sometimes stride behavior varies depending on your device, so fak…
18 - A tensor subclass lets you subclass torch.Tensor and customize their behavior. Fake tensors are i…
20 …tensors with symbolic sizes rather than only concrete sizes, and propagate these sizes symbolicall…
25 …tensors are associated with a FakeTensorMode. Because fake tensor's primary use case is to do anal…
27 …tensors are meta device tensors; they then use extra extensibility hooks, specifically dispatch_de…
31 …tensors, reinterpreting them as meta tensors. This is done via a magic context manager in_kernel_i…
48 # Fakeify some real tensors
51 # Do some operations on the fake tensors
56 Q: Why do you have real tensors as inputs?
[all …]
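The page describes fake tensors: tensors that carry only metadata (shape, dtype, device, strides) and propagate it through ops without allocating storage or running kernels. A minimal sketch, assuming the private torch._subclasses.fake_tensor module that the doc's own examples use:

    import torch
    from torch._subclasses.fake_tensor import FakeTensorMode

    mode = FakeTensorMode()
    with mode:
        x = mode.from_tensor(torch.randn(4, 8))  # "fakeify" a real tensor
        y = x @ x.t()                            # metadata-only propagation
    print(y.shape)  # torch.Size([4, 4])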
/aosp_15_r20/external/armnn/delegate/test/
UnidirectionalSequenceLstmTestHelper.hpp
108 std::vector<flatbuffers::Offset<Tensor>> tensors; in CreateUnidirectionalSequenceLstmTfLiteModel() local
126 tensors.push_back(CreateTensor(flatBufferBuilder, in CreateUnidirectionalSequenceLstmTfLiteModel()
132 operatorInputs.push_back(tensors.size() - 1); in CreateUnidirectionalSequenceLstmTfLiteModel()
141 tensors.push_back(CreateTensor(flatBufferBuilder, in CreateUnidirectionalSequenceLstmTfLiteModel()
148 operatorInputs.push_back(tensors.size() - 1); in CreateUnidirectionalSequenceLstmTfLiteModel()
160 tensors.push_back(CreateTensor(flatBufferBuilder, in CreateUnidirectionalSequenceLstmTfLiteModel()
167 operatorInputs.push_back(tensors.size() - 1); in CreateUnidirectionalSequenceLstmTfLiteModel()
174 tensors.push_back(CreateTensor(flatBufferBuilder, in CreateUnidirectionalSequenceLstmTfLiteModel()
181 operatorInputs.push_back(tensors.size() - 1); in CreateUnidirectionalSequenceLstmTfLiteModel()
188 tensors.push_back(CreateTensor(flatBufferBuilder, in CreateUnidirectionalSequenceLstmTfLiteModel()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ForeachOpsKernels.cpp
69 TensorList tensors, const Tensor& scalar) { \
77 check_foreach_api_restrictions(tensors); \
79 for (auto& t : tensors) { \
85 TensorList tensors, const Tensor& scalar) { \
93 check_foreach_api_restrictions(tensors); \
96 result.reserve(tensors.size()); \
97 for (const auto& t : tensors) { \
106 TensorList tensors, const Tensor& scalar, const Scalar& alpha) { \
114 check_foreach_api_restrictions(tensors); \
116 for (auto& t : tensors) { \
[all …]
TensorShape.h
14 // Check to see if the shape of tensors is compatible
19 TORCH_CHECK(first_dims == second_dims, "Tensors must have same number of dimensions: got ", in check_cat_shape_except_dim()
27 … TORCH_CHECK(first_dim_size == second_dim_size, "Sizes of tensors must match except in dimension ", in check_cat_shape_except_dim()
32 inline void check_cat_no_zero_dim(const MaterializedITensorListRef& tensors) { in check_cat_no_zero_dim() argument
34 for(const Tensor& t : tensors) { in check_cat_no_zero_dim()
58 inline bool have_same_ndims(TensorList tensors) { in have_same_ndims() argument
59 auto ndim = tensors[0].dim(); in have_same_ndims()
60 for (const auto tensor_idx : c10::irange(tensors.size())) { in have_same_ndims()
61 if(tensors[tensor_idx].dim() != ndim) { in have_same_ndims()
68 inline void leading_dimension_matches(TensorList tensors, int64_t dim) { in leading_dimension_matches() argument
[all …]
TensorShape.cpp
219 inline void cat_check_no_zero_dim(const MaterializedITensorListRef& tensors) { in cat_check_no_zero_dim() argument
221 for (const Tensor& t : tensors) { in cat_check_no_zero_dim()
244 TORCH_PRECOMPUTE_META_FUNC(cat)(const ITensorListRef& tensors, int64_t dim) { in TORCH_PRECOMPUTE_META_FUNC()
245 // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible in TORCH_PRECOMPUTE_META_FUNC()
246 …// to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tenso… in TORCH_PRECOMPUTE_META_FUNC()
249 auto materialized = tensors.materialize(); in TORCH_PRECOMPUTE_META_FUNC()
258 !materialized.empty(), "torch.cat(): expected a non-empty list of Tensors"); in TORCH_PRECOMPUTE_META_FUNC()
277 auto out_dtype = at::native::result_type(tensors); in TORCH_PRECOMPUTE_META_FUNC()
298 // If we found a valid tensor, check whether the input tensors in TORCH_PRECOMPUTE_META_FUNC()
421 // pointers to track whether or not fake cuda tensors are pinned or not in set_storage_meta__symint()
[all …]
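Both files enforce the torch.cat contract spelled out in these error messages: all inputs need the same number of dimensions and matching sizes everywhere except the concatenation dimension, and zero-dimensional tensors are rejected. A short illustration:

    import torch

    a = torch.randn(2, 3)
    b = torch.randn(4, 3)
    c = torch.cat([a, b], dim=0)  # OK: shapes differ only in dim 0
    print(c.shape)                # torch.Size([6, 3])
    # torch.cat([a, torch.randn(4, 5)], dim=0) raises
    # "Sizes of tensors must match except in dimension 0 ..."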
/aosp_15_r20/external/executorch/extension/pytree/aten_util/test/
ivalue_util_test.cpp
17 std::vector<at::Tensor> tensors; in makeExampleTensors() local
19 tensors.push_back(at::randn({2, 3, 5})); in makeExampleTensors()
21 return tensors; in makeExampleTensors()
26 std::vector<at::Tensor> tensors; member
30 auto tensors = makeExampleTensors(3); in makeExampleListOfTensors() local
32 tensors[0], in makeExampleListOfTensors()
33 tensors[1], in makeExampleListOfTensors()
34 tensors[2], in makeExampleListOfTensors()
36 return {list, tensors}; in makeExampleListOfTensors()
40 auto tensors = makeExampleTensors(3); in makeExampleTupleOfTensors() local
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/data/util/
sparse.py
74 def deserialize_sparse_tensors(tensors, types, shapes, classes): argument
75 """Deserializes sparse tensors.
78 tensors: a structure of tensors to deserialize.
79 types: a structure that holds information about types of `tensors`
80 shapes: a structure that holds information about shapes of `tensors`
84 `tensors` with any serialized sparse tensors replaced by their deserialized
91 nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes),
97 def get_classes(tensors): argument
98 """Gets classes for a structure of tensors.
101 tensors: the tensor structure to get classes for.
[all …]
/aosp_15_r20/external/pytorch/test/cpp/rpc/
test_tensorpipe_serialization.cpp
17 std::vector<at::Tensor> tensors{t1, t2}; in TEST() local
25 std::move(payload), std::move(tensors), mtype); in TEST()
44 recvingTpDescriptor.tensors.reserve(sendingTpMessage.tensors.size()); in TEST()
45 for (auto& tpTensor : sendingTpMessage.tensors) { in TEST()
51 recvingTpDescriptor.tensors.push_back(std::move(t)); in TEST()
54 recvingTpDescriptor.tensors.size(), sendingTpMessage.tensors.size()); in TEST()
76 recvingTpAllocation.tensors.size(), sendingTpMessage.tensors.size()); in TEST()
77 for (const auto i : c10::irange(recvingTpAllocation.tensors.size())) { in TEST()
78 tensorpipe::Message::Tensor& srcTensor = sendingTpMessage.tensors[i]; in TEST()
79 tensorpipe::Allocation::Tensor& dstTensor = recvingTpAllocation.tensors[i]; in TEST()
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/lazy/core/
lazy_graph_executor.cpp
40 // PyTorch currently has an issue comparing tensors which have NaN values in in TensorCompare()
53 bool TensorsHaveIR(const std::vector<LazyTensorPtr>& tensors) { in TensorsHaveIR() argument
54 for (const auto& tensor : tensors) { in TensorsHaveIR()
87 std::vector<LazyTensorPtr> tensors; in GetLiveTensors() local
93 tensors.push_back(LazyTensor::Create(std::move(data))); in GetLiveTensors()
98 return tensors; in GetLiveTensors()
406 auto tensors = GetLiveTensors(device); in SyncLiveTensorsGraph() local
407 VLOG(4) << tensors.size() << " live tensors: devices=(" in SyncLiveTensorsGraph()
409 SyncTensorsGraph(&tensors, devices, wait, /*sync_ltc_data=*/true); in SyncLiveTensorsGraph()
413 std::vector<LazyTensorPtr>* tensors, in SyncTensorsGraph() argument
[all …]
/aosp_15_r20/external/pytorch/torch/nn/parallel/
comm.py
24 out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
35 a tuple containing :attr:`out` tensors, each containing a copy of
50 def broadcast_coalesced(tensors, devices, buffer_size=10485760): argument
51 """Broadcast a sequence of tensors to the specified GPUs.
53 Small tensors are first coalesced into a buffer to reduce the number of synchronizations.
56 tensors (sequence): tensors to broadcast. Must be on the same device,
66 tensors = [_handle_complex(t) for t in tensors]
67 return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
71 """Sum tensors from multiple GPUs.
77 inputs (Iterable[Tensor]): an iterable of tensors to add.
[all …]
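Per the docstring, broadcast_coalesced first packs small tensors into a single buffer to cut the number of synchronizations, then broadcasts to each listed GPU. A guarded sketch, assuming at least two CUDA devices (otherwise it is a no-op):

    import torch
    from torch.nn.parallel import comm

    if torch.cuda.device_count() >= 2:
        ts = [torch.randn(5, device="cuda:0"),
              torch.randn(3, device="cuda:0")]
        copies = comm.broadcast_coalesced(ts, devices=[0, 1])
        # copies[i] is the list of broadcast tensors placed on device i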
/aosp_15_r20/external/pytorch/torch/csrc/cuda/
comm.cpp
25 // Some operations can be performed more efficiently if we're handling tensors
47 // tensors on one or more devices.
79 "Expected all output tensors to be CUDA tensors, but output tensor at index ", in _broadcast_out_impl()
86 "Expected all output tensors to have same shape as the source tensor ", in _broadcast_out_impl()
161 TensorList tensors, in _broadcast_out_impl() argument
166 tensors.begin(), in _broadcast_out_impl()
167 tensors.end(), in _broadcast_out_impl()
169 "All tensors must be on devices[0]: ", in _broadcast_out_impl()
177 outputs[0] = tensors.vec(); in _broadcast_out_impl()
179 o.reserve(tensors.size()); in _broadcast_out_impl()
[all …]
