
Searched full:unsqueeze (Results 1 – 25 of 454) sorted by relevance


/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_unsqueeze.py
8 # Tests the unsqueeze op which copies the data of the input tensor (possibly with new data format)
24 class Unsqueeze(torch.nn.Module): class in TestSimpleUnsqueeze
29 return x.unsqueeze(dim)
41 .check_count({"torch.ops.aten.unsqueeze.default": 1})
60 .check_count({"torch.ops.aten.unsqueeze.default": 1})
82 .check_count({"torch.ops.aten.unsqueeze.default": 1})
89 @parameterized.expand(Unsqueeze.test_parameters)
92 self._test_unsqueeze_tosa_MI_pipeline(self.Unsqueeze(), (test_tensor, i))
94 @parameterized.expand(Unsqueeze.test_parameters)
96 self._test_unsqueeze_tosa_BI_pipeline(self.Unsqueeze(), (test_tensor, 0))
[all …]
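The hits above exercise `torch.ops.aten.unsqueeze.default` through the ARM/TOSA test pipeline. Purely as a reference for the shape semantics being checked, here is a plain eager-mode sketch (example shapes only, not the backend test itself):

```python
import torch

# unsqueeze inserts a size-1 dimension at `dim`; for an n-d input the valid
# dims are [-(n + 1), n]. The elements are unchanged, only the rank grows by one.
x = torch.rand(4, 25)
for dim in range(-(x.dim() + 1), x.dim() + 1):
    y = x.unsqueeze(dim)
    assert y.dim() == x.dim() + 1
    assert y.shape[dim if dim >= 0 else dim + y.dim()] == 1
    assert torch.equal(y.flatten(), x.flatten())  # same data, one extra size-1 dim
```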
/aosp_15_r20/external/executorch/backends/arm/_passes/
conv1d_unsqueeze_pass.py
29 1) unsqueeze the convolution's input from 3d to 4d
30 2) if the input to unsqueeze is quantized, insert q/dq-pair after unsqueeze
52 data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
61 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
67 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
77 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
121 # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
122 # unsqueeze -> conv2d -> squeeze
134 # If Quantized we must insert unsqueeze --> q --> dq --> node
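The pass comments describe rewriting a 1-d convolution as unsqueeze -> conv2d -> squeeze (re-inserting q/dq nodes when the graph is quantized). As a rough illustration of why that rewrite is numerically safe, here is a hand-written eager-mode analogue; the shapes and padding are arbitrary example values, not taken from the pass:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 32)   # (N, C_in, L), a 3d conv1d input
w = torch.randn(16, 8, 3)   # (C_out, C_in, K), a 3d conv1d kernel

ref = F.conv1d(x, w, padding=1)

# unsqueeze input and kernel to 4d, run conv2d, squeeze the extra dim back off
x4 = x.unsqueeze(-1)        # (N, C_in, L, 1)
w4 = w.unsqueeze(-1)        # (C_out, C_in, K, 1), mirroring kernel.unsqueeze(dim=-1) above
out = F.conv2d(x4, w4, padding=(1, 0)).squeeze(-1)

assert torch.allclose(ref, out, atol=1e-5)
```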
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
NaiveDilatedConvolution.cu
425 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_cuda()
428 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cuda()
432 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated2d_cuda()
471 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_backward_cuda()
475 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
477 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
487 (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) in slow_conv_dilated2d_backward_cuda()
531 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated3d_cuda()
534 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cuda()
538 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated3d_cuda()
[all …]
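The recurring `is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)` pattern lets one kernel template assume batched tensors: an unbatched input gets a leading size-1 batch dimension and the caller squeezes it back afterwards. A small Python sketch of the same idea (the helper name below is made up for the example):

```python
import torch

def as_batched(t: torch.Tensor, batched_rank: int) -> tuple[torch.Tensor, bool]:
    """Return a tensor that definitely has a batch dim, plus whether it already had one."""
    is_batch = t.dim() == batched_rank
    return (t.contiguous() if is_batch else t.contiguous().unsqueeze(0)), is_batch

x = torch.randn(3, 32, 32)                 # unbatched (C, H, W) input to a 2d conv
x_, was_batched = as_batched(x, batched_rank=4)
assert x_.shape == (1, 3, 32, 32) and not was_batched
# ... run the batched kernel on x_, then drop dim 0 again if not was_batched
```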
ReplicationPadding.cu
277 gradInput_ = gradInput.unsqueeze(0); in replication_pad2d_backward_out_cuda_template()
278 gradOutput_ = gradOutput.unsqueeze(0); in replication_pad2d_backward_out_cuda_template()
391 gradInput_ = gradInput.unsqueeze(0); in replication_pad3d_backward_out_cuda_template()
392 gradOutput_ = gradOutput.unsqueeze(0); in replication_pad3d_backward_out_cuda_template()
445 input_ = input.unsqueeze(0); in TORCH_IMPL_FUNC()
446 output_ = output.unsqueeze(0); in TORCH_IMPL_FUNC()
508 gradInput_ = gradInput.unsqueeze(0); in TORCH_IMPL_FUNC()
509 gradOutput_ = gradOutput.unsqueeze(0); in TORCH_IMPL_FUNC()
551 input_ = input.unsqueeze(0); in TORCH_IMPL_FUNC()
552 output_ = output.unsqueeze(0); in TORCH_IMPL_FUNC()
[all …]
/aosp_15_r20/external/pytorch/test/onnx/
test_onnx_opset.py
257 "op_name": "Unsqueeze",
372 {"op_name": "Unsqueeze"},
373 {"op_name": "Unsqueeze"},
398 {"op_name": "Unsqueeze"},
399 {"op_name": "Unsqueeze"},
499 {"op_name": "Unsqueeze"},
501 {"op_name": "Unsqueeze"},
503 {"op_name": "Unsqueeze"},
505 {"op_name": "Unsqueeze"},
514 {"op_name": "Unsqueeze"},
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
Unsqueeze.cpp
17 Tensor unsqueeze(const at::Tensor& self, int64_t dim) { in unsqueeze() function
20 "Vulkan unsqueeze only supports up to 3d tensors as input!"); in unsqueeze()
23 "Vulkan unsqueeze dimension out of range expected to be in range of [", in unsqueeze()
37 // Create the output texture. For unsqueeze, add a dimension. in unsqueeze()
93 // Dimension to unsqueeze in unsqueeze()
105 VK_KERNEL(unsqueeze), in unsqueeze()
129 m.impl(TORCH_SELECTIVE_NAME("aten::unsqueeze"), TORCH_FN(unsqueeze)); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
NaiveDilatedConvolution.cpp
553 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_cpu()
556 (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cpu()
560 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated2d_cpu()
605 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated3d_cpu()
608 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cpu()
612 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated3d_cpu()
654 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_backward_cpu()
658 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cpu()
660 (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cpu()
670 (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) in slow_conv_dilated2d_backward_cpu()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
wrapdim_test.cpp
14 ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0))); in TestExpressionSpecification()
15 ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1))); in TestExpressionSpecification()
17 // can unsqueeze scalar in TestExpressionSpecification()
19 ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1))); in TestExpressionSpecification()
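The assertions above pin down how negative dims wrap for unsqueeze. The same checks in Python, assuming a 4-d tensor like the one in the test:

```python
import torch

a = torch.randn(2, 3, 4, 5)       # 4-d, so unsqueeze accepts dims in [-5, 4]
assert a.unsqueeze(-5).equal(a.unsqueeze(0))
assert a.unsqueeze(4).equal(a.unsqueeze(-1))

b = torch.tensor(1.0)             # 0-d scalars can be unsqueezed too
assert b.unsqueeze(0).equal(b.unsqueeze(-1))
assert b.unsqueeze(0).shape == (1,)
```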
legacy_vmap_test.cpp
174 tensor = tensor.unsqueeze(0); in TEST()
428 {x.unsqueeze(1), y.unsqueeze(2)}); in TEST()
440 {x.unsqueeze(1), y}); in TEST()
612 ASSERT_TRUE(at::allclose(out, x.unsqueeze(1) * y)); in TEST()
630 ASSERT_TRUE(at::allclose(out, x * y.permute({1, 2, 0}).unsqueeze(3))); in TEST()
745 // Basic test for BatchedTensor::unsqueeze
752 auto batched_out = batched.unsqueeze(0); in TEST()
755 ASSERT_TRUE(at::allclose(out, tensor.unsqueeze(1))); in TEST()
762 auto batched_out = batched.unsqueeze(0); in TEST()
765 ASSERT_TRUE(at::allclose(out, tensor.unsqueeze(2))); in TEST()
[all …]
/aosp_15_r20/external/pytorch/functorch/
writing_batching_rules.md
18 ### Basic Batching Rule (unsqueeze)
19 …atching rule API. For some reference, the function signature for unsqueeze is `unsqueeze(Tensor(a)…
28 return std::make_tuple(self_.unsqueeze(dim), 0);
45 …unsqueeze(x, dim)`, the strategy for the batching rule is pretty simple. We first move the batchin…
48 return std::make_tuple(self_.unsqueeze(dim), 0);
53 VMAP_SUPPORT(unsqueeze, unsqueeze_batch_rule);
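The batching-rule doc sketches the strategy: move the batch dim to the front, adjust `dim` for the extra physical dimension, unsqueeze, and report the output's batch dim as 0. A Python re-creation of that logic (the real rule is C++ registered via `VMAP_SUPPORT`; the function below is only an illustration):

```python
import torch

def unsqueeze_batch_rule(self: torch.Tensor, bdim: int, dim: int):
    self_ = self.movedim(bdim, 0)            # batch dim to the front
    logical_rank = self_.dim() - 1           # rank of the user-visible tensor
    if dim < 0:                              # wrap negative dims against logical rank + 1
        dim += logical_rank + 1
    return self_.unsqueeze(dim + 1), 0       # +1 to skip the physical batch dim

x = torch.randn(5, 3, 4)                     # batch of five (3, 4) tensors, bdim=0
out, out_bdim = unsqueeze_batch_rule(x, 0, 1)
assert out.shape == (5, 3, 1, 4) and out_bdim == 0
assert torch.equal(out, torch.stack([xi.unsqueeze(1) for xi in x]))  # per-sample semantics
```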
/aosp_15_r20/external/pytorch/benchmarks/fastrnns/
factory.py
290 inputs[seq_idx].unsqueeze(0), (hy, cy), wih, whh, bih, bhh
294 hx_outs += [hy.unsqueeze(0)]
295 cx_outs += [cy.unsqueeze(0)]
422 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
448 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
480 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
527 return torch.stack(inputs), (hy.unsqueeze(0), cy.unsqueeze(0))
/aosp_15_r20/external/pytorch/test/inductor/
test_cuda_repro.py
124 unsqueeze = torch.ops.aten.unsqueeze.default(x, 4)
125 permute = torch.ops.aten.permute.default(unsqueeze, [0, 1, 2, 4, 3])
159 unsqueeze_default_2 = torch.ops.aten.unsqueeze.default(randn, -1)
501 unsqueeze = torch.ops.aten.unsqueeze.default(sub, -1)
502 gather = torch.ops.aten.gather.default(x, 1, unsqueeze)
567 return a.unsqueeze(0).unsqueeze(0) + z
628 dec_mask = padmask.unsqueeze(-1) == padmask.unsqueeze(-2)
635 values = rel_pos.abs().neg().unsqueeze(0).unsqueeze(0)
1158 unsqueeze = torch.ops.aten.unsqueeze.default(mul_1, 0)
1159 unsqueeze_1 = torch.ops.aten.unsqueeze.default(unsqueeze, 2)
[all …]
test_scatter_optimization.py
38 y.scatter_(2, x.unsqueeze(2), 2.718)
56 y.scatter_(0, x.unsqueeze(0), 2.718)
69 y.scatter_(-1, x.unsqueeze(1), 2.718)
82 y.scatter_(1, x.unsqueeze(1), 2.718)
96 y.scatter_(1, x.unsqueeze(1), 2.718)
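Every test above builds the scatter index by unsqueezing, so the index tensor has the same rank as the destination, which `scatter_` requires. A minimal example of that pattern (toy shapes, not the test's):

```python
import torch

y = torch.zeros(4, 5)
x = torch.randint(0, 5, (4,))             # one column index per row
y.scatter_(1, x.unsqueeze(1), 2.718)      # index shape (4, 1) matches y's rank
assert torch.allclose(y.gather(1, x.unsqueeze(1)).squeeze(1), torch.full((4,), 2.718))
```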
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/reference/modules/
rnn.py
191 input = input.unsqueeze(0)
198 hx = hx.unsqueeze(0) if not is_batched else hx
280 input = input.unsqueeze(0)
288 hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
353 input = input.unsqueeze(0)
360 hx = hx.unsqueeze(0) if not is_batched else hx
583 input = input.unsqueeze(batch_dim)
624 hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
757 input = input.unsqueeze(batch_dim)
763 hx = hx.unsqueeze(1)
/aosp_15_r20/external/executorch/kernels/test/
op_unsqueeze_copy_test.cpp
60 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in test_dtype()
61 // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2] in test_dtype()
73 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in test_empty_input()
74 // Here input.dim == 4, so the range of legal dim for unsqueeze is [-5, 4] in test_empty_input()
169 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in TEST_F()
170 // Here input.dim == 3, so the range of legal dim for unsqueeze is [-4, 3] in TEST_F()
193 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in TEST_F()
194 // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2] in TEST_F()
226 res = torch.unsqueeze(x, 1)
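The comments repeat the rule for a legal unsqueeze dim: for an n-d input it is the inclusive range [-(n + 1), n]. A quick eager-mode check of the 2-d case mentioned above:

```python
import torch

x = torch.rand(3, 4)                      # n = 2, so legal dims are [-3, 2]
for dim in range(-3, 3):
    x.unsqueeze(dim)                      # every dim in the legal range succeeds

try:
    x.unsqueeze(3)                        # one past the end
except IndexError as err:
    print("rejected as expected:", err)
```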
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
SparseCsrTensor.cpp
1145 …ol_indices[dim_indices].mul(blocksize[other_dim]).unsqueeze(1) + arange(blocksize[other_dim]).unsq… in select_sparse_csr_worker()
1150 -> (col_indices[[0, 1]].mul(3).unsqueeze(1) + arange(3).unsqueeze(0)).flatten(0, 1) in select_sparse_csr_worker()
1151 -> ([[0 1].mul(3).unsqueeze(1) + [[0 1 2]]).flatten(0, 1) in select_sparse_csr_worker()
1159 sparse_coo_tensor([0 1 2 3 4 5].unsqueeze(0), [4 5 6 10 11 12], (6,)) in select_sparse_csr_worker()
1184 …ow_indices[dim_indices].mul(blocksize[other_dim]).unsqueeze(1) + arange(blocksize[other_dim]).unsq… in select_sparse_csr_worker()
1194 …ow_indices[dim_indices].mul(blocksize[other_dim]).unsqueeze(1) + arange(blocksize[other_dim]).unsq… in select_sparse_csr_worker()
1195 -> (row_indices[[1 3]].mul(2).unsqueeze(1) + arange(2).unsqueeze(0)).flatten(0, 1) in select_sparse_csr_worker()
1196 -> ([0 4].unsqueeze(1) + [0 1].unsqueeze(0)).flatten(0, 1) in select_sparse_csr_worker()
1204 sparse_coo_tensor([0 1 4 5].unsqueeze(0), [8 11 20 23], (6,)) in select_sparse_csr_worker()
1210 …indices = indices.mul(blocksize[other_dim]).unsqueeze(1).add(subblock_indices.unsqueeze(0)).flatte… in select_sparse_csr_worker()
[all …]
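The worked examples in these comments expand block indices into element indices with an outer sum built from two unsqueezes. Reproducing the `col_indices[[0, 1]].mul(3)` arithmetic from the comment as a standalone snippet:

```python
import torch

col_indices = torch.tensor([0, 1])
blocksize = 3
expanded = (col_indices.mul(blocksize).unsqueeze(1)   # shape (2, 1)
            + torch.arange(blocksize).unsqueeze(0)    # shape (1, 3)
            ).flatten(0, 1)                           # broadcast to (2, 3), then flatten
assert expanded.tolist() == [0, 1, 2, 3, 4, 5]        # element indices for the two blocks
```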
/aosp_15_r20/external/pytorch/torch/_decomp/
decompositions.py
108 x = x.unsqueeze(-1)
521 target = target.unsqueeze(channel_dim)
527 grad_output = grad_output.unsqueeze(channel_dim)
926 blocks_d_indices = arange_kw(0, blocks_d, stride_d).unsqueeze(0)
929 kernel_grid = arange_kw(0, kernel_d * dilation_d, dilation_d).unsqueeze(-1)
984 input = input.unsqueeze(0)
1004 blocks_row_indices = blocks_row_indices.unsqueeze(-1).unsqueeze(-1)
1082 input = input.unsqueeze(0)
1286 grad_output = grad_output / grad_weights_scale.unsqueeze(-1)
1609 ds_val = torch.mul(ds, gamma.unsqueeze(0)).reshape(N, group, cpg).sum(2)
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.cpp
234 x2 * ratio.sum(-2, false).unsqueeze(-1) - ratio.mT().matmul(x1)}; in _euclidean_dist_backward()
400 (grad_output.dim() == 1 ? grad_output.unsqueeze(0) : grad_output) in linear_double_backward()
408 (grad_output.dim() == 1 ? grad_output.unsqueeze(1) : grad_output.mT()) in linear_double_backward()
409 .matmul(grads[0].dim() == 1 ? grads[0].unsqueeze(0) : grads[0]); in linear_double_backward()
415 grad_grad_output = grad_grad_output.unsqueeze(0); in linear_double_backward()
421 (grads[0].dim() == 1 ? grads[0].unsqueeze(0) : grads[0]) in linear_double_backward()
426 (self.dim() == 1 ? self.unsqueeze(0) : self).matmul(grads[1].mT()); in linear_double_backward()
574 args = args.add(self.unsqueeze(-1)); in mvlgamma_backward()
693 return t.unsqueeze(dim[0]); in unsqueeze_multiple()
700 res = res.unsqueeze(static_cast<int64_t>(i)); in unsqueeze_multiple()
[all …]
/aosp_15_r20/external/pytorch/torch/distributions/
lowrank_multivariate_normal.py
21 Wt_Dinv = W.mT / D.unsqueeze(-2)
46 Wt_Dinv = W.mT / D.unsqueeze(-2)
110 loc_ = loc.unsqueeze(-1)
111 cov_diag_ = cov_diag.unsqueeze(-1)
167 cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
192 / self._unbroadcasted_cov_diag.unsqueeze(-2)
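`Wt_Dinv = W.mT / D.unsqueeze(-2)` computes W^T D^{-1} without materialising a diagonal matrix: unsqueezing D at dim -2 broadcasts the reciprocal scale across the columns of `W.mT`. A small check of that identity (example shapes only):

```python
import torch

W = torch.randn(6, 2)           # low-rank factor, (event_size, rank)
D = torch.rand(6) + 0.1         # strictly positive diagonal

Wt_Dinv = W.mT / D.unsqueeze(-2)              # shape (2, 6)
assert torch.allclose(Wt_Dinv, W.mT @ torch.diag(1.0 / D))
```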
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
symbolic_script.cpp
24 t = t.unsqueeze(d)
35 return grad.unsqueeze(dims[0]).expand(sizes)
218 grad = grad.unsqueeze(dim)
219 indices = indices.unsqueeze(dim)
330 self = self.unsqueeze(i)
338 return self.unsqueeze(dim)
424 mat2_unsqueeze = mat2.unsqueeze(-1)
432 out = torch.matmul(mat1.unsqueeze(dim1), mat2.unsqueeze(target_dim2))
434 out = torch.matmul(mat1, mat2.unsqueeze(dim2)).squeeze(-1)
436 out = torch.matmul(mat1.unsqueeze(-2), mat2).squeeze(-2)
[all …]
/aosp_15_r20/external/executorch/backends/xnnpack/_passes/
conv1d_unsqueeze_pass.py
23 1) unsqueeze the convolution's input from 3d to 4d
81 data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
150 # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
151 # unsqueeze -> conv2d -> squeeze
163 # If Quantized we must insert unsqueeze --> q --> dq --> node
/aosp_15_r20/external/pytorch/aten/src/ATen/
LegacyBatchingRegistrations.cpp
237 // NB: unsqueeze has some special handling of its `dim` argument so we can't call in unsqueeze_batching_rule()
238 // self_physical.getPhysicalDim directly. In particular, native::unsqueeze in unsqueeze_batching_rule()
243 auto result = self_physical.tensor().unsqueeze(dim_physical); in unsqueeze_batching_rule()
322 auto grad_physical_tensor = grad_physical.tensor().unsqueeze(-1); in trace_backward_batching_rule()
827 // a tensor of size [..., L, 1], and unsqueeze the last dim. in mv_batching_rule()
829 auto result = at::matmul(self, other_physical.tensor().unsqueeze(-1)); in mv_batching_rule()
835 // a tensor of size [..., L, 1], and unsqueeze the last dim. in mv_batching_rule()
839 physical_args[1].tensor().unsqueeze(-1)); in mv_batching_rule()
868 // View the tensors as [..., 1, K] and [K], perform matmul, and unsqueeze. in dot_batching_rule()
870 auto result = at::matmul(self_physical.tensor().unsqueeze(-2), other); in dot_batching_rule()
[all …]
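The mv/dot batching rules avoid a dedicated batched kernel by unsqueezing the vector into a [..., L, 1] matrix, calling matmul, and squeezing the result back. The same identity in plain eager code:

```python
import torch

A = torch.randn(4, 3, 5)        # a batch of matrices
v = torch.randn(5)              # an unbatched vector

batched_mv = torch.matmul(A, v.unsqueeze(-1)).squeeze(-1)      # (4, 3)
reference = torch.stack([Ai.mv(v) for Ai in A])
assert torch.allclose(batched_mv, reference)
```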
/aosp_15_r20/external/libopus/dnn/torch/osce/utils/layers/
silk_upsampler.py
138 y = torch.cat((y_even.unsqueeze(-1), y_odd.unsqueeze(-1)), dim=-1).flatten(2)
157 (y_01_24.unsqueeze(-1), y_17_24.unsqueeze(-1), y_09_24_sh1.unsqueeze(-1)),
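The upsampler interleaves two polyphase outputs by unsqueezing a trailing dim on each, concatenating along it, and flattening. A shape-only illustration of that trick with toy values (not the SILK filter outputs):

```python
import torch

y_even = torch.tensor([[[0., 2., 4.]]])     # (B=1, C=1, T=3)
y_odd  = torch.tensor([[[1., 3., 5.]]])
y = torch.cat((y_even.unsqueeze(-1), y_odd.unsqueeze(-1)), dim=-1).flatten(2)
assert y.squeeze().tolist() == [0., 1., 2., 3., 4., 5.]   # samples interleaved, 2x length
```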
/aosp_15_r20/external/pytorch/functorch/notebooks/_src/
plot_per_sample_gradients.py
68 sample = sample.unsqueeze(0)
69 target = target.unsqueeze(0)
106 # ``torch.unsqueeze`` to add a batch dimension.
108 batch = sample.unsqueeze(0)
109 targets = target.unsqueeze(0)
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
LegacyVmapTransforms.cpp
103 tensor = tensor.unsqueeze(0); in moveDimToFrontAndExpand()
144 // Unsqueeze dim 0, expand it to the correct shape in logicalToPhysical()
163 tensor = tensor.unsqueeze(0); in moveDimToFrontAndUnsqueeze()
167 tensor = tensor.unsqueeze(1); in moveDimToFrontAndUnsqueeze()
199 // Unsqueeze dim 0, expand it to the correct shape in logicalToPhysical()
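"Unsqueeze dim 0, expand it to the correct shape" is how vmap aligns a tensor that has no batch dim with one that does; in eager terms:

```python
import torch

batched = torch.randn(8, 3, 4)
unbatched = torch.randn(3, 4)
aligned = unbatched.unsqueeze(0).expand(8, 3, 4)   # broadcast view, no data copy
assert aligned.shape == batched.shape
assert torch.equal(aligned[0], unbatched)
```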
