/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/

ConvolutionMM2d.cu
    151  resize_output(output, {batchSize, nOutputPlane, outputHeight, outputWidth});  in slow_conv2d_forward()
    247  resize_output(grad_input, input_sizes);  in slow_conv2d_backward()
    251  resize_output(grad_columns, {nInputPlane*kW*kH, outputHeight*outputWidth});  in slow_conv2d_backward()
    321  resize_output(columns, {nInputPlane * kH * kW, outputHeight * outputWidth});  in slow_conv2d_grad_weight()
    434  resize_output(grad_input, self_.sizes());  in slow_conv2d_backward_out_cuda()
    449  resize_output(grad_weight, weight_.sizes());  in slow_conv2d_backward_out_cuda()
TensorModeKernel.cpp
    41  at::native::resize_output(values, self_sizes);  in mode_kernel_impl()
    42  at::native::resize_output(indices, self_sizes);  in mode_kernel_impl()
MultiMarginLoss.cu
    176  resize_output(out_, {nframe});  in multi_margin_loss_cuda_out()
    178  resize_output(out_, {});  in multi_margin_loss_cuda_out()
    309  resize_output(grad_input_, input_.sizes());  in multi_margin_loss_cuda_backward_out()
DepthwiseConv2d.cu
    401  resize_output(output, out_sizes);  in conv_depthwise2d_forward_out()
    475  resize_output(grad_input, in_sizes);  in conv_depthwise2d_backward_out()
    614  resize_output(grad_weight, {outputChannels, 1, kH, kW});  in conv_depthwise2d_grad_weight_out()
NLLLoss2d.cu
    259  at::native::resize_output(output, {batch_size, H, W});  in nll_loss2d_forward_out_cuda_template()
    289  at::native::resize_output(output, {});  in nll_loss2d_forward_out_cuda_template()
Activation.cpp
    38  resize_output(grad_input, input_sizes);  in glu_backward_cuda_out()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/

SparseBlas.cpp
    130  at::native::resize_output(result, self_->sizes());  in addmm_out_sparse_compressed_cuda()
    175  at::native::resize_output(result, self.sizes());  in baddbmm_out_sparse_csr_cuda()
    233  at::native::resize_output(result, self_->sizes());  in addmv_out_sparse_compressed_cuda()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/

LinearAlgebra.cpp
    584  at::native::resize_output(result, result_tmp.sizes());  in linalg_pinv_out()
    598  at::native::resize_output(result, result_tmp.sizes());  in linalg_pinv_out()
    608  at::native::resize_output(result, result_tmp.sizes());  in linalg_pinv_out()
    803  at::native::resize_output(result, result_shape);  in linalg_matrix_rank_out()
    1035  at::native::resize_output(out, out_shape);  in multi_dot_impl()
    1134  at::native::resize_output(result, matrices[0].sizes());  in chain_matmul_out()
    1266  at::native::resize_output(result, addr_result.sizes().vec());  in math_addr_out()
    1414  at::native::resize_output(result, self_sizes);  in addmm_impl_cpu_()
    1903  at::native::resize_output(result, {});  in dot_out()
    1919  at::native::resize_output(result, {});  in vdot_out()
    [all …]
ReduceAllOps.cpp
    41  at::native::resize_output(out, {});  in min_unary_out()
    63  at::native::resize_output(out, {});  in max_unary_out()
SortingUtils.h
    36  resize_output(values, result_sizes);  in _reduction_with_indices_allocate_or_resize_output()
    50  resize_output(indices, result_sizes);  in _reduction_with_indices_allocate_or_resize_output()
BatchLinearAlgebra.cpp
    1681  at::native::resize_output(result, result_tmp.sizes());  in cholesky_solve_out()
    1741  at::native::resize_output(result, result_tmp.sizes());  in cholesky_out()
    1857  at::native::resize_output(result, result_tmp.sizes());  in cholesky_inverse_out()
    2332  at::native::resize_output(QR, QR_tmp.sizes());  in geqrf_out()
    2334  at::native::resize_output(tau, tau_tmp.sizes());  in geqrf_out()
    2551  at::native::resize_output(result, result_tmp.sizes());  in linalg_householder_product_out()
    2707  at::native::resize_output(result, result_tmp.sizes());  in ormqr_out()
    3018  at::native::resize_output(values, values_tmp.sizes());  in linalg_eig_out()
    3020  at::native::resize_output(vectors, vectors_tmp.sizes());  in linalg_eig_out()
    3026  at::native::resize_output(vectors, vectors_tmp.sizes());  in linalg_eig_out()
    [all …]
ReduceOpsUtils.h
    159  at::native::resize_output(result, shape);  in resize_reduction_result()
    320  at::native::resize_output(result, sizes);  in zero_numel_tensor_resize()
    321  at::native::resize_output(result_indices, sizes);  in zero_numel_tensor_resize()
DistributionTemplates.h
    223  at::native::resize_output(output, shape);  in normal_out_impl()
    234  at::native::resize_output(output, shape);  in normal_out_impl()
    248  at::native::resize_output(output, shape);  in normal_out_impl()
Resize.cpp
    76  bool resize_output(const Tensor& output, IntArrayRef shape) {  in resize_output() function
    86  at::native::resize_output(self, shape);  in _resize_output_()
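Resize.cpp:76 is the definition site of the helper matched throughout this listing. As a minimal sketch of the idiom the hits above follow, assuming only the resize_output signature shown at that line (the kernel my_op_out and its body are hypothetical, not from the source): an out= kernel computes its result, resizes the caller-supplied output tensor to the computed shape, then writes into it.

    // Hypothetical out= kernel illustrating the resize_output idiom;
    // only the signature of resize_output (Resize.cpp:76) is from the listing.
    #include <ATen/ATen.h>
    #include <ATen/native/Resize.h>

    at::Tensor& my_op_out(const at::Tensor& self, at::Tensor& result) {
      at::Tensor tmp = self.exp();  // stand-in for the real computation
      // Resize result to the computed shape (a no-op if it already matches),
      // then copy -- the same compute-into-tmp pattern visible in
      // linalg_pinv_out() and cholesky_out() above.
      at::native::resize_output(result, tmp.sizes());
      result.copy_(tmp);
      return result;
    }

The scalar-shaped calls above, such as resize_output(result, {}) in dot_out(), follow the same pattern with an empty (0-dimensional) shape.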
UnaryOps.cpp
    446  at::native::resize_output(result, complex_result.sizes());  in unary_op_impl_with_complex_to_float_out()
    739  at::native::resize_output(result, ndtr.sizes());  in special_ndtr_out()
    816  at::native::resize_output(result, self.sizes());  in nan_to_num_out()
    929  at::native::resize_output(result, out.sizes());  in mvlgamma_out()
Normalization.cpp
    772  at::native::resize_output(out, self.sizes());  in batch_norm_cpu_out()
    783  at::native::resize_output(save_mean, {self.size(1)});  in batch_norm_cpu_out()
    784  at::native::resize_output(save_var, {self.size(1)});  in batch_norm_cpu_out()
    793  at::native::resize_output(save_mean, {self.size(1)});  in batch_norm_cpu_out()
    794  at::native::resize_output(save_var, {self.size(1)});  in batch_norm_cpu_out()
LossNLL2d.cpp
    123  at::native::resize_output(output, {batch_size, H, W});  in nll_loss2d_forward_out_frame()
    159  at::native::resize_output(output, {});  in nll_loss2d_forward_out_frame()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/

TensorCompare.cpp
    24  at::native::resize_output(out, temp.sizes());  in max_quantized_unary_out()
    41  at::native::resize_output(out, temp.sizes());  in min_quantized_unary_out()
/aosp_15_r20/external/pytorch/aten/src/ATen/templates/

CompositeViewCopyKernels.cpp
    54  at::native::resize_output(dst, src.sizes());  in resize_out_helper()
    60  at::native::resize_output(dst[i], src[i].sizes());  in resize_out_helper()
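From the two hits at lines 54 and 60, resize_out_helper() evidently has both a single-tensor form and a per-element form over tensor lists. A minimal sketch, where the signatures and the size check are assumptions; only the two resize_output calls are from the listing:

    // Assumed signatures; only the two resize_output calls appear in the source.
    #include <ATen/ATen.h>
    #include <ATen/native/Resize.h>
    #include <c10/util/irange.h>

    static void resize_out_helper(const at::Tensor& dst, const at::Tensor& src) {
      at::native::resize_output(dst, src.sizes());
    }

    static void resize_out_helper(const std::vector<at::Tensor>& dst,
                                  const std::vector<at::Tensor>& src) {
      TORCH_INTERNAL_ASSERT(dst.size() == src.size());
      for (const auto i : c10::irange(dst.size())) {
        at::native::resize_output(dst[i], src[i].sizes());
      }
    }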
/aosp_15_r20/external/zlib/contrib/bench/

zlib_bench.cc
    129  bool resize_output = false)  in zlib_compress() argument
    131  if (resize_output)  in zlib_compress()
    157  if (resize_output)  in zlib_compress()
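Note that the zlib_bench.cc hits are an unrelated symbol: a local bool resize_output parameter of zlib_compress() in zlib's benchmark, not the ATen helper matched everywhere else in this listing.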
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkl/

SpectralOps.cpp
    174  resize_output(out, result.sizes());  in _fft_r2c_mkl_out()
    178  resize_output(out, self.sizes());  in _fft_r2c_mkl_out()
    191  resize_output(out, result.sizes());  in _fft_c2r_mkl_out()
    198  resize_output(out, result.sizes());  in _fft_c2c_mkl_out()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/

SparseMatMul.cpp
    240  at::native::resize_output(output_indices, {2, nnz});  in sparse_matmul_kernel()
    241  at::native::resize_output(output_values, nnz);  in sparse_matmul_kernel()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/

Indexing.mm
    231  at::native::resize_output(out_, {total_nonzero, nDim});
    271  at::native::resize_output(out_, out_fallback.sizes());
    278  at::native::resize_output(out_, out_fallback.sizes());
    285  at::native::resize_output(out_, {0, nDim});
    311  at::native::resize_output(out_, out_fallback.sizes());
    323  at::native::resize_output(out_, {total_nonzero, nDim});
BitwiseOps.mm
    246  resize_output(output, output_size);
    291  resize_output(output, self.sizes());
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/

TensorCompareKernel.cpp
    53  at::native::resize_output(result1, self_sizes);  in compare_base_kernel_core()
    54  at::native::resize_output(result2, self_sizes);  in compare_base_kernel_core()