
Searched full:is_cuda (Results 1 – 25 of 238) sorted by relevance


/aosp_15_r20/external/pytorch/torch/ao/quantization/pt2e/
qat_utils.py
62 is_cuda: bool,
79 if is_cuda:
637 for is_cuda in is_cuda_options:
639 m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=is_cuda
642 m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=is_cuda
645 m, F.conv_transpose1d, _conv1d_bn_example_inputs, is_cuda=is_cuda
648 m, F.conv_transpose2d, _conv2d_bn_example_inputs, is_cuda=is_cuda
657 is_cuda: bool,
671 conv_bn_pattern, example_inputs, is_cuda
684 is_cuda,
[all …]
export_utils.py
142 is_cuda = device is not None and device.type == "cuda"
146 is_cuda,
151 is_cuda,
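
The export_utils.py hit above derives an is_cuda flag from an optional device. A minimal C++ sketch of the same check against the libtorch torch::Device API (derive_is_cuda is a hypothetical helper name):

#include <torch/torch.h>
#include <optional>

// Hypothetical helper mirroring the Python check
// `device is not None and device.type == "cuda"`.
bool derive_is_cuda(const std::optional<torch::Device>& device) {
  return device.has_value() && device->is_cuda();
}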
/aosp_15_r20/external/pytorch/test/cpp/api/
transformer.cpp
55 bool is_cuda, in transformer_encoder_layer_test_helper() argument
58 torch::Device device = is_cuda ? torch::kCUDA : torch::kCPU; in transformer_encoder_layer_test_helper()
234 /*is_cuda=*/false, /*use_callable_activation=*/false); in TEST_F()
236 /*is_cuda=*/false, /*use_callable_activation=*/true); in TEST_F()
241 /*is_cuda=*/true, /*use_callable_activation=*/false); in TEST_F()
243 /*is_cuda=*/true, /*use_callable_activation=*/true); in TEST_F()
247 bool is_cuda, in transformer_decoder_layer_test_helper() argument
249 torch::Device device = is_cuda ? torch::kCUDA : torch::kCPU; in transformer_decoder_layer_test_helper()
440 /*is_cuda=*/false, /*use_callable_activation=*/false); in TEST_F()
442 /*is_cuda=*/false, /*use_callable_activation=*/true); in TEST_F()
[all …]
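
The transformer.cpp test helpers above map a bool is_cuda flag to a torch::Device and run the same test body on both backends. A minimal sketch of that pattern (run_on is a hypothetical stand-in for a test helper):

#include <torch/torch.h>

// Flag-to-device pattern from the test helpers above: pick the device,
// build tensors on it, and verify they landed where expected.
void run_on(bool is_cuda) {
  torch::Device device = is_cuda ? torch::Device(torch::kCUDA) : torch::Device(torch::kCPU);
  auto t = torch::randn({2, 3}, device);
  TORCH_CHECK(t.is_cuda() == is_cuda, "tensor landed on the wrong device");
}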
parallel.cpp
155 ASSERT_TRUE(outputs[0].device().is_cuda()); in TEST_F()
158 ASSERT_TRUE(outputs[1].device().is_cuda()); in TEST_F()
196 ASSERT_TRUE(output.device().is_cuda()); in TEST_F()
207 ASSERT_TRUE(output.device().is_cuda()); in TEST_F()
/aosp_15_r20/external/pytorch/test/distributions/
test_constraints.py
85 def build_constraint(constraint_fn, args, is_cuda=False): argument
88 t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
94 "is_cuda",
102 def test_constraint(constraint_fn, result, value, is_cuda): argument
103 t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
111 "is_cuda",
119 def test_biject_to(constraint_fn, args, is_cuda): argument
120 constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
131 if is_cuda:
152 "is_cuda",
[all …]
/aosp_15_r20/external/pytorch/test/
test_foreach.py
77 def __call__(self, inputs, is_cuda, expect_fastpath, **kwargs): argument
81 is_cuda
142 def is_cuda(self): member in TestForeach
175 is_cuda=self.is_cuda,
184 is_cuda=self.is_cuda,
229 self.is_cuda,
261 actual = op(inputs, self.is_cuda, is_fastpath)
284 actual = op(inputs, self.is_cuda, is_fastpath, **op_kwargs)
344 [rhs_arg, tensors], is_cuda=False, expect_fastpath=False
401 if self.is_cuda:
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
SparseCUDATensorMath.cu
91 …TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to… in s_addmm_out_sparse_dense_cuda()
92 …TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to… in s_addmm_out_sparse_dense_cuda()
93 …TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'ma… in s_addmm_out_sparse_dense_cuda()
94 …TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2… in s_addmm_out_sparse_dense_cuda()
187 TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
188 TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
189 TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU"); in hspmm_out_sparse_cuda()
272 TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
273 TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
274 TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor"); in add_out_dense_sparse_cuda()
[all …]
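
The sparse CUDA kernels above all open with the same device-guard pattern: every input and output must already be a CUDA tensor before the kernel proper runs. A minimal sketch (check_all_cuda is a hypothetical name):

#include <torch/torch.h>

// Device-guard pattern from the sparse CUDA kernels above: fail fast with a
// descriptive message if any argument is not on a CUDA device.
void check_all_cuda(const torch::Tensor& self,
                    const torch::Tensor& other,
                    const torch::Tensor& out) {
  TORCH_CHECK(self.is_cuda(), "expected 'self' to be a CUDA tensor, but got a CPU tensor");
  TORCH_CHECK(other.is_cuda(), "expected 'other' to be a CUDA tensor, but got a CPU tensor");
  TORCH_CHECK(out.is_cuda(), "expected 'out' to be a CUDA tensor, but got a CPU tensor");
}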
SparseCsrTensorMath.cu
147 TORCH_INTERNAL_ASSERT(dense.is_cuda()); in add_out_dense_sparse_compressed_cuda()
154 output.is_cuda(), in add_out_dense_sparse_compressed_cuda()
159 src.is_cuda(), in add_out_dense_sparse_compressed_cuda()
327 self.is_cuda(), in add_out_sparse_compressed_cuda()
331 other.is_cuda(), in add_out_sparse_compressed_cuda()
335 out.is_cuda(), in add_out_sparse_compressed_cuda()
487 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in reduce_sparse_csr_dim0_cuda_template()
578 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in reduce_sparse_csr_dim1_cuda_template()
676 TORCH_INTERNAL_ASSERT(sparse.is_cuda()); in reduce_sparse_csr_cuda_template()
781 // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type in _sparse_csr_sum_cuda()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
SummaryOps.cu
37 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in getBin()
38 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in getBin()
70 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in C10_LAUNCH_BOUNDS_1()
71 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in C10_LAUNCH_BOUNDS_1()
178 at::acc_type<input_t, /*is_cuda=*/true> minvalue, in CUDA_tensor_histogram()
179 at::acc_type<input_t, /*is_cuda=*/true> maxvalue, in CUDA_tensor_histogram()
280 using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; in _bincount_cuda_template()
312 at::acc_type<input_t, /*is_cuda=*/true> min, in _histc_cuda_template()
313 at::acc_type<input_t, /*is_cuda=*/true> max) { in _histc_cuda_template()
396 using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _histc_cuda()
Copy.cu
263 TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); in copy_requires_temporaries()
271 } else if (dst_device.is_cuda() && src_device.is_cuda()) { in copy_requires_temporaries()
339 if (dst_device.is_cuda() && src_device.is_cuda()) { in copy_kernel_cuda()
347 if (dst_device.is_cuda() && src_device.is_cpu()) { in copy_kernel_cuda()
350 } else if (dst_device.is_cpu() && src_device.is_cuda()) { in copy_kernel_cuda()
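
The Copy.cu hits above branch on the (dst, src) device pair to pick a copy direction. A sketch of that dispatch under stated assumptions (CopyKind and classify_copy are hypothetical; the real kernel launches a different copy path per pair):

#include <torch/torch.h>

// Device-pair classification mirroring the branches in copy_kernel_cuda above.
enum class CopyKind { DeviceToDevice, HostToDevice, DeviceToHost };

CopyKind classify_copy(const torch::Device& dst, const torch::Device& src) {
  if (dst.is_cuda() && src.is_cuda()) return CopyKind::DeviceToDevice;
  if (dst.is_cuda() && src.is_cpu()) return CopyKind::HostToDevice;
  TORCH_CHECK(dst.is_cpu() && src.is_cuda(), "unsupported device pair");
  return CopyKind::DeviceToHost;
}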
AmpKernels.cu
96 TORCH_CHECK(inv_scale.is_cuda(), "inv_scale must be a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
97 TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
117 TORCH_CHECK(scaled_grads[0].is_cuda(), "scaled_grads must be CUDA tensors."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
133 TORCH_CHECK(t.is_cuda(), "one of scaled_grads was not a CUDA tensor."); in _amp_foreach_non_finite_check_and_unscale_cuda_()
230 TORCH_CHECK(growth_tracker.is_cuda(), "growth_tracker must be a CUDA tensor."); in _amp_update_scale_cuda_()
231 TORCH_CHECK(current_scale.is_cuda(), "current_scale must be a CUDA tensor."); in _amp_update_scale_cuda_()
232 TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); in _amp_update_scale_cuda_()
IGammaKernel.cu
32 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in ratevl()
83 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in lanczos_sum_expg_scaled()
127 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _igam_helper_fac()
160 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _igam_helper_series()
194 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _igamc_helper_series()
221 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _igam_helper_asymptotic_series()
313 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in _igamc_helper_continued_fraction()
385 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in calc_igammac()
468 using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; in calc_igamma()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/
EmbeddingBag.cu
199 TORCH_CHECK(weight.is_cuda()); in embedding_bag_byte_impl()
200 TORCH_CHECK(indices.is_cuda()); in embedding_bag_byte_impl()
201 TORCH_CHECK(offsets.is_cuda()); in embedding_bag_byte_impl()
262 TORCH_CHECK(output.is_cuda()); in embedding_bag_byte_impl()
381 TORCH_CHECK(weight.is_cuda()); in embedding_bag_4bit_impl()
382 TORCH_CHECK(indices.is_cuda()); in embedding_bag_4bit_impl()
383 TORCH_CHECK(offsets.is_cuda()); in embedding_bag_4bit_impl()
444 TORCH_CHECK(output.is_cuda()); in embedding_bag_4bit_impl()
/aosp_15_r20/external/pytorch/torch/csrc/tensor/
python_tensor.cpp
37 bool is_cuda; member
73 !tensor_type.is_cuda || torch::utils::cuda_enabled(), in Tensor_new()
77 if (tensor_type.is_cuda) { in Tensor_new()
126 if (self->is_cuda) { in Tensor_is_cuda()
168 {"is_cuda", (getter)Tensor_is_cuda, nullptr, nullptr, nullptr},
248 type_obj.is_cuda = in set_type()
440 !type->is_cuda || torch::utils::cuda_enabled(), in py_set_default_tensor_type()
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
cuda_reportMemoryUsage_test.cpp
22 EXPECT_TRUE(r.device.is_cuda()); in TEST()
38 EXPECT_TRUE(r.device.is_cuda()); in TEST()
54 EXPECT_TRUE(r.device.is_cuda()); in TEST()
62 EXPECT_TRUE(r.device.is_cuda()); in TEST()
cuda_tensor_interop_test.cpp
34 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
53 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
72 ASSERT_TRUE(at_tensor.is_cuda()); in TEST()
117 ASSERT_TRUE(at_result.is_cuda()); in TEST()
/aosp_15_r20/external/pytorch/test/quantization/pt2e/
test_quantize_pt2e_qat.py
186 is_cuda: bool = False,
197 is_cuda=is_cuda,
207 is_cuda=is_cuda,
219 is_cuda: bool = False,
276 if is_cuda:
471 is_cuda=True,
558 is_cuda=True,
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
SparseTensorMath.cpp
562 AT_ASSERT(!t.is_cuda()); // the dispatch argument in add_out_sparse_cpu()
563 TORCH_CHECK(!r.is_cuda(), "add: expected 'out' to be CPU tensor, but got CUDA tensor"); in add_out_sparse_cpu()
564 TORCH_CHECK(!src.is_cuda(), "add: expected 'other' to be a CPU tensor, but got a CUDA tensor"); in add_out_sparse_cpu()
715 TORCH_CHECK(!dense.is_cuda()); // dispatch argument in add_out_dense_sparse_cpu()
716 TORCH_CHECK(!r.is_cuda(), "add: expected 'out' to be CPU tensor, but got CUDA tensor"); in add_out_dense_sparse_cpu()
717 …TORCH_CHECK(!sparse_.is_cuda(), "add: expected 'other' to be a CPU tensor, but got a CUDA tensor"); in add_out_dense_sparse_cpu()
1050 AT_ASSERT(!t_.is_cuda()); // dispatch argument in mul_out_sparse_cpu()
1051 TORCH_CHECK(!r.is_cuda(), "mul: expected 'out' to be CPU tensor, but got CUDA tensor"); in mul_out_sparse_cpu()
1052 TORCH_CHECK(!src_.is_cuda(), "mul: expected 'other' to be a CPU tensor, but got a CUDA tensor"); in mul_out_sparse_cpu()
1410 AT_ASSERT(!sparse_.is_cuda()); // dispatch argument in hspmm_out_sparse_cpu()
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/
AccumulateType.h
14 // using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
76 template <typename T, bool is_cuda>
77 using acc_type = typename AccumulateType<T, is_cuda>::type;
171 TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);
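
AccumulateType.h above is what every `acc_type<T, is_cuda>` hit in this list resolves through: it maps a scalar type to the wider type used for accumulation. A compile-time sketch of the mappings, which are assumptions drawn from that header (on CUDA, Half accumulates in float; on CPU, float accumulates in double):

#include <ATen/AccumulateType.h>
#include <type_traits>

// Assumed mappings per AccumulateType.h; these asserts compile only if
// the mappings hold.
static_assert(std::is_same_v<at::acc_type<c10::Half, /*is_cuda=*/true>, float>);
static_assert(std::is_same_v<at::acc_type<float, /*is_cuda=*/false>, double>);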
/aosp_15_r20/external/pytorch/test/distributed/checkpoint/
test_state_dict_utils.py
50 self.assertTrue(gathered_state_dict["dtensor"].is_cuda)
70 self.assertFalse(gathered_state_dict["dtensor"].is_cuda)
86 self.assertFalse(v.is_cuda)
156 self.assertFalse(v.is_cuda)
/aosp_15_r20/external/pytorch/torch/csrc/cuda/
comm.cpp
78 out_tensors[i].is_cuda(), in _broadcast_out_impl()
247 out_tensors[i].is_cuda(), in _broadcast_out_impl()
386 chunks[i].copy_(tensors[i], /*non_blocking=*/out_tensor.is_cuda()); in _broadcast_out_impl()
405 tensor.is_cuda(), in _broadcast_out_impl()
461 tensor.is_cuda(), in _broadcast_out_impl()
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/
ProcessGroupUCC.cpp
188 if (dev1.is_cuda() && dev2.is_cuda() && dev1 != dev2) { in check_device()
328 if (dev.is_cuda()) { in Comm()
397 if (dev.is_cuda() && !is_health_check) { in get_comm()
687 if (device.is_cuda()) { in runHealthCheck()
924 bool isCuda = outputTensors[0][0].device().is_cuda(); in allgather()
1420 bool isCuda = inputTensors[0][0].device().is_cuda(); in reduce_scatter()
1618 if (dev.is_cuda()) { in initComm()
1628 if (dev.is_cuda()) { in initComm()
1642 if (!cuda_ee && dev.is_cuda()) { in initComm()
/aosp_15_r20/external/pytorch/
Dockerfile
80 RUN IS_CUDA=$(python -c 'import torch ; print(torch.cuda._is_compiled())'); \
81 echo "Is torch compiled with cuda: ${IS_CUDA}"; \
82 if test "${IS_CUDA}" != "True" -a ! -z "${CUDA_VERSION}"; then \
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
frozen_conv_add_relu_fusion_cuda.cpp
86 if (!weight_t.device().is_cuda() || !weight_t.is_contiguous()) { in fuseFrozenConvAddReluImpl()
96 bias_t.size(0) != weight_t.size(0) || !bias_t.device().is_cuda()) { in fuseFrozenConvAddReluImpl()
109 !z_t.device().is_cuda()) { in fuseFrozenConvAddReluImpl()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
ReduceOpsUtils.h
218 …self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype ==… in make_reduction()
256 (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { in make_reduction()
431 (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { in make_reduction()
448 (self.is_cuda() && in make_reduction_from_out_ty()
