/aosp_15_r20/external/pytorch/test/

test_mkldnn.py
  45: cpu_tensor.requires_grad_()
  204: root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
  216: root = torch.randn(4, 5, dtype=torch.float32).to_mkldnn().requires_grad_()
  256: x1.requires_grad_()
  257: x2.requires_grad_()
  332: x_ref = x_lower.clone().float().detach().requires_grad_()
  333: x_lower.requires_grad_()
  384: x1.requires_grad_()
  385: x2.requires_grad_()
  498: x.requires_grad_()
  [all …]

test_mps.py
  1535: cpu_x.requires_grad_()
  1536: mps_x.requires_grad_()
  1658: cpu_x = torch.from_numpy(np_features).requires_grad_()
  1659: mps_x = torch.from_numpy(np_features).to('mps').requires_grad_()
  2084: linear_mps_input = linear_mps_input.requires_grad_()
  2085: linear_cpu_input = linear_cpu_input.requires_grad_()
  2095: grad = cpu_grad.detach().to('mps').requires_grad_()
  2171: high = (torch.ones(5, 5) * 3).requires_grad_()
  2173: high_1d = (torch.ones(1) * 3).requires_grad_()
  2244: x = cpu_x.detach().clone().to('mps').requires_grad_()
  [all …]

test_nn.py
  149: self.assertIs(m.requires_grad_(requires_grad), m)
  1535: m.weight.grad = torch.randn(10, 20).requires_grad_()
  1612: m.weight.grad = torch.randn(10, 20).requires_grad_()
  1634: m.weight.grad = torch.randn(10, 20).requires_grad_()
  1926: m.weight.requires_grad_(requires_grad)
  1960: gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)
  2016: gradcheck(fn, (input.clone().requires_grad_(),))
  2671: …rch.randn(input_length, batch_size, vocab_size, dtype=torch.double).log_softmax(2).requires_grad_()
  2698: inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
  2715: … inp = torch.randn(T, N, C, dtype=torch.float, device=device).log_softmax(2).requires_grad_()
  [all …]

test_expanded_weights.py
  54: weight.clone().requires_grad_(), 3, loss_reduction="sum"
  58: bias.clone().requires_grad_(), 3, loss_reduction="sum"
  116: weight.clone().requires_grad_(), 4, loss_reduction="sum"
  120: bias.clone().requires_grad_(), 4, loss_reduction="sum"
  170: input.requires_grad_(False)
  182: input.requires_grad_(False)
  315: sample_input.input.requires_grad_(False)
  630: input.requires_grad_()
  696: input.requires_grad_()
  1150: res.requires_grad_(t.requires_grad)

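Taken together, the test files above exercise the two in-place `requires_grad_` entry points: `torch.Tensor.requires_grad_()` flips the autograd flag on an existing tensor and returns that same tensor, and `torch.nn.Module.requires_grad_()` toggles the flag on every parameter and returns the module, which is what the `assertIs` check at test_nn.py line 149 verifies. A minimal sketch of both, independent of the test harnesses:

    import torch
    import torch.nn as nn

    x = torch.randn(4, 5)
    assert x.requires_grad_() is x        # in-place: the same tensor comes back
    assert x.requires_grad                # ...with the flag now set

    m = nn.Linear(5, 3)
    assert m.requires_grad_(False) is m   # Module variant returns self
    assert all(not p.requires_grad for p in m.parameters())
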
/aosp_15_r20/external/pytorch/test/distributions/

test_distributions.py
  178: "concentration1": torch.randn(2, 3).exp().requires_grad_(),
  179: "concentration0": torch.randn(2, 3).exp().requires_grad_(),
  182: "concentration1": torch.randn(4).exp().requires_grad_(),
  183: "concentration0": torch.randn(4).exp().requires_grad_(),
  290: {"df": torch.randn(2, 3).exp().requires_grad_()},
  291: {"df": torch.randn(1).exp().requires_grad_()},
  297: {"df": torch.randn(2, 3).exp().requires_grad_()},
  298: {"df": torch.randn(1).exp().requires_grad_()},
  304: {"concentration": torch.randn(2, 3).exp().requires_grad_()},
  305: {"concentration": torch.randn(4).exp().requires_grad_()},
  [all …]

test_transforms.py
  224: x = generate_data(transform).requires_grad_()
  236: x = generate_data(transform).requires_grad_()
  338: x = generate_data(transform).requires_grad_()
  349: x = generate_data(transform).requires_grad_()
  355: y = generate_data(transform.inv).requires_grad_()
  366: y = generate_data(transform.inv).requires_grad_()
  372: x = generate_data(transform).requires_grad_()
  384: x = generate_data(transform).requires_grad_()
  566: x = generate_data(transform).requires_grad_()

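The distribution tests above build strictly positive leaf parameters with the `torch.randn(...).exp().requires_grad_()` chain: because the `randn` output does not require grad, the `exp()` runs outside autograd, and the trailing `requires_grad_()` turns the positive result itself into the leaf that receives gradients. A minimal sketch of the idea; the concrete distribution and value range below are illustrative, not taken from the listed tests:

    import torch
    from torch.distributions import Beta

    # randn() does not require grad, so exp() is untracked and its strictly
    # positive result becomes the leaf parameter itself.
    concentration1 = torch.randn(2, 3).exp().requires_grad_()
    concentration0 = torch.randn(2, 3).exp().requires_grad_()
    assert concentration1.is_leaf and concentration1.requires_grad

    d = Beta(concentration1, concentration0)
    value = torch.rand(2, 3) * 0.98 + 0.01   # stay inside the open support (0, 1)
    d.log_prob(value).sum().backward()
    assert concentration1.grad is not None and concentration0.grad is not None
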
/aosp_15_r20/external/pytorch/torch/testing/_internal/opinfo/definitions/

sparse.py
  105: return x.detach().clone().requires_grad_(requires_grad)
  159: .requires_grad_(requires_grad),
  176: inp.requires_grad_(requires_grad),
  190: .requires_grad_(requires_grad),
  384: .requires_grad_(requires_grad),
  390: .requires_grad_(requires_grad),
  396: .requires_grad_(requires_grad),
  402: .requires_grad_(requires_grad),
  408: .requires_grad_(requires_grad),
  416: .requires_grad_(requires_grad),
  [all …]

/aosp_15_r20/external/pytorch/test/nn/

test_pooling.py
  223: input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
  227: ref_input = input.detach().clone().contiguous().requires_grad_(True)
  249: input = input[:, ::2, :, :].requires_grad_()
  254: ref_input = input.detach().clone().contiguous().requires_grad_(True)
  273: input = input.to(device).to(memory_format=memory_format).requires_grad_()
  276: input2 = input.detach().clone().to(dtype=dtype).requires_grad_(True)
  319: input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
  324: ref_input = input.detach().clone().contiguous().requires_grad_(True)
  344: input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
  347: ref_input = input.detach().clone().contiguous().requires_grad_(True)
  [all …]

test_convolution.py
  732: i1 = i.data[:, :2].contiguous().requires_grad_(True)
  738: i2 = i.data[:, 2:].contiguous().requires_grad_(True)
  778: i1 = i.data[:, :2].contiguous().requires_grad_(True)
  784: i2 = i.data[:, 2:].contiguous().requires_grad_(True)
  816: i1 = i.data[:, :2].contiguous().requires_grad_(True)
  822: i2 = i.data[:, 2:].contiguous().requires_grad_(True)
  853: i1 = i.data[:, :2].contiguous().requires_grad_(True)
  860: i2 = i.data[:, 2:].contiguous().requires_grad_(True)
  1143: .requires_grad_(True)
  1149: .requires_grad_(True)
  [all …]

test_embedding.py
  355: weight.requires_grad_()
  599: weights_check = weights.clone().detach().requires_grad_(True)
  718: weights_check = weights.clone().detach().requires_grad_(True)
  1014: ).requires_grad_(trainable_scale)
  1015: ref_per_sample_weights = per_sample_weights.detach().requires_grad_(
  1018: reference_weights = es.weight.detach().requires_grad_()
  1078: ).requires_grad_(trainable_scale)
  1079: ref_per_sample_weights = per_sample_weights.detach().requires_grad_(
  1082: reference_weights = es.weight.detach().requires_grad_()
  1154: ).requires_grad_(trainable_scale)
  [all …]

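A pattern that recurs throughout the test/nn files above is the reference-input idiom: the tensor under test is put into the layout being exercised and marked with `requires_grad_()`, while `detach().clone().contiguous().requires_grad_(True)` builds an independent, contiguous leaf copy so that outputs and gradients can be compared between the two paths. A minimal sketch of the idiom; `avg_pool2d` stands in here for whichever operator a given test exercises:

    import torch
    import torch.nn.functional as F

    x = torch.randn(2, 3, 8, 8)
    inp = x.contiguous(memory_format=torch.channels_last).requires_grad_()
    ref = inp.detach().clone().contiguous().requires_grad_(True)  # independent leaf copy

    out = F.avg_pool2d(inp, kernel_size=2)
    ref_out = F.avg_pool2d(ref, kernel_size=2)
    out.sum().backward()
    ref_out.sum().backward()

    torch.testing.assert_close(out, ref_out)
    torch.testing.assert_close(inp.grad, ref.grad)
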
/aosp_15_r20/external/pytorch/test/distributed/pipelining/

test_backward.py
  26: x.requires_grad_(True)
  32: ref_x = x.detach().requires_grad_(x.requires_grad)
  65: x.requires_grad_(True)
  71: ref_x = x.detach().requires_grad_(x.requires_grad)
  100: x.requires_grad_(True)
  106: ref_x = x.detach().requires_grad_(x.requires_grad)
  144: x.requires_grad_(True)
  153: ref_inputs.append(x.detach().requires_grad_(x.requires_grad))

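`x.detach().requires_grad_(x.requires_grad)` is a variant of the reference idiom that keeps the original storage (no `clone()`) while mirroring whether the original required grad, so the reference input sees the same values but accumulates its own gradient. A minimal sketch, with shapes chosen only for illustration:

    import torch

    x = torch.randn(3, 4).requires_grad_(True)

    # detach() shares storage with x but starts a fresh autograd history;
    # requires_grad_(x.requires_grad) re-enables grad only if x had it set.
    ref_x = x.detach().requires_grad_(x.requires_grad)

    (x * 2).sum().backward()
    (ref_x * 3).sum().backward()

    assert ref_x.requires_grad == x.requires_grad
    assert x.grad is not None and ref_x.grad is not None  # separate .grad buffers
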
/aosp_15_r20/external/pytorch/torch/csrc/inductor/aoti_eager/

kernel_meta_info.cpp
  15: requires_grad_(src_tensor.requires_grad()) {}  in TensorMetadata()
  31: requires_grad_(requires_grad) {  in TensorMetadata()
  57: requires_grad_,  in build_guard()
  84: other.requires_grad_ /* Should we need to care about grad requirement?*/);  in operator ==()
  90: this->requires_grad_ == other.requires_grad_ &&  in operator ==()
  111: stream << "requires_grad_: " << tensor_metadata.requires_grad_ << '\n';  in operator <<()

/aosp_15_r20/external/pytorch/test/inductor/

test_decompose_mem_bound_mm.py
  89: mat1 = torch.randn(b, m, k, device=GPU_TYPE).requires_grad_(True)
  90: mat2 = torch.randn(b, k, n, device=GPU_TYPE).requires_grad_(True)
  127: input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
  174: input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
  218: mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
  219: mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
  258: mat1 = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)
  259: mat2 = torch.randn(k, n, device=GPU_TYPE).requires_grad_(True)
  294: input = torch.randn(m, k, device=GPU_TYPE).requires_grad_(True)

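Calling `requires_grad_(True)` on a freshly created factory tensor, as the inductor tests do, behaves the same as passing `requires_grad=True` to the factory itself: either way the result is a leaf that accumulates gradients. A minimal CPU sketch (the tests use `device=GPU_TYPE`, a constant from their own harness, which is omitted here):

    import torch

    m, k, n = 16, 32, 8
    mat1 = torch.randn(m, k).requires_grad_(True)   # flag set after creation
    mat2 = torch.randn(k, n, requires_grad=True)    # flag set by the factory

    out = mat1 @ mat2
    out.sum().backward()
    assert mat1.is_leaf and mat2.is_leaf
    assert mat1.grad.shape == (m, k) and mat2.grad.shape == (k, n)
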
/aosp_15_r20/external/pytorch/test/distributed/fsdp/

test_wrap.py
  75: self.embed_tokens.weight.requires_grad_(False)
  76: self.norm.weight.requires_grad_(False)
  77: self.norm.bias.requires_grad_(False)
  87: self.inp_layernorm.weight.requires_grad_(False)
  88: self.inp_layernorm.bias.requires_grad_(False)
  89: self.post_attn_layernorm.weight.requires_grad_(False)
  90: self.post_attn_layernorm.bias.requires_grad_(False)
  102: self.q_proj.weight.requires_grad_(False)
  103: self.k_proj.weight.requires_grad_(False)
  104: self.v_proj.weight.requires_grad_(False)
  [all …]

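Here `requires_grad_(False)` freezes individual parameters before the model is wrapped, so only the remaining trainable parameters participate in gradient computation. A minimal sketch of the freezing pattern on a stand-in module; the layer names and sizes below are illustrative, not the ones from the FSDP test:

    import torch.nn as nn

    class TinyBlock(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.embed_tokens = nn.Embedding(100, 16)
            self.q_proj = nn.Linear(16, 16)
            self.norm = nn.LayerNorm(16)

        def freeze_everything_but_attention(self) -> None:
            # Frozen parameters keep their values but are skipped by autograd.
            self.embed_tokens.weight.requires_grad_(False)
            self.norm.weight.requires_grad_(False)
            self.norm.bias.requires_grad_(False)

    block = TinyBlock()
    block.freeze_everything_but_attention()
    trainable = [name for name, p in block.named_parameters() if p.requires_grad]
    assert trainable == ["q_proj.weight", "q_proj.bias"]
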
/aosp_15_r20/external/pytorch/test/dynamo/

test_aot_autograd_cache.py
  166: a2 = a.detach().clone().requires_grad_(True)
  167: b2 = b.detach().clone().requires_grad_(True)
  214: a2 = a.detach().clone().requires_grad_(True)
  215: b2 = b.detach().clone().requires_grad_(True)
  240: a2 = a.detach().clone().requires_grad_(True)
  241: b2 = b.detach().clone().requires_grad_(True)
  264: a2 = a.detach().clone().requires_grad_(True)
  265: b2 = b.detach().clone().requires_grad_(True)
  283: a2 = a.detach().clone().requires_grad_(True)
  284: b2 = b.detach().clone().requires_grad_(True)
  [all …]

/aosp_15_r20/external/pytorch/functorch/examples/maml_regression/

evjang.py
  30: torch.Tensor(40, 1).uniform_(-1.0, 1.0).requires_grad_(),
  31: torch.Tensor(40).zero_().requires_grad_(),
  34: .requires_grad_(),
  35: torch.Tensor(40).zero_().requires_grad_(),
  38: .requires_grad_(),
  39: torch.Tensor(1).zero_().requires_grad_(),

evjang_transforms.py
  31: torch.Tensor(40, 1).uniform_(-1.0, 1.0).requires_grad_(),
  32: torch.Tensor(40).zero_().requires_grad_(),
  35: .requires_grad_(),
  36: torch.Tensor(40).zero_().requires_grad_(),
  39: .requires_grad_(),
  40: torch.Tensor(1).zero_().requires_grad_(),

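Both MAML examples build their weight list by hand: allocate a tensor, initialize it in place, and only then call `requires_grad_()`. The ordering matters, because an in-place initializer on a leaf that already requires grad raises a RuntimeError. A minimal sketch of the pattern; `torch.empty`/`torch.zeros` stand in for the legacy `torch.Tensor` constructor used above, the shapes follow the listing, and the hidden-layer init ranges are an assumption since the search output truncates them:

    import torch

    def make_params():
        # Initialize in place first, then mark as a trainable leaf. The reverse
        # order (requires_grad=True, then uniform_()) raises
        # "a leaf Variable that requires grad is being used in an in-place operation".
        return [
            torch.empty(40, 1).uniform_(-1.0, 1.0).requires_grad_(),
            torch.zeros(40).requires_grad_(),
            torch.empty(40, 40).uniform_(-0.1, 0.1).requires_grad_(),  # range assumed
            torch.zeros(40).requires_grad_(),
            torch.empty(1, 40).uniform_(-0.1, 0.1).requires_grad_(),   # range assumed
            torch.zeros(1).requires_grad_(),
        ]

    params = make_params()
    assert all(p.is_leaf and p.requires_grad for p in params)
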
/aosp_15_r20/external/pytorch/test/xpu/

test_conv.py
  118: out = conv(x.detach().clone().requires_grad_())
  154: .requires_grad_()
  168: i1 = i.detach()[:, :1].clone().requires_grad_()
  175: i2 = i.detach()[:, 1:].clone().requires_grad_()
  217: .requires_grad_()
  233: i1 = i.detach()[:, :1].clone().requires_grad_()
  240: i2 = i.detach()[:, 1:].clone().requires_grad_()
  765: w = w.detach().requires_grad_()
  954: i1 = i.data[:, :2].contiguous().requires_grad_(True)
  961: i2 = i.data[:, 2:].contiguous().requires_grad_(True)
  [all …]

/aosp_15_r20/external/pytorch/test/autograd/

test_complex.py
  11: y = x.detach().requires_grad_(True)
  27: y = x.detach().requires_grad_(True)
  47: y = x.detach().requires_grad_(True)
  78: x.requires_grad_(True)
  101: z1 = z.clone().detach().requires_grad_(True)

test_functional.py
  209: return a.long().float().requires_grad_().clone()
  235: inp.requires_grad_()
  258: inputs.requires_grad_()
  259: v.requires_grad_()
  333: inputs.requires_grad_()
  334: v.requires_grad_()
  439: return a.long().float().requires_grad_().clone()
  465: inp.requires_grad_()
  488: inputs.requires_grad_()
  489: v.requires_grad_()
  [all …]

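The `a.long().float().requires_grad_().clone()` chain in test_functional.py works around the dtype restriction on the flag: `requires_grad_()` is only legal on floating point and complex tensors and raises a RuntimeError on integer dtypes, so the value is cast to float before the flag is set, and the trailing `clone()` hands back a non-leaf whose gradient flows to the float leaf. A minimal sketch of the restriction and the workaround:

    import torch

    a = torch.arange(6)                    # int64 tensor
    try:
        a.requires_grad_()                 # integer dtypes cannot require grad
    except RuntimeError as err:
        print(f"rejected as expected: {err}")

    leaf = a.float().requires_grad_()      # cast first, then set the flag
    out = leaf.clone()                     # non-leaf handed to the caller
    out.sum().backward()
    assert leaf.grad is not None           # gradient lands on the float leaf
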
/aosp_15_r20/external/pytorch/test/functorch/

test_aotdispatch.py
  388: x_copy = x.clone().detach().requires_grad_(x.requires_grad)
  843: a2 = a.clone().detach().requires_grad_()
  844: a3 = a.clone().detach().requires_grad_()
  845: a4 = a.clone().detach().requires_grad_()
  870: a2 = a.clone().detach().requires_grad_()
  871: a3 = a.clone().detach().requires_grad_()
  872: a4 = a.clone().detach().requires_grad_()
  876: aa2 = aa.clone().detach().requires_grad_()
  901: custom_aa_compile = custom_aa.clone().detach().requires_grad_()
  926: x2 = x.clone().detach().requires_grad_()
  [all …]

/aosp_15_r20/external/pytorch/test/expect/

TestSparseCPU.test_print_uncoalesced_cpu_float64.expect
  20: # after requires_grad_
  52: # after requires_grad_
  92: # after requires_grad_
  136: # after requires_grad_
  176: # after requires_grad_
  208: # after requires_grad_
  242: # after requires_grad_

TestSparseCPU.test_print_coalesced_cpu_float64.expect
  20: # after requires_grad_
  52: # after requires_grad_
  92: # after requires_grad_
  136: # after requires_grad_
  176: # after requires_grad_
  208: # after requires_grad_
  242: # after requires_grad_

TestSparseCUDA.test_print_coalesced_cuda_float64.expect
  21: # after requires_grad_
  57: # after requires_grad_
  98: # after requires_grad_
  144: # after requires_grad_
  188: # after requires_grad_
  224: # after requires_grad_
  259: # after requires_grad_

TestSparseCUDA.test_print_uncoalesced_cuda_float64.expect
  21: # after requires_grad_
  57: # after requires_grad_
  98: # after requires_grad_
  144: # after requires_grad_
  188: # after requires_grad_
  224: # after requires_grad_
  259: # after requires_grad_