/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/

quantization_patterns.h
    307  // aten::conv1d - aten::relu_ in quant_fusion_pattern_and_replacements()
    314  %r = aten::relu_(%conv_out) in quant_fusion_pattern_and_replacements()
    351  // aten::conv2d - aten::relu_ in quant_fusion_pattern_and_replacements()
    358  %r = aten::relu_(%conv_out) in quant_fusion_pattern_and_replacements()
    395  // aten::conv3d - aten::relu_ in quant_fusion_pattern_and_replacements()
    402  %r = aten::relu_(%conv_out) in quant_fusion_pattern_and_replacements()
    464  %r_relu = aten::relu_(%r_add) in quant_fusion_pattern_and_replacements()
    482  %r_relu = aten::relu_(%r_add) in quant_fusion_pattern_and_replacements()
    517  %r = aten::relu_(%linear_out) in quant_fusion_pattern_and_replacements()
    595  %r = aten::relu_(%r_add) in quant_fusion_pattern_and_replacements()
    [all …]

insert_observers.cpp
    565  // nn.Linear + aten::relu_
    570  %second_output = aten::relu_(%first_output)
    600  // aten::linear + aten::relu_
    605  %second_output = aten::relu_(%first_output)
    636  %second_output = aten::relu_(%first_output)
    668  %second_output = aten::relu_(%first_output)
    700  %second_output = aten::relu_(%first_output)
    745  %second_output = aten::relu_(%first_output)
    757  %second_output = aten::relu_(%first_output)
    788  %second_output = aten::relu_(%first_output)
    [all …]
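The two quantization passes above pattern-match a conv or linear op whose result feeds an in-place aten::relu_. A minimal, hypothetical module (module name and sizes invented for illustration) that produces that graph shape when scripted:

    import torch
    import torch.nn as nn

    class ConvReLU(nn.Module):  # hypothetical example module
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, kernel_size=3)

        def forward(self, x):
            out = self.conv(x)
            return torch.relu_(out)  # in-place relu on the conv output

    scripted = torch.jit.script(ConvReLU())
    # The scripted graph contains aten::conv2d followed by aten::relu_,
    # the sequence these observer-insertion and quant-fusion patterns key on.
    print(scripted.graph)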
/aosp_15_r20/external/pytorch/test/jit/

test_batch_mm.py
    108  torch.relu_(T1)
    136  torch.relu_(T1)
    172  torch.relu_(T1)
    236  torch.relu_(T1)
    272  torch.relu_(A)

test_optimize_for_mobile_preserve_debug_info.py
    256  conv2d_activation=F.relu_,
    257  conv2d_activation_kind="aten::relu_",
    263  linear_activation=F.relu_,
    264  linear_activation_kind="aten::relu_",

test_convert_activation.py
    113  FileCheck().check_not("aten::relu_").run(fn.graph)
    128  FileCheck().check_not("aten::relu_").run(fn.graph)
    168  torch.relu_,

test_device_analysis.py
    115  def relu_(x): function
    116  return torch.nn.functional.relu_(x)
    118  functions = [add_self, relu_]
/aosp_15_r20/external/pytorch/test/dynamo/

test_subclasses.py
    957   return x.add_(1.0) + torch.nn.functional.relu_(x)
    974   relu_: "f32[3, 4]" = torch.relu_(l_x_); l_x_ = None
    975   add: "f32[3, 4]" = add_ + relu_; add_ = relu_ = None
    996   relu_: "f32[3, 4]" = torch.relu_(l_x_); l_x_ = None
    997   add: "f32[3, 4]" = add_ + relu_; add_ = relu_ = None
    1038  relu_: "f32[3, 4]" = torch.relu_(l_x_); l_x_ = None
    1039  add: "f32[3, 4]" = add_ + relu_; add_ = relu_ = None
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/

fuse_relu.cpp
    27   %res = aten::relu_(%add_res) in fuseAddReluImpl()
    34   %res = aten::relu_(%add_res) in fuseAddReluImpl()
    45   %res = aten::relu_(%add_res) in fuseAddReluImpl()

metal_rewrite.cpp
    119  %res = aten::relu_(%linear_res) in fuseReluWithPackedOps()
    129  %r = aten::relu_(%r) in fuseReluWithPackedOps()

xnnpack_rewrite.cpp
    325  %res = aten::relu_(%linear_res) in fuseReluWithPackedOps()
    335  %res = aten::relu_(%conv2d_res) in fuseReluWithPackedOps()
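All three rewrites above look for an aten::relu_ that consumes the output of a preceding add or packed linear/conv op. A small sketch (function name invented) of TorchScript code that produces the add + relu_ shape fuseAddReluImpl matches:

    import torch

    @torch.jit.script
    def add_then_relu(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        out = a + b              # aten::add
        return torch.relu_(out)  # aten::relu_ applied to the add result

    print(add_then_relu.graph)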
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ops/

MetalNeurons.mm
    59  static Tensor& relu_(Tensor& input) {
    84  m.impl(TORCH_SELECTIVE_NAME("aten::relu_"), TORCH_FN(relu_));
/aosp_15_r20/external/executorch/backends/qualcomm/quantizer/

README.md
    57   @register_annotator([torch.ops.aten.relu.default, torch.ops.aten.relu_.default])
    59   Where `torch.ops.aten.relu.default` / `torch.ops.aten.relu_.default` map to `copy` / `in-place` ver…
    162  @register_annotator([torch.ops.aten.relu.default, torch.ops.aten.relu_.default])
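As the README line quoted above notes, `torch.ops.aten.relu.default` and `torch.ops.aten.relu_.default` are the copy and in-place variants of the same op. A quick illustration of that difference (not taken from the README itself):

    import torch

    x = torch.tensor([-1.0, 2.0])

    y = torch.ops.aten.relu.default(x)   # copy variant: returns a new tensor, x untouched
    torch.ops.aten.relu_.default(x)      # in-place variant: mutates x

    print(y)  # tensor([0., 2.])
    print(x)  # tensor([0., 2.]) -- mutated by the in-place call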
/aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/

xnnpack_quantizer_utils.py
    237  torch.ops.aten.relu_.default,
    343  torch.ops.aten.relu_.default,
    486  output = F.relu_(bn) if relu_is_inplace else F.relu(bn)
    720  torch.ops.aten.relu_.default,
    842  torch.ops.aten.relu_.default,
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/

unary_test.py
    124  ["relu_", torch.relu_],
/aosp_15_r20/external/pytorch/torch/ao/quantization/backend_config/

_qnnpack_pt2e.py
    98   (torch.ops.aten.convolution.default, torch.ops.aten.relu_.default)
    160  (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default),
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/

Clamp.cpp
    305  Tensor& relu_(Tensor& self) { in relu_() function
    618  m.impl(TORCH_SELECTIVE_NAME("aten::relu_"), relu_); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/test/distributed/_tools/

test_mod_tracker.py
    20  x = x["a"].relu_()
    88  return self.foo(x).relu_()
/aosp_15_r20/external/pytorch/test/quantization/pt2e/

test_x86inductor_quantizer.py
    622   "relu_inplace": [torch.nn.ReLU(inplace=True), torch.ops.aten.relu_.default],
    1283  self._test_linear_unary_helper(nn.ReLU, aten.relu.default, aten.relu_.default)
    1295  nn.ReLU, aten.relu.default, aten.relu_.default, is_qat=True
    1308  nn.ReLU, aten.relu.default, aten.relu_.default, is_dynamic=True
    1324  nn.ReLU, aten.relu.default, aten.relu_.default, is_qat=True, is_dynamic=True
    1611  relu_op = aten.relu_.default if inplace_relu else aten.relu.default
    1755  "relu_inplace": [torch.nn.ReLU(inplace=True), torch.ops.aten.relu_.default],
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/

mappings.py
    105  "relu_",
    704  "relu_",
/aosp_15_r20/external/pytorch/test/quantization/jit/

test_quantize_jit.py
    1907  ).check_not("aten::relu_(").check_not("quantized::add(").check_not(
    2116  ).check_not("aten::relu_(").check_not("quantized::add(").check_not(
    2202  ).check_not("aten::relu_(").check_not(
    2292  ).check_not("aten::relu_").run(model.graph)
    2313  "aten::relu_"
    2335  "aten::relu_"
    2541  ).check_not("aten::relu_(").check_not("quantized::mul(").check_not(
    2626  ).check_not("aten::relu_(").check_not(
    2837  x.relu_()
/aosp_15_r20/external/pytorch/test/

test_schema_check.py
    332  expected.relu_()
    338  actual.relu_()

test_module_tracker.py
    21  x = x["a"].relu_()
/aosp_15_r20/external/executorch/docs/source/

compiler-custom-compiler-passes.md
    55  relu_ is the in-place version. Replace it with relu, which is the
    60  if op != torch.ops.aten.relu_.default:
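The doc above describes a pass that replaces the in-place relu_ with the functional relu. A minimal torch.fx sketch of the same idea, assuming an ATen-level GraphModule (function name invented; the pass in the ExecuTorch doc itself may use different pass infrastructure):

    import torch
    import torch.fx

    def replace_inplace_relu(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
        # Retarget every aten::relu_ call node to the functional aten::relu.
        for node in gm.graph.nodes:
            if node.op == "call_function" and node.target == torch.ops.aten.relu_.default:
                node.target = torch.ops.aten.relu.default
        gm.recompile()
        return gm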
/aosp_15_r20/external/pytorch/torch/ao/quantization/pt2e/

graph_utils.py
    25  {torch.nn.ReLU, torch.nn.functional.relu, torch.nn.functional.relu_},
/aosp_15_r20/external/pytorch/docs/source/

nn.functional.rst
    75  relu_