/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/ |
functional_modules.py
    34:  - add_relu
    88:  def add_relu(self, x: Tensor, y: Tensor) -> Tensor:  (member in FloatFunctional)
    110: - add_relu
    153: def add_relu(self, x: Tensor, y: Tensor) -> Tensor:  (member in FXFloatFunctional)
    188: - add_relu
    275: r"""Operation equivalent to ``torch.ops.quantized.add_relu``"""
    277: def add_relu(self, x: Tensor, y: Tensor) -> Tensor:  (member in QFunctional)
    278: r = ops.quantized.add_relu(x, y, scale=self.scale, zero_point=self.zero_point)
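The three classes above share one signature: FloatFunctional is the eager-mode stand-in that convert() swaps for QFunctional, whose add_relu dispatches to ops.quantized.add_relu (line 278). A minimal usage sketch, assuming the standard eager-mode quantization workflow (the module name AddReLU is illustrative):

import torch
import torch.ao.nn.quantized as nnq

class AddReLU(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # FloatFunctional makes the binary op observable so that convert()
        # can replace it with QFunctional (backed by quantized::add_relu)
        self.ff = nnq.FloatFunctional()

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return self.ff.add_relu(x, y)  # fused add + relu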
|
/aosp_15_r20/external/executorch/docs/source/ |
compiler-backend-dialect.md
    84:  Now I want to write a pass to merge `add` and `relu` into `add_relu`, the first step is to write a …
    97:  @bind_pattern_to_op(lib, "add_relu(Tensor self, Tensor other) -> Tensor")
    103: This way we are registering the pattern as a kernel to `add_relu` and it is ready to be used in a p…
    108: @bind_pattern_to_op(lib, "add_relu")
    115: return torch.ops.foo_namespace.add_relu.default(x, y)
    129: … = call_function[target=executorch.exir.dialects.edge._ops.foo_namespace.add_relu.default](args = (…
    184: * `quantized_decomposed::add_relu(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a…
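Taken together, these hits outline ExecuTorch's backend-op registration flow: bind_pattern_to_op registers an add -> relu pattern as a kernel for a new add_relu backend op, keeping it decomposable for re-capture. A hedged reconstruction; the import path and pattern body are assumptions pieced together from the snippets, not verbatim from the doc:

import torch
from torch.library import Library

# Assumed import path, based on the exir/dialects/backend/_ops.py hit below.
from executorch.exir.dialects.backend._ops import bind_pattern_to_op

lib = Library("foo_namespace", "DEF")  # namespace name taken from the snippets

# Registers the add -> relu pattern as a kernel for the new backend op, so
# the op can still be decomposed (re-captured) if no backend consumes it.
@bind_pattern_to_op(lib, "add_relu(Tensor self, Tensor other) -> Tensor")
def add_relu(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    z = torch.ops.aten.add.Tensor(x, y)
    return torch.ops.aten.relu.default(z)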
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/ |
BinaryOps.cpp  (all in TORCH_LIBRARY_IMPL())
    480: m.impl(TORCH_SELECTIVE_NAME("quantized::add_relu"), TORCH_FN(qadd</*ReLUFused=*/true>));
    481: m.impl(TORCH_SELECTIVE_NAME("quantized::add_relu.out"), TORCH_FN(qadd_out</*ReLUFused=*/tru…
    482: m.impl(TORCH_SELECTIVE_NAME("quantized::add_relu.Scalar"), TORCH_FN(qadd_scalar</*ReLUFused=*/…
    483: m.impl(TORCH_SELECTIVE_NAME("quantized::add_relu.Scalar2"), TORCH_FN(qadd_scalar2</*ReLUFused=…
    484: m.impl(TORCH_SELECTIVE_NAME("quantized::add_relu.Scalar_out"), TORCH_FN(qadd_scalar_out</*ReLUFuse…
|
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/ |
qarithmetic_test.py
    18: ("add_relu", ops.quantized.add_relu),
|
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/ |
quantization_patterns.h  (all in quant_fusion_pattern_and_replacements())
    450: std::string add_relu = R"(  (local)
    488: %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point)
    967: {"quantized::add_relu",
    968: std::move(add_relu),
    971: {"quantized::add_relu",
    975: {"quantized::add_relu",
    979: {"quantized::add_relu",
|
fusion_passes.cpp
    17: %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point)  (in fuseQuantizeAddReluImpl())
|
/aosp_15_r20/external/executorch/exir/tests/ |
test_quantization.py
    69: # quantized ops to implement: add_relu
    87: # TODO: implement torch.ops.quantized_decomposed.add_relu.out
|
test_passes.py
    99:  lib.define("add_relu(Tensor self, Tensor other) -> Tensor")
    886: @bind_pattern_to_op(lib, "add_relu")
    893: return ops.backend.DO_NOT_USE_TEST_ONLY.add_relu.default(x, y)
    901: if op == torch.ops.DO_NOT_USE_TEST_ONLY.add_relu.default:
    903: ops.backend.DO_NOT_USE_TEST_ONLY.add_relu.default,
|
/aosp_15_r20/external/pytorch/test/quantization/core/ |
test_quantized_op.py
    852: …ry_op_scalar_relu(A, b, "add", operator.add, torch.ops.quantized.add, torch.ops.quantized.add_relu)
    862: """Tests the correctness of the add and add_relu op."""
    865: add_relu = torch.ops.quantized.add_relu
    868: add_relu_out = torch.ops.quantized.add_relu
    899: qCrelu_hat = add_relu(qA, qB, scale=scale, zero_point=zero_point)
    910: """Tests the correctness of the cudnn add and add_relu op
    918: add_relu = torch.ops.quantized.add_relu
    942: qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
    946: """Tests the correctness of the cudnn add and add_relu op for nhwc format"""
    953: add_relu = torch.ops.quantized.add_relu
    [additional matches truncated]
|
test_quantized_module.py
    284: if post_op in ["add", "add_relu"]:
    296: raw_conv_module = conv_module[0] if post_op in ["relu", "add", "add_relu"] else conv_module
    411: if post_op in ["add", "add_relu"] else torch.ao.nn.intrinsic._FusedModule(conv_module)
    424: …self.assertEqual(conv_module[0].bias if (post_op in ["relu", "add", "add_relu"]) else conv_module.…
    893: … Y_scale, Y_zero_point, use_bias, "add_relu", use_channelwise, X2_scale, X2_zero_point)
|
/aosp_15_r20/external/pytorch/torch/ao/ns/ |
_numeric_suite.py
    332: def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:  (member in Shadow)
    337: output = self.orig_module.add_relu(x, y)
    340: shadow_output = self.shadow_module.add_relu(x, y)
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/ |
library.cpp  (all in TORCH_LIBRARY())
    24: m.def(TORCH_SELECTIVE_SCHEMA("quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_poin…
    25: m.def(TORCH_SELECTIVE_SCHEMA("quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc"), {at:…
    26: m.def(TORCH_SELECTIVE_SCHEMA("quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc"), {at…
    27: m.def(TORCH_SELECTIVE_SCHEMA("quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Ten…
    28: m.def(TORCH_SELECTIVE_SCHEMA("quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) …
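The base overload's schema, quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc, can be exercised directly from Python. A minimal sketch; the scales and zero points here are made up for illustration:

import torch

# Quantize two float tensors, then call the fused op; the trailing two
# arguments are the *output* scale and zero point per the schema above.
a = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0, dtype=torch.quint8)
b = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0, dtype=torch.quint8)
qc = torch.ops.quantized.add_relu(a, b, 0.1, 0)
print(qc.dequantize())  # elementwise max(a + b, 0), up to quantization error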
|
/aosp_15_r20/external/pytorch/torch/_export/passes/ |
replace_quantized_ops_with_standard_ops_pass.py
    263: if opname in ["conv1d_relu", "conv2d_relu", "linear_relu", "add_relu", "mul_relu"]:
    398: "add_relu": torch.ops.aten.add,
    482: "add_relu.default": _transform_op_where_last_two_arguments_are_scale_and_zero_point,
|
/aosp_15_r20/external/pytorch/aten/src/ATen/native/ |
BinaryOps.cpp
    835: false, "Unsupported datatype for add_relu:", self.dtype().name());  (in add_relu_impl())
    847: Tensor add_relu(const Tensor& self, const Tensor& other, const Scalar& alpha) {  (function)
    852: Tensor add_relu(const Tensor& self, const Scalar& other, const Scalar& alpha) {  (function)
    853: return add_relu(self, wrapped_scalar_tensor(other), alpha);
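These are the non-quantized fused kernels; they back the aten::_add_relu overloads that the mobile fuse_add_relu pass targets (see the mobile_optimizer.rst entry below). A minimal sketch, assuming the op is reachable under that underscored internal name in your PyTorch build:

import torch

a = torch.randn(4)
b = torch.randn(4)
# Single-pass fused kernel; numerically equivalent to (a + b).relu().
# aten::_add_relu is an internal op and its exposure may vary by version.
out = torch.ops.aten._add_relu(a, b)
assert torch.equal(out, (a + b).relu())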
|
/aosp_15_r20/external/pytorch/test/ |
test_nnapi.py
    629: return func.add_relu(lhs, rhs)
    635: for name, mod in [("add", AddMod), ("add_relu", AddReluMod), ("mul", MulMod)]:
|
/aosp_15_r20/external/executorch/exir/passes/ |
_quant_patterns_and_replacements.py
    452: …"add_relu(Tensor a, float a_scale, int a_zero_point, int a_quant_min, int a_quant_max, Tensor b, f…
    755: exir_ops.edge.quantized_decomposed.add_relu.default,
|
/aosp_15_r20/external/executorch/exir/dialects/backend/ |
_ops.py
    41: add_relu(only works on dsp): hold reference to add -> relu pattern, for re-capturing purpose.
|
/aosp_15_r20/external/pytorch/docs/source/ |
mobile_optimizer.rst
    20: … finds instances of ``relu`` ops that follow ``add`` ops and fuses them into a single ``add_relu``.
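That fusion runs as part of the default pass list in optimize_for_mobile. A minimal sketch; the toy module is illustrative:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

class M(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return torch.relu(x + y)  # add followed by relu: eligible for fusion

scripted = torch.jit.script(M())
# fuse_add_relu is among the default optimization passes applied here
optimized = optimize_for_mobile(scripted)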
|
/aosp_15_r20/external/pytorch/benchmarks/dynamo/microbenchmarks/ |
bench_mm_fusion.py
    92: fusion_types = ["", "add", "relu", "add_relu"]
|
/aosp_15_r20/external/pytorch/test/quantization/jit/ |
test_fusion_passes.py
    39: FileCheck().check_not("aten::relu").check("quantized::add_relu").run(
|
/aosp_15_r20/external/pytorch/test/mobile/model_test/ |
coverage.yaml
    663:  - quantized::add_relu
    1027: quantized::add_relu: 1
|
quantization_ops.py
    30: self.func.add_relu(a, b),
|
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/ |
match_utils.py
    27: # need to put the fusion patterns before single patterns. For example, add_relu should be registere…
|
_lower_to_native_backend.py
    408: operator.add: torch.ops.quantized.add_relu,
    409: torch.add: torch.ops.quantized.add_relu,
|
/aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/ |
xnnpack_quantizer.py
    258: "add_relu",
|