Searched full:_convolution (Results 1 – 25 of 49) sorted by relevance

/aosp_15_r20/external/pytorch/test/
test_jit_llga_fuser.py
439 ["aten::_convolution", 'aten::sigmoid', 'aten::mul'],
440 ["aten::_convolution"]
443 self.assertFused(graph, ['aten::_convolution', silu_op])
516 self.assertFused(graph, ['aten::_convolution', "aten::clamp"])
538 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm'])
561 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
672 self.assertFused(graph, ['aten::_convolution'])
811 self.assertFused(graph, ['aten::_convolution', 'aten::batch_norm',
test_mkldnn_fusion.py
95 conv_node_name = 'aten::_convolution' if trace else 'aten::conv2d'
170 self.assertGraphContains(graph, kind='aten::_convolution')
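
The two test files above look for aten::_convolution nodes in traced graphs. A minimal sketch of how such a node appears, using only standard PyTorch APIs (the exact graph text varies by build; tracing records the dispatched _convolution call, while scripting records aten::conv2d, as the test_mkldnn_fusion.py hit implies):

import torch

# Tracing a conv module records the low-level aten::_convolution op
# in the TorchScript graph.
m = torch.nn.Conv2d(3, 8, kernel_size=3, bias=True)
traced = torch.jit.trace(m, torch.randn(1, 3, 16, 16))
print(traced.graph)  # expect a node like %y : Tensor = aten::_convolution(...)
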
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/utils/
op_registry.cpp
29 _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilatio… in nn_ops_first_input_preserving()
30 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in nn_ops_first_input_preserving()
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
fold_conv_bn.cpp
49 // Only looks for _convolution pattern. in replaceConvBiasWithGetAttr()
56 %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvBiasWithGetAttr()
64 %conv_out = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvBiasWithGetAttr()
78 // And change _convolution to take the new value. in replaceConvBiasWithGetAttr()
tensorexpr_fuser.cpp
82 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in supported_non_eltwise_set()
892 …"aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] … in typesAreSupported()
1079 if (node->kind() == aten::_convolution && !tensorexpr::isConv2d(node)) { in typesAreSupported()
1080 GRAPH_DEBUG("This aten::_convolution node is not a 2D conv"); in typesAreSupported()
1083 if (node->kind() == aten::_convolution || node->kind() == aten::conv2d) { in typesAreSupported()
graph_rewrite_helper.cpp
61 %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvolutionWithAtenConv()
69 %r = aten::_convolution(%a, %w, %b, %stride, %padding, %dilation, in replaceConvolutionWithAtenConv()
xnnpack_rewrite.cpp
71 // Replace _convolution with conv1d and conv2d in transformConv1dToConv2d()
118 // Replace _convolution with conv2d in insertPrePackedConv2dOp()
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
mobile_memory_cleanup.cpp
16 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
32 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
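
The TestModule snippets above call the internal op directly from TorchScript. A hedged Python equivalent is sketched below, assuming the current 13-argument overload whose trailing flags are (benchmark, deterministic, cudnn_enabled, allow_tf32); the deprecated overload listed elsewhere in these results takes one fewer flag:

import torch

# Direct call mirroring the snippets above; argument roles noted inline.
x = torch.randn(1, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)
b = torch.randn(4)
y = torch._convolution(
    x, w, b,
    [1, 1],   # stride
    [0, 0],   # padding
    [1, 1],   # dilation
    False,    # transposed
    [0, 0],   # output_padding
    1,        # groups
    False, False, True, True)  # benchmark, deterministic, cudnn_enabled, allow_tf32
print(y.shape)  # torch.Size([1, 4, 6, 6])
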
/aosp_15_r20/external/pytorch/aten/src/ATen/
autocast_mode.cpp
215 KERNEL_MPS2(_convolution, deprecated, lower_precision_fp) in TORCH_LIBRARY_IMPL()
216 KERNEL_MPS(_convolution, lower_precision_fp) in TORCH_LIBRARY_IMPL()
341 KERNEL_CPU(_convolution, deprecated, lower_precision_fp) in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/torch/testing/_internal/
autocast_test_lists.py
87 # deprecated _convolution
88 ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
90 # the current _convolution
91 ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
/aosp_15_r20/external/pytorch/test/mobile/model_test/
model_ops.yaml
34 aten::_convolution: 27
35 aten::_convolution.deprecated: 3
coverage.yaml
1052 aten::_convolution: 12
1053 aten::_convolution.deprecated: 3
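
model_ops.yaml and coverage.yaml track aten::_convolution as an operator required by mobile test models. A sketch of how one might produce a comparable operator list for a single traced model, using torch.jit.export_opnames (the yaml counts themselves come from the mobile test harness, not from this call):

import torch

# On builds where tracing records aten::_convolution, that name shows
# up in the exported operator list.
m = torch.jit.trace(torch.nn.Conv2d(3, 8, 3), torch.randn(1, 3, 16, 16))
print(torch.jit.export_opnames(m))
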
/aosp_15_r20/external/pytorch/test/cpp/jit/
test_lite_interpreter_direct.cpp
127 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
564 …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F… in TEST()
574 "aten::_convolution", in TEST()
test_flatbuffer.cpp
282 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
324 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
882 …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F… in TEST()
892 "aten::_convolution", in TEST()
test_lite_interpreter.cpp
129 …return torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1,… in TEST()
693 …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F… in TEST()
1041 …x3 = torch._convolution(input, self.weight, self.bias, [1, 1], [0, 0], [1, 1], False, [0, 0], 1, F… in TEST()
1053 "aten::_convolution", in TEST()
/aosp_15_r20/external/pytorch/test/mobile/
test_lite_script_module.py
299 x3 = torch._convolution(
323 "aten::_convolution",
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/xpu/
Conv.cpp
537 Tensor _convolution( in _convolution() function
585 return _convolution( in convolution_overrideable()
/aosp_15_r20/external/pytorch/ios/TestApp/custom_build/
mobilenetv2.yaml
1 - aten::_convolution
/aosp_15_r20/external/pytorch/torch/_inductor/kernel/
conv.py
650 @register_lowering(aten._convolution)
651 def _convolution( function
/aosp_15_r20/external/pytorch/torch/csrc/jit/codegen/onednn/
register_interface.cpp
11 case aten::_convolution: in canFuseNode()
decompose_silu.cpp
18 if (inputToSilu->kind() == aten::_convolution) { in shouldDecomposeSilu()
graph_helper.cpp
93 (nodeKind == Symbol::fromQualString("aten::_convolution")) || in createOperator()
478 (kindOfOp == aten::conv2d) || (kindOfOp == aten::_convolution) || in isBetterSuitedForLLGA()
/aosp_15_r20/external/pytorch/torch/utils/
flop_counter.py
131 @register_flop_formula([aten.convolution, aten._convolution])
540 aten._convolution: conv_flop,
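
flop_counter.py registers conv_flop as the FLOP formula for aten.convolution and aten._convolution. A short usage sketch with FlopCounterMode; the exact count depends on the conv and input shapes:

import torch
from torch.utils.flop_counter import FlopCounterMode

# FlopCounterMode attributes FLOPs to convolution calls via the
# registered conv_flop formula.
m = torch.nn.Conv2d(3, 8, 3)
x = torch.randn(1, 3, 32, 32)
with FlopCounterMode(display=False) as counter:
    m(x)
print(counter.get_total_flops())
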
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Convolution.cpp
38 #include <ATen/ops/_convolution.h>
1184 return at::_convolution(input, weight, bias, stride, padding, dilation, in convolution()
1468 at::Tensor _convolution( in _convolution() function
1708 at::Tensor _convolution( in _convolution() function
1718 …return at::_convolution(input_r, weight_r, bias_r, stride_, padding_, dilation_, transposed_, outp… in _convolution()
/aosp_15_r20/external/pytorch/test/cpp_extensions/
maia_extension.cpp
18 // This is a hack to workaround the shape checks in _convolution. in get_tensor()
