Searched full:conv1d (Results 1 – 25 of 325) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/core/grappler/costs/graph_properties_testdata/
large_graph.pbtxt.html
123622 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expan…
123644 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expan…
123647 …input: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expa…
123683 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expan…
123705 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expan…
123708 …input: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expa…
123744 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Conv2…
123746 …input: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expa…
123747 …input: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Expa…
123817 …name: "seq2seq/seq2seq_2/convert_to_lin_specgram/dilated_conv1d_stack/1x1_residual_in/conv1d/Squee…
[all …]
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/
conv1d.py
22 class Conv1d(torch.nn.Module): class in TestConv1d
34 self.conv1d = torch.nn.Conv1d(
46 return self.conv1d(x)
59 self.conv1 = torch.nn.Conv1d(
70 self.conv2 = torch.nn.Conv1d(
109 .check_count({"torch.ops.aten.conv1d.default": conv_count})
123 self.Conv1d(dtype=torch.float16),
132 self._test_conv1d(self.Conv1d(), inputs, 1, dynamic_shape=dynamic_shapes)
145 self.Conv1d(), inputs, 1, quantized=True, dynamic_shape=dynamic_shapes
163 self.Conv1d(),
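The excerpts above exercise lowering of torch.nn.Conv1d through the XNNPACK backend, including quantized and dynamic-shape variants. A minimal standalone sketch of the kind of module under test (channel counts and kernel size are illustrative, not the repo's exact values):

    import torch

    class Conv1dModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # in_channels=2, out_channels=4, kernel_size=3 are assumed values
            self.conv1d = torch.nn.Conv1d(2, 4, kernel_size=3, stride=1, padding=1)

        def forward(self, x):
            return self.conv1d(x)

    # Conv1d expects (batch, channels, length) input
    x = torch.randn(1, 2, 40)
    print(Conv1dModule()(x).shape)  # torch.Size([1, 4, 40])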
/aosp_15_r20/external/executorch/backends/arm/test/ops/
test_conv1d.py
19 class Conv1d(torch.nn.Module): class
88 torch.nn.Conv1d(
111 conv1d_2_3x2x40_nobias = Conv1d(
122 conv1d_3_1x3x256_st1 = Conv1d(
132 conv1d_3_1x3x12_st2_pd1 = Conv1d(
142 conv1d_1_1x2x128_st1 = Conv1d(
152 conv1d_2_1x2x14_st2 = Conv1d(
162 conv1d_5_3x2x128_st1 = Conv1d(
172 conv1d_3_1x3x224_st2_pd1 = Conv1d(
182 two_conv1d_nobias = Conv1d(
[all …]
test_depthwise_conv.py
15 from executorch.backends.arm.test.ops.test_conv1d import Conv1d
31 dw_conv1d_3_1x3x14_gp3_st1 = Conv1d(
42 dw_conv1d_2_1x6x4_gp6_st1 = Conv1d(
65 dw_conv1d_3_1x3x256_gp3_st1 = Conv1d(
124 two_dw_conv1d = Conv1d(
170 """Tests Conv1D and Conv2D where groups == in_channels and out_channels = K * in_channels. This
257 # Expected to fail as conv1d needs transpose which is not supported
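"Depthwise" here means groups == in_channels with out_channels = K * in_channels, so each input channel is convolved with its own K filters and no cross-channel mixing happens. A minimal sketch with assumed shapes:

    import torch

    in_channels, K = 3, 2
    # groups == in_channels makes this a depthwise Conv1d
    dw = torch.nn.Conv1d(in_channels, K * in_channels, kernel_size=3,
                         groups=in_channels, padding=1)

    x = torch.randn(1, in_channels, 14)
    print(dw(x).shape)      # torch.Size([1, 6, 14])
    print(dw.weight.shape)  # torch.Size([6, 1, 3]): one input channel per filter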
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/qat/modules/
conv_fused.py
437 class ConvBn1d(_ConvBnNd, nn.Conv1d):
439 A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
443 We combined the interface of :class:`torch.nn.Conv1d` and
446 Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
457 _FLOAT_CONV_MODULE = nn.Conv1d
461 # Conv1d args
508 A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
512 We combined the interface of :class:`torch.nn.Conv1d` and
515 Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
524 _FLOAT_CONV_MODULE = nn.Conv1d
[all …]
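ConvBn1d and ConvBnReLU1d are what eager-mode fusion produces from a Conv1d/BatchNorm1d(/ReLU) sequence. A minimal sketch of the eval-mode fusion entry point (standard torch.ao.quantization API; the Sequential keys "0"/"1" are just this toy model's names):

    import torch
    from torch.ao.quantization import fuse_modules

    model = torch.nn.Sequential(
        torch.nn.Conv1d(2, 4, 3),
        torch.nn.BatchNorm1d(4),
    ).eval()

    fused = fuse_modules(model, [["0", "1"]])
    # In eval mode the BatchNorm is folded away and a plain Conv1d remains;
    # for QAT, fuse_modules_qat plus prepare_qat yields the ConvBn1d shown above.
    print(type(fused[0]).__name__)  # Conv1d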
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/
conv.py
18 "Conv1d",
27 class Conv1d(nnq.Conv1d): class
31 :class:`~torch.nn.Conv1d` and :class:`~torch.ao.nn.quantized.dynamic.Conv1d` and
39 See :class:`~torch.nn.Conv1d` for other attributes.
44 >>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)
50 _FLOAT_MODULE = nn.Conv1d
100 # Padding in Conv1d is stored as (p, p), need to get (p,)
283 For special notes, please, see :class:`~torch.ao.nn.quantized.dynamic.Conv1d`
301 >>> downsample = nndq.Conv1d(16, 16, 3, stride=2, padding=1)
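The docstring example above, completed into a runnable sketch (a CPU build with a quantized engine such as fbgemm is assumed; weights are quantized ahead of time while activations stay float):

    import torch
    import torch.ao.nn.quantized.dynamic as nnqd

    m = nnqd.Conv1d(16, 33, 3, stride=2)
    x = torch.randn(20, 16, 100)
    print(m(x).shape)  # torch.Size([20, 33, 49])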
/aosp_15_r20/external/pytorch/test/nn/
test_convolution.py
104 module = nn.Conv1d(
116 module = nn.Conv1d(
226 torch.nn.Conv1d(1, 1, kernel_size=3, dilation=2, stride=2, groups=0)
235 module = nn.Conv1d(
238 expect = F.conv1d(x, module.weight, module.bias, padding="same")
242 module = nn.Conv1d(
245 expect = F.conv1d(x, module.weight, module.bias, padding="same", dilation=2)
249 module = nn.Conv1d(
257 expect = F.conv1d(x_padded, module.weight, module.bias, padding="valid")
263 module = nn.Conv1d(
[all …]
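The tests above check that the string paddings "same" and "valid" agree with explicit numeric padding. The equivalences, as a standalone sketch:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 1, 10)
    w = torch.randn(1, 1, 3)

    # "same" keeps the output length equal to the input length (stride must be 1)
    assert torch.allclose(F.conv1d(x, w, padding="same"),
                          F.conv1d(x, w, padding=1))

    # with dilation, "same" corresponds to padding = dilation * (kernel_size - 1) // 2
    assert torch.allclose(F.conv1d(x, w, padding="same", dilation=2),
                          F.conv1d(x, w, padding=2, dilation=2))

    # "valid" means no padding at all
    assert torch.allclose(F.conv1d(x, w, padding="valid"), F.conv1d(x, w))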
/aosp_15_r20/external/pytorch/benchmarks/functional_autograd_benchmark/
torchaudio_models.py
34 nn.Conv1d(
42 nn.Conv1d(
46 nn.Conv1d(
50 nn.Conv1d(
54 nn.Conv1d(
58 nn.Conv1d(
62 nn.Conv1d(
66 nn.Conv1d(
70 nn.Conv1d(
74 nn.Conv1d(
[all …]
/aosp_15_r20/external/libopus/dnn/torch/osce/utils/layers/
td_shaper.py
54 self.feature_alpha1_f = norm(nn.Conv1d(self.feature_dim, frame_size, 2))
55 self.feature_alpha1_t = norm(nn.Conv1d(self.env_dim, frame_size, 2))
56 self.feature_alpha2 = norm(nn.Conv1d(frame_size, frame_size, 2))
62 self.feature_alpha1b = norm(nn.Conv1d(self.feature_dim + self.env_dim, frame_size, 2))
63 self.feature_alpha1c = norm(nn.Conv1d(self.feature_dim + self.env_dim, frame_size, 2))
65 self.feature_alpha2b = norm(nn.Conv1d(frame_size, frame_size, 2))
66 self.feature_alpha2c = norm(nn.Conv1d(frame_size, frame_size, 2))
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/reference/modules/
conv.py
13 "Conv1d",
54 class Conv1d(_ConvNd, nn.Conv1d): class
70 nn.Conv1d.__init__(
90 x(float) ------------- F.conv1d ---
94 x -- quant --- *dequant -- *F.conv1d --- *quant - dequant
95 and the backend should be able to fuse the ops with `*` into a quantized conv1d
98 result = F.conv1d(
317 and the backend should be able to fuse the ops with `*` into a quantized conv1d
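The reference module keeps a float F.conv1d sandwiched between explicit quantize/dequantize ops, so a backend can pattern-match the starred ops and fuse them into one quantized conv1d. A sketch of that pattern (scales and zero points are made up for illustration):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 2, 8)
    w = torch.randn(4, 2, 3)

    # quant -> dequant -> float conv1d -> quant -> dequant
    xq = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.qint8)
    wq = torch.quantize_per_tensor(w, scale=0.05, zero_point=0, dtype=torch.qint8)
    y = F.conv1d(xq.dequantize(), wq.dequantize())
    yq = torch.quantize_per_tensor(y, scale=0.2, zero_point=0, dtype=torch.qint8)
    print(yq.dequantize().shape)  # torch.Size([1, 4, 6])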
/aosp_15_r20/external/tensorflow/tensorflow/python/layers/
convolutional.py
20 Conv1D = convolutional.Conv1D variable
21 conv1d = convolutional.conv1d variable
37 Convolution1D = Conv1D
43 convolution1d = conv1d
/aosp_15_r20/external/executorch/backends/cadence/reference/operators/
quantized_conv_out.cpp
154 // quantized::conv1d or quantized::conv2d based on the dimensionality of
174 bool conv1d = input.dim() == 3; in quantized_conv_out() local
178 const int h = conv1d ? 1 : input.size(2); in quantized_conv_out()
179 const int w = conv1d ? input.size(2) : input.size(3); in quantized_conv_out()
183 const int wh = conv1d ? 1 : weight.size(2); in quantized_conv_out()
184 const int ww = conv1d ? weight.size(2) : weight.size(3); in quantized_conv_out()
186 const int oh = conv1d ? 1 : out.size(2); in quantized_conv_out()
187 const int ow = conv1d ? out.size(2) : out.size(3); in quantized_conv_out()
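The kernel above dispatches on input rank: a rank-3 input is conv1d and is treated as a degenerate 2-D convolution with height 1. The same dimension mapping in Python terms:

    import torch

    x = torch.randn(1, 2, 16)        # conv1d input is (N, C, W), so dim() == 3
    conv1d = x.dim() == 3
    h = 1 if conv1d else x.size(2)   # height collapses to 1 in the 1-D case
    w = x.size(2) if conv1d else x.size(3)
    print(conv1d, h, w)              # True 1 16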
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/onnx/
unpack_quantized_weights.cpp
247 // CONV1D needs a different unpacking from CONV, since it's
250 enum class QuantizedParamsType { CONV1D, CONV, LINEAR }; enumerator
373 params_type == QuantizedParamsType::CONV1D) && in unpackQuantizedWeightsHelper()
386 // kSpatialDim = 2 even it's for Conv1D from torch.op to adopt Conv2D, in unpackQuantizedWeightsHelper()
387 // so we need a special unpack for Conv1D which has Conv2D dim. in unpackQuantizedWeightsHelper()
390 if (params_type != QuantizedParamsType::CONV1D || i != 0) { in unpackQuantizedWeightsHelper()
396 if (params_type != QuantizedParamsType::CONV1D || i != 0) { in unpackQuantizedWeightsHelper()
402 if (params_type != QuantizedParamsType::CONV1D || i != 0) { in unpackQuantizedWeightsHelper()
408 if (params_type != QuantizedParamsType::CONV1D || i != 0) { in unpackQuantizedWeightsHelper()
428 if (params_type == QuantizedParamsType::CONV1D) { in unpackQuantizedWeightsHelper()
[all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/qat/modules/
conv.py
11 __all__ = ["Conv1d", "Conv2d", "Conv3d"]
126 class Conv1d(_ConvNd, nn.Conv1d): class
128 A Conv1d module attached with FakeQuantize modules for weight,
131 We adopt the same interface as :class:`~torch.nn.Conv1d`
139 _FLOAT_MODULE = nn.Conv1d
140 _FLOAT_CONV_MODULE = nn.Conv1d
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/
weight_utils.py
52 if isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
159 # Conv1d
160 nn.Conv1d: mod_weight_detach,
162 nnq.Conv1d: mod_weight_bias_0,
163 nnqat.Conv1d: mod_weight_detach,
202 F.conv1d: get_conv_fun_weight,
205 toq.conv1d: get_qconv_fun_weight,
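The numeric-suite tables above map each conv flavor to a weight-extraction helper. A simplified sketch of the plain-module branch (mod_weight_detach in the excerpt):

    import torch
    import torch.nn as nn

    def get_conv_weight(mod: nn.Module) -> torch.Tensor:
        # Plain float conv modules expose .weight directly; quantized variants
        # need their own accessors, as the mapping above shows.
        if isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
            return mod.weight.detach()
        raise TypeError(f"unsupported module type: {type(mod)}")

    print(get_conv_weight(nn.Conv1d(2, 4, 3)).shape)  # torch.Size([4, 2, 3])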
mappings.py
30 nn.Conv1d,
40 F.conv1d,
374 # example: nn.Conv1d, nn.ReLU fused into nni.ConvReLU1d
490 F.conv1d,
515 toq.conv1d,
582 nn.Conv1d,
585 nnqat.Conv1d,
643 nnq.Conv1d,
/aosp_15_r20/external/pytorch/torch/ao/quantization/
quantization_mappings.py
61 nn.Conv1d: nnqr.Conv1d,
82 nn.Conv1d: nnq.Conv1d,
116 nniqat.ConvBn1d: nnq.Conv1d,
166 # nn.Conv1d: nnqd.Conv1d,
fuser_method_mappings.py
42 nn.Conv1d: nni.ConvBn1d,
89 nn.Conv1d: nni.ConvBnReLU1d,
107 nn.Conv1d: nni.ConvReLU1d,
195 (nn.Conv1d, nn.BatchNorm1d): fuse_conv_bn,
196 (nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu,
201 (nn.Conv1d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU1d),
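fuse_conv_bn in eval mode folds the BatchNorm statistics into the conv parameters: w' = w * gamma / sqrt(running_var + eps) and b' = (b - running_mean) * gamma / sqrt(running_var + eps) + beta. A quick check via the underlying utility:

    import torch

    conv = torch.nn.Conv1d(2, 4, 3).eval()
    bn = torch.nn.BatchNorm1d(4).eval()
    bn.running_mean.uniform_(-1, 1)   # give BN non-trivial statistics
    bn.running_var.uniform_(0.5, 1.5)

    folded = torch.nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
    x = torch.randn(1, 2, 10)
    assert torch.allclose(folded(x), bn(conv(x)), atol=1e-5)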
/aosp_15_r20/external/pytorch/test/jit/
test_optimize_for_mobile_preserve_debug_info.py
49 return F.conv1d(x, self.weight, self.bias)
59 "prim::ListUnpack": "aten::conv1d",
60 "prim::ListConstruct": "aten::conv1d",
61 "aten::unsqueeze": "aten::conv1d",
62 "aten::conv2d": "aten::conv1d",
63 "aten::squeeze": "aten::conv1d",
/aosp_15_r20/external/pytorch/test/xpu/
test_conv.py
381 z = F.conv1d(x, y, padding="same", dilation=dilation, stride=stride)
386 expect = F.conv1d(x, y, padding=1)
387 actual = F.conv1d(x, y, padding="same")
392 expect = F.conv1d(x, y, padding=3, dilation=2)
393 actual = F.conv1d(x, y, padding="same", dilation=2)
396 expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
397 actual = F.conv1d(x, y, padding="same", dilation=3)
422 expect = F.conv1d(x, y)
423 actual = F.conv1d(x, y, padding="valid")
447 z = F.conv1d(x, y, padding=3, dilation=2)
[all …]
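One subtlety these tests pin down: when the total padding required by "same" is odd, PyTorch pads asymmetrically, which the tests express as symmetric over-padding followed by cropping one element. Reproduced standalone (kernel size 4 and dilation 3 are chosen so the total padding, 3 * (4 - 1) = 9, is odd):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 1, 12)
    w = torch.randn(1, 1, 4)

    expect = F.conv1d(x, w, padding=5, dilation=3)[..., 1:]
    actual = F.conv1d(x, w, padding="same", dilation=3)
    assert torch.allclose(expect, actual, atol=1e-6)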
/aosp_15_r20/external/libopus/dnn/torch/fwgan/models/
fwgan400.py
20 …self.conv = which_norm(nn.Conv1d(in_ch,out_ch,kernel_size,dilation=dilation, groups=groups, bias= …
27 …if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear) or is…
50 if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d)\
82 …if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear) or is…
138 … if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear) or\
174 if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or\
228 … if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d) or isinstance(m, nn.Linear) or\
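The repeated isinstance chains above apply one initialization to every conv-like layer. A condensed sketch using Module.apply (the orthogonal init is an assumed stand-in for illustration, not necessarily what fwgan400 uses):

    import torch.nn as nn

    def init_weights(m: nn.Module) -> None:
        # One branch instead of a long isinstance chain at every call site
        if isinstance(m, (nn.Conv1d, nn.ConvTranspose1d, nn.Linear)):
            nn.init.orthogonal_(m.weight)

    model = nn.Sequential(nn.Conv1d(2, 4, 3), nn.ReLU(), nn.Conv1d(4, 4, 3))
    model.apply(init_weights)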
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/quantized/modules/
conv_relu.py
21 class ConvReLU1d(nnq.Conv1d):
23 A ConvReLU1d module is a fused module of Conv1d and ReLU
25 We adopt the same interface as :class:`torch.ao.nn.quantized.Conv1d`.
28 Same as torch.ao.nn.quantized.Conv1d
67 # Padding in Conv1d is stored as (p, p), need to get (p,)
98 ), "BatchNorm1d should be fused into Conv1d before converting to reference module"
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
conv.h
26 inline Tensor conv1d( in conv1d() function
36 return torch::conv1d( in conv1d()
45 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.conv1d
54 /// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
56 inline Tensor conv1d(
60 return detail::conv1d(
/aosp_15_r20/external/libopus/dnn/torch/dnntools/dnntools/sparsification/
conv1d_sparsifier.py
43 task_list contains a list of tuples (conv1d, params), where conv1d is an instance
44 of torch.nn.Conv1d and params is a tuple (density, [m, n]),
66 >>> conv = torch.nn.Conv1d(8, 16, 8)
125 conv = torch.nn.Conv1d(8, 16, 8)
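The sparsifier consumes (conv1d, (density, [m, n])) tasks, pruning each layer's weight down to the given density over m x n blocks. A simplified unstructured magnitude-pruning stand-in (the real implementation is block-structured):

    import torch

    def sparsify_conv1d_weight(conv: torch.nn.Conv1d, density: float) -> None:
        # Keep the largest-magnitude fraction `density` of weights, zero the rest
        w = conv.weight.data
        k = max(1, int(density * w.numel()))
        threshold = w.abs().flatten().kthvalue(w.numel() - k + 1).values
        w.mul_((w.abs() >= threshold).to(w.dtype))

    conv = torch.nn.Conv1d(8, 16, 8)
    sparsify_conv1d_weight(conv, density=0.25)
    print((conv.weight != 0).float().mean())  # ~0.25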
/aosp_15_r20/external/pytorch/benchmarks/operator_benchmark/pt/
conv_test.py
10 Microbenchmarks for Conv1d and ConvTranspose1d operators.
19 self.conv1d = nn.Conv1d(IC, OC, kernel, stride=stride).to(device=device)
20 self.set_module_name("Conv1d")
23 return self.conv1d(input)
