
Searched +refs:linear +refs:mod (Results 1 – 25 of 700) sorted by relevance


/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/quantized/modules/
linear_relu.py
47 def from_float(cls, mod, use_precomputed_fake_quant=False):
48 return super().from_float(mod, use_precomputed_fake_quant)
94 def from_float(cls, mod, use_precomputed_fake_quant=False):
96 type(mod) == nni.LinearLeakyReLU
98 assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
99 activation_post_process = mod.activation_post_process
100 leaky_relu = mod[1]
101 mod = mod[0]
102 weight_post_process = mod.qconfig.weight()
103 weight_post_process(mod.weight)
[all …]
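The from_float hits above are the hooks eager-mode quantization uses to swap a fused float module for its quantized counterpart. A minimal sketch of the flow that ends up calling them, assuming the fbgemm backend; the module name M is hypothetical:

    import torch

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = torch.ao.quantization.QuantStub()
            self.linear = torch.nn.Linear(4, 4)
            self.relu = torch.nn.ReLU()
            self.dequant = torch.ao.quantization.DeQuantStub()

        def forward(self, x):
            return self.dequant(self.relu(self.linear(self.quant(x))))

    m = M().eval()
    m.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    m = torch.ao.quantization.fuse_modules(m, [["linear", "relu"]])  # -> intrinsic LinearReLU
    m = torch.ao.quantization.prepare(m)
    m(torch.randn(2, 4))                  # calibrate observers
    m = torch.ao.quantization.convert(m)  # invokes LinearReLU.from_float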
/aosp_15_r20/external/pytorch/torch/ao/nn/qat/modules/
linear.py
49 return F.linear(input, self.weight_fake_quant(self.weight), self.bias)
52 def from_float(cls, mod, use_precomputed_fake_quant=False):
57 assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, (
63 assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
64 assert mod.qconfig, "Input float module must have a valid qconfig"
65 if type_before_parametrizations(mod) == LinearReLU:
66 mod = mod[0]
68 qconfig = mod.qconfig
70 mod.in_features,
71 mod.out_features,
[all …]
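Here from_float builds the QAT module whose forward fake-quantizes the weight (line 49 above). A sketch of how prepare_qat reaches it, with a hypothetical two-layer model:

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).train()
    model.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")
    qat_model = torch.ao.quantization.prepare_qat(model)
    # model[0] is now torch.ao.nn.qat.Linear, whose forward is the
    # F.linear(input, self.weight_fake_quant(self.weight), self.bias) above.
    out = qat_model(torch.randn(2, 8))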
/aosp_15_r20/external/pytorch/torch/ao/nn/intrinsic/qat/modules/
linear_fused.py
16 class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
49 nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
127 linear_out = F.linear(input, scaled_weight, zero_bias)
147 def from_float(cls, mod, use_precomputed_fake_quant=False):
153 assert type(mod) == nni.LinearBn1d, (
159 assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
160 assert mod.qconfig, "Input float module must have a valid config"
161 qconfig = mod.qconfig
162 linear, bn = mod[0], mod[1]
164 linear.in_features,
[all …]
linear_relu.py
37 return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
40 def from_float(cls, mod, use_precomputed_fake_quant=False):
41 return super().from_float(mod, use_precomputed_fake_quant)
44 linear = torch.nn.Linear(
47 linear.weight = torch.nn.Parameter(self.weight.detach())
49 linear.bias = torch.nn.Parameter(self.bias.detach())
51 return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
/aosp_15_r20/external/pytorch/test/inductor/
test_cpu_select_algorithm.py
161 self.linear = torch.nn.Linear(in_features, out_features, bias)
164 return self.linear(x)
167 mod = M(bias=bias).to(dtype=dtype).eval()
171 self.common(mod, (v,), atol=atol, rtol=rtol)
196 self.linear = torch.nn.Linear(in_features, out_features, bias)
197 self.linear.weight = self.embeddings.weight
201 return self.linear(x)
204 mod = M(bias=bias).to(dtype=dtype).eval()
207 self.common(mod, (v,), atol=atol, rtol=rtol)
224 self.linear = torch.nn.Linear(in_features, out_features, bias)
[all …]
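These inductor tests share one shape: build a small Linear module, compile it, and compare against eager within a tolerance. A rough stand-in for what the self.common harness checks; tolerances here are illustrative, not the test's:

    import torch

    class M(torch.nn.Module):
        def __init__(self, bias=True):
            super().__init__()
            self.linear = torch.nn.Linear(16, 32, bias)

        def forward(self, x):
            return self.linear(x)

    mod = M(bias=True).eval()
    v = torch.randn(4, 16)
    torch.testing.assert_close(torch.compile(mod)(v), mod(v), atol=1e-5, rtol=1e-5)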
test_mkldnn_pattern_matcher.py
86 def cal_conv_generated_kernel_number(mod, input, dtype):
97 mod = copy.deepcopy(mod)
104 output = mod(input)
133 self, mod, inputs, is_qat=False, is_dynamic=False, quantizer=None
138 mod,
156 mod,
193 mod, inputs, is_qat, is_dynamic, quantizer
211 expected = mod(*inputs)
212 actual = torch.compile(mod)(*clone_inputs)
228 mod,
[all …]
test_cudagraph_trees.py
339 def foo(mod, x):
340 return mod(x)
343 mod = Mod()
347 self.assertEqual(foo(mod, inp()), mod2(inp()))
348 self.assertEqual(mod.buf, mod2.buf)
796 mod = make_fx(fn)(*get_aligned_inputs())
804 mod, inps, static_input_idxs=[0], cudagraphs=True
1029 def foo(mod, x):
1030 return mod(x)
1576 def foo(mod, inp):
[all …]
/aosp_15_r20/external/pytorch/test/
test_mkldnn_fusion.py
198 self.linear = torch.nn.Linear(
204 x = self.linear(x)
214 mod = M(pointwise_info.pointwise_module, input_shape[-1], 10, bias).eval()
218 ref = mod(v)
223 v, mod.linear.weight, mod.linear.bias, attr, scalars, algorithm
250 mod = M(pointwise_info.pointwise_module, dim, iC, oC, dilation, groups, bias, kernel_size=3)
251 mod = mod.to(memory_format=memory_format).eval()
253 ref = mod(x)
258 … x, mod.conv.weight, mod.conv.bias, mod.conv.padding, mod.conv.stride, mod.conv.dilation,
259 mod.conv.groups, attr, scalars, algorithm
[all …]
test_fx_experimental.py
126 self.linear = torch.nn.Linear(4, 4)
129 linear = self.linear(a)
130 add = linear + a
183 self.linear = torch.nn.Linear(4, 4)
188 linear = self.linear(add_1)
189 add_2 = linear + self.c
215 self.linear = torch.nn.Linear(4, 4)
220 linear_1 = self.linear(add_1)
308 self.linear = torch.nn.Linear(4, 4)
313 linear_1 = self.linear(add_1)
[all …]
/aosp_15_r20/external/executorch/exir/tests/
test_memory_planning.py
64 for name, mod in module.named_children():
65 swap_modules(mod, condition, convert_func)
66 if condition(mod):
67 out = convert_func(mod)
97 for linear in self.linears:
98 o1 = linear(o1)
100 for linear in self.linears:
101 o2 = linear(o2)
432 F.linear,
445 lambda mod: isinstance(mod, torch.nn.Linear),
[all …]
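The swap_modules hits trace a recursive replace pattern: descend through named_children first, then convert any child matching the condition. A self-contained guess at the body, reconstructed from the matched lines above:

    import torch

    def swap_modules(module, condition, convert_func):
        for name, child in module.named_children():
            swap_modules(child, condition, convert_func)
            if condition(child):
                setattr(module, name, convert_func(child))
        return module

    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    swap_modules(
        model,
        lambda mod: isinstance(mod, torch.nn.Linear),
        lambda mod: torch.nn.Linear(mod.in_features, mod.out_features),
    )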
/aosp_15_r20/external/pytorch/torch/ao/nn/sparse/quantized/dynamic/
linear.py
10 from torch.ao.nn.sparse.quantized import linear
52 self._packed_params = linear.LinearPackedParams(
66 return _hide_packed_params_repr(self, linear.LinearPackedParams)
137 def from_float(cls, mod, use_precomputed_fake_quant=False):
142 assert type(mod) == cls._FLOAT_MODULE, (
150 assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
151 if type(mod) == nni.LinearReLU:
152 mod = mod[0]
153 if mod.qconfig is not None and mod.qconfig.weight is not None:
154 weight_observer = mod.qconfig.weight()
[all …]
/aosp_15_r20/external/pytorch/torch/ao/ns/fx/
weight_utils.py
22 def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
23 return mod.weight.detach() # type: ignore[operator]
26 def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
27 return mod[0].weight.detach() # type: ignore[index]
30 def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
31 return mod._weight_bias()[0] # type: ignore[operator]
34 def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
36 for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type]
38 param_value = mod._flat_weights[idx].detach() # type: ignore[index]
43 def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
[all …]
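These helpers normalize weight extraction across module kinds for numeric-suite comparisons: plain modules expose .weight, fused modules hold it at index 0, quantized ones behind _weight_bias(). A runnable distillation of the first two, assuming standard nn modules:

    import torch
    import torch.nn as nn

    def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
        return mod.weight.detach()       # e.g. nn.Linear, nn.Conv2d

    def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
        return mod[0].weight.detach()    # e.g. fused intrinsic LinearReLU

    w = mod_weight_detach(nn.Linear(4, 4))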
/aosp_15_r20/external/pytorch/test/dynamo/
test_modules.py
106 mod = self.linear1
108 mod = self.linear2
109 return F.relu(mod(x))
125 def call_and_scale(self, mod, x):
126 x = mod(x)
141 def call_and_scale(self, mod, x):
142 x = mod(x)
165 self.mod = UnsupportedModule()
168 return 1 + self.mod(x * 1.5)
180 self.mod = ModuleWithStaticForward()
[all …]
test_export.py
1078 x = torch.nn.functional.linear(x, torch.randn(4, 4))
1121 x = torch.nn.functional.linear(x, torch.randn(4, 4))
1158 linear = torch.nn.Linear(2, 2)
1164 y = linear(y)
1198 self.linear = torch.nn.Linear(2, 2)
1206 x = self.linear(x)
1224 self.linear = torch.nn.Linear(2, 2)
1232 x = self.linear(x)
1254 self.linear = torch.nn.Linear(2, 2)
1262 x = self.linear(x)
[all …]
test_activation_checkpointing.py
313 self.linear = torch.nn.Linear(10, 10)
316 return torch.sigmoid(self.linear(x))
318 mod = MockModule().cuda()
322 mod, torch.sin(x), use_reentrant=True
342 self.linear = torch.nn.Linear(10, 10)
345 return torch.nn.functional.gelu(self.linear(x))
347 mod = MockModule().cuda()
351 mod, torch.sin(x), use_reentrant=True
429 self.linear = torch.nn.Linear(10, 10)
433 return self.dropout(self.linear(x))
[all …]
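The checkpointing tests wrap a small module in torch.utils.checkpoint so the forward is recomputed during backward instead of storing activations. A CPU sketch of the call pattern above (the tests themselves run on CUDA):

    import torch
    from torch.utils.checkpoint import checkpoint

    class MockModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(10, 10)

        def forward(self, x):
            return torch.sigmoid(self.linear(x))

    mod = MockModule()
    x = torch.randn(10, 10, requires_grad=True)
    out = checkpoint(mod, torch.sin(x), use_reentrant=True)
    out.sum().backward()  # re-runs the forward to rebuild activations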
/aosp_15_r20/external/pytorch/test/jit/
test_freezing.py
1075 mod = torch.jit.freeze(torch.jit.script(Mod().eval()))
1076 obj = mod.graph.findNode("prim::Constant")
1080 torch.jit.save(mod, buffer)
1084 obj = mod.graph.findNode("prim::Constant")
1263 mod = nn.Conv2d(8, 3, 4, 2, 1)
1264 self.assertTrue(mod.weight.requires_grad)
1265 smod = torch.jit.script(mod)
1268 self.assertTrue(mod.weight.requires_grad)
1511 mod = self.mod1
1513 mod = self.mod2
[all …]
test_module_containers.py
77 for name, mod in self.moduledict.items():
80 x = mod(x)
83 for mod in self.moduledict.values():
84 x = mod(x)
102 for i, (name, mod) in enumerate(self.moduledict.items()):
106 x = mod(x)
109 for i, mod in enumerate(self.moduledict.values()):
111 x = mod(x)
118 for mod, mod in zip(self.moduledict.values(), self.moduledict.values()):
120 x2 = mod(mod(x2))
[all …]
/aosp_15_r20/external/pytorch/test/distributed/_composable/
test_replicate.py
111 def _compare_module(self, mod, replicate_mod):
128 step_model(mod, input, target)
140 len(list(mod.parameters())),
143 for i, j in zip(mod.parameters(), replicate_mod.parameters()):
280 for linear in model.linears:
281 fully_shard(linear)
284 for linear in model.linears:
285 self.assertTrue(isinstance(linear.weight, DTensor))
289 for linear in model.linears:
290 self.assertTrue(isinstance(linear.weight, DTensor))
/aosp_15_r20/external/pytorch/test/quantization/pt2e/
test_xnnpack_quantizer.py
305 self.linear = torch.nn.Linear(5, 5)
308 return self.linear(x)
313 self.linear = torch.nn.Linear(5, 5)
317 x = self.linear(x)
327 torch.ops.aten.linear.default: 2,
334 torch.ops.aten.linear.default,
338 torch.ops.aten.linear.default,
371 if n.op == "call_function" and n.target == torch.ops.aten.linear.default:
391 self.linear = torch.nn.Linear(5, 5)
394 return self.linear(x)
[all …]
/aosp_15_r20/external/executorch/backends/xnnpack/test/ops/
linear.py
53 self.linear = torch.nn.Linear(
65 return self.linear(x)
85 self.linear = torch.nn.Linear(in_size, out_size, bias=use_bias).to(dtype=dtype)
88 return torch.nn.functional.relu(self.linear(x))
123 a = torch.nn.functional.linear(x, self.linear1_weight, self.linear1_bias)
124 b = torch.nn.functional.linear(y, self.linear2_weight, self.linear2_bias)
125 c = torch.nn.functional.linear(b, self.linear3_weight, self.linear3_bias)
159 a = torch.nn.functional.linear(x, self.linear1_weight, self.linear1_bias)
160 b = torch.nn.functional.linear(a, self.linear2_weight, self.linear2_bias)
330 a = torch.nn.functional.linear(
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/
ir_array.cc
40 llvm::Value* linear, const Shape& shape,
43 CHECK_NE(linear, nullptr);
44 linear_ = linear;
48 llvm::Value* linear, const Shape& shape,
66 auto* quot = b->CreateUDiv(linear, GetConstantWithIndexType(divisor));
78 llvm::Value* linear, const Shape& shape,
92 auto* quot = b->CreateUDiv(linear, divisor, "quot");
106 IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
109 linear_(linear),
112 CHECK_NE(linear, nullptr);
[all …]
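Delinearize here recovers per-dimension indices from a flat index by repeated division and remainder, emitted as LLVM UDiv instructions. A pure-Python stand-in for the arithmetic, assuming a row-major shape rather than XLA's minor-to-major layouts:

    def delinearize(linear, shape):
        # Walk dimensions minor-to-major, peeling off one coordinate
        # per div/mod step, then restore major-to-minor order.
        index, divisor = [], 1
        for dim in reversed(shape):
            index.append((linear // divisor) % dim)
            divisor *= dim
        return list(reversed(index))

    assert delinearize(7, (2, 3, 4)) == [0, 1, 3]  # 7 == 0*12 + 1*4 + 3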
/aosp_15_r20/external/executorch/backends/apple/mps/test/
test_mps_linear.py
81 self.linear = torch.nn.Linear(in_size, out_size, bias=use_bias)
84 return torch.nn.functional.relu(self.linear(x))
103 self.linear = torch.nn.Linear(in_size, out_size, bias=use_bias)
106 return torch.nn.functional.relu(self.linear(x))
158 mod = self.ManualDQLinear(
170 mod,
189 mod = self.ManualDQLinear(
201 mod,
491 return torch.nn.functional.linear(input, w, self.bias)
495 mod: torch.nn.Module,
[all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/
linear.py
149 _FLOAT_MODULE = (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear)
190 return torch.ops.quantized.linear(
285 def from_float(cls, mod, use_precomputed_fake_quant=False):
294 if hasattr(mod, "weight_fake_quant"):
295 if type_before_parametrizations(mod) == nniqat.LinearBn1d:
296 mod.weight, mod.bias = fuse_linear_bn_weights(
297 mod.weight,
298 mod.bias,
299 mod.bn.running_mean,
300 mod.bn.running_var,
[all …]
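The fuse_linear_bn_weights call above folds a trained BatchNorm1d into the preceding Linear before quantization. The algebra: with s = gamma / sqrt(running_var + eps), the fused weight is W scaled row-wise by s and the fused bias is (b - running_mean) * s + beta. A sketch checking that identity (helper name fuse_linear_bn is hypothetical):

    import torch

    def fuse_linear_bn(linear, bn):
        s = bn.weight / torch.sqrt(bn.running_var + bn.eps)
        w = linear.weight * s.unsqueeze(1)             # scale each output row
        b = (linear.bias - bn.running_mean) * s + bn.bias
        return w, b

    lin, bn = torch.nn.Linear(4, 4), torch.nn.BatchNorm1d(4).eval()
    w, b = fuse_linear_bn(lin, bn)
    x = torch.randn(2, 4)
    torch.testing.assert_close(bn(lin(x)), torch.nn.functional.linear(x, w, b))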
/aosp_15_r20/external/pytorch/test/quantization/core/
test_workflow_module.py
1092 mod = FusedMovingAvgObsFakeQuantize()
1093 torch.ao.quantization.enable_fake_quant(mod)
1094 torch.ao.quantization.enable_observer(mod)
1095 mod.to(device)
1096 out = mod(x)
1103 mod.observer_enabled,
1104 mod.fake_quant_enabled,
1119 running_min_op, mod.activation_post_process.min_val
1122 running_max_op, mod.activation_post_process.max_val
1139 mod = FusedMovingAvgObsFakeQuantize(averaging_constant=0.001)
[all …]
/aosp_15_r20/external/pytorch/torch/testing/_internal/
common_quantization.py
622 def checkQuantDequant(self, mod):
626 self.assertEqual(type(mod.quant), nnq.Quantize)
627 self.assertEqual(type(mod.dequant), nnq.DeQuantize)
629 def checkWrappedQuantizedLinear(self, mod):
634 self.assertEqual(type(mod.module), nnq.Linear)
635 self.checkQuantDequant(mod)
637 def checkQuantizedLinear(self, mod):
638 self.assertEqual(type(mod), nnq.Linear)
640 def checkDynamicQuantizedLinear(self, mod, dtype):
644 self.assertEqual(type(mod), nnqd.Linear)
[all …]
