/aosp_15_r20/external/executorch/backends/xnnpack/partition/config/

node_configs.py
    38: conv = node.all_input_nodes[0]
    62: deps.extend(node.all_input_nodes[1:5])
    89: input_node = node.all_input_nodes[0]
    98: max_input = node.all_input_nodes[0]
    132: weight = node.all_input_nodes[1]
    145: weight = node.all_input_nodes[1]

generic_node_configs.py
    46: if [(is_dequant(dq_input)) for dq_input in node.all_input_nodes].count(
    52: quantized_deps.extend(node.all_input_nodes)
    180: num_tensors = len(node.all_input_nodes)
    237: node_input = node.all_input_nodes[0]
    495: if len(node.all_input_nodes) < 4:
    497: mask_node = node.all_input_nodes[3]

gemm_configs.py
    145: if len(dequant_node.all_input_nodes) < 2:
    149: gemm_deps.extend(dequant_node.all_input_nodes[1:3])
    196: if len(node.all_input_nodes) > 2 and self.bias_idx:
    383: len(input_node.all_input_nodes) != 0
    386: input_node = input_node.all_input_nodes[0]
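
The XNNPACK partition configs above share one pattern: they read node.all_input_nodes (the list of producer nodes feeding a torch.fx.Node) and check its length or index into it to pick out the activation, weight, and optional bias an operator consumes before deciding whether it can be lowered. A minimal sketch of that pattern on a traced linear op; the helper collect_gemm_deps and the activation/weight/bias indexing convention are illustrative assumptions, not the actual config code.

    import torch
    import torch.fx as fx
    import torch.nn.functional as F


    def collect_gemm_deps(node: fx.Node) -> list:
        # Hypothetical helper mirroring the configs above: gather the
        # producers a GEMM-like node depends on via all_input_nodes.
        deps = []
        inputs = node.all_input_nodes
        if len(inputs) == 0:
            return deps
        deps.append(inputs[0])        # activation input
        if len(inputs) > 1:
            deps.append(inputs[1])    # weight
        if len(inputs) > 2:
            deps.append(inputs[2])    # optional bias
        return deps


    def f(x, w, b):
        return F.linear(x, w, b)


    gm = fx.symbolic_trace(f)
    for n in gm.graph.nodes:
        if n.op == "call_function":
            print(n.name, [d.name for d in collect_gemm_deps(n)])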

/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/

ddp_fusion.py
    186: all_input_nodes: List[fx.Node],
    193: for input_node in all_input_nodes:
    206: divisors = [div.args[1] for div in all_input_nodes]
    244: all_input_nodes: List[fx.Node],
    253: dividends = [div.args[0] for div in all_input_nodes]
    254: divisors = [div.args[1] for div in all_input_nodes]
    275: for idx in range(len(all_input_nodes)):
    404: all_input_nodes = []
    407: all_input_nodes.append(input_node)
    416: graph, last_input_node, all_input_nodes, comm_blocks[-1]
    [all …]

micro_pipeline_tp.py
    50: for inp in node.all_input_nodes:
    759: node_to_ancestors[node] = set(node.all_input_nodes)
    760: for dep in node.all_input_nodes:

/aosp_15_r20/external/executorch/backends/xnnpack/_passes/

channels_last_tagged_reshape_pass.py
    352: if len(node.all_input_nodes) == 0:
    362: for input_node in node.all_input_nodes[1:]:
    370: for input_node in node.all_input_nodes:
    376: self.is_nhwc_node(input_node) for input_node in node.all_input_nodes
    389: for input_node in node.all_input_nodes
    391: for input_node in node.all_input_nodes:
    395: for input_node in node.all_input_nodes:

/aosp_15_r20/external/executorch/backends/arm/operators/

op_conv2d.py
    86: get_quant_arg_upstream(node.all_input_nodes[0]).zp if is_quant_node else 0
    99: if len(node.all_input_nodes) == 2:
    162: input_scale = get_quant_arg_upstream(node.all_input_nodes[0]).scale
    163: weight_scale = get_quant_arg_upstream(node.all_input_nodes[1]).scale

op_exp.py
    45: assert len(node.all_input_nodes) == 1
    51: input_node = node.all_input_nodes[0]

op_tanh.py
    45: assert len(node.all_input_nodes) == 1
    52: input_node = node.all_input_nodes[0]

op_sigmoid.py
    45: assert len(node.all_input_nodes) == 1
    52: input_node = node.all_input_nodes[0]

op_log.py
    45: assert len(node.all_input_nodes) == 1
    52: input_node = node.all_input_nodes[0]

/aosp_15_r20/external/pytorch/torch/fx/passes/

tools_common.py
    119: for n in node.all_input_nodes
    156: if self.recursive_add_node(fusion_group, arg.all_input_nodes, visited):
    179: inputs=set(node.all_input_nodes),
    206: for arg in node.all_input_nodes:

splitter_base.py
    675: for arg in n.all_input_nodes:
    692: for arg in node.all_input_nodes:
    736: for arg in node.all_input_nodes:
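
The loops in tools_common.py and splitter_base.py walk node.all_input_nodes to find which producers of a candidate fusion group or subgraph live outside it. A simplified sketch of that dependency walk, assuming the subgraph is just a plain collection of nodes; the splitter's own bookkeeping classes are omitted and this is not the actual splitter logic.

    from typing import Iterable, Set

    import torch.fx as fx


    def external_inputs(subgraph: Iterable[fx.Node]) -> Set[fx.Node]:
        # Producers used by nodes in `subgraph` that are not themselves
        # members of it; these become the subgraph's external inputs.
        members = set(subgraph)
        deps: Set[fx.Node] = set()
        for node in members:
            for arg in node.all_input_nodes:
                if arg not in members:
                    deps.add(arg)
        return deps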

/aosp_15_r20/external/executorch/backends/arm/

tosa_utils.py
    212: num_inputs = len(node.all_input_nodes)
    215: input1 = node.all_input_nodes[0]
    217: input2 = node.all_input_nodes[0]
    219: input2 = node.all_input_nodes[1]

tosa_quant_utils.py
    100: for input_node in node.all_input_nodes:
    188: input_nodes = list(node.all_input_nodes)

/aosp_15_r20/external/executorch/exir/backend/

utils.py
    43: and (len(node_left.all_input_nodes) == len(node_right.all_input_nodes))
    47: node_left.all_input_nodes, node_right.all_input_nodes

/aosp_15_r20/external/executorch/backends/xnnpack/operators/

quant_params.py
    164: q_input = quant_node.all_input_nodes[0]
    257: dq_input = src.all_input_nodes[0]
    264: src.all_input_nodes[0].op in ["get_attr", "placeholder"],

op_max_pool2d.py
    44: kwargs["input_id"] = vals_to_ids[node.all_input_nodes[0]]
    50: kwargs["input_id"] = vals_to_ids[node.all_input_nodes[0]]

/aosp_15_r20/external/pytorch/torch/distributed/tensor/experimental/

_tp_transform.py
    203: input_nodes = node.all_input_nodes
    367: for idx, input_arg in enumerate(node.all_input_nodes):
    383: for input_arg in node.all_input_nodes:
    494: for input_arg in node.all_input_nodes:

/aosp_15_r20/external/pytorch/torch/_dynamo/

compiled_autograd.py
    366: and len(a.all_input_nodes) == len(b.all_input_nodes)
    410: for i, inp in enumerate(aot_node.all_input_nodes):
    411: ca_node.all_input_nodes[i].name = f"aot{aot_id}_{inp.name}"

/aosp_15_r20/external/pytorch/torch/fx/experimental/

merge_matmul.py
    59: if len(a.all_input_nodes) == 0:
    68: for inp in a.all_input_nodes:

/aosp_15_r20/external/executorch/backends/xnnpack/utils/

quant_utils.py
    102: input_val = node.all_input_nodes[0].meta["val"]
    103: scale_val = node.all_input_nodes[1].meta["val"]
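
quant_utils.py reads the fake value that exported graphs store under each input node's meta["val"] to inspect quantization inputs without executing the graph. A hedged sketch of that access pattern; the helper below and its shape extraction are illustrative only, not the XNNPACK implementation.

    import torch.fx as fx


    def input_shapes(node: fx.Node):
        # Exported/Dynamo-traced nodes carry a fake tensor under meta["val"];
        # reading it from the producers gives shapes/dtypes ahead of time.
        shapes = []
        for inp in node.all_input_nodes:
            val = inp.meta.get("val")
            if val is not None and hasattr(val, "shape"):
                shapes.append(tuple(val.shape))
            else:
                shapes.append(None)
        return shapes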

/aosp_15_r20/external/pytorch/torch/onnx/_internal/exporter/

_core.py
    303: assert len(node.all_input_nodes) == 1
    304: source = node.all_input_nodes[0]
    393: source = arg.all_input_nodes[0]
    427: source = node.all_input_nodes[0]

/aosp_15_r20/external/pytorch/torch/fx/passes/infra/

partitioner.py
    41: for input_node in node.all_input_nodes:
    293: for input_n in node.all_input_nodes:

/aosp_15_r20/external/pytorch/torch/fx/passes/utils/

matcher_utils.py
    96: self.pattern_returning_nodes: List[Node] = output_node.all_input_nodes
    104: self.pattern_anchors = [n for n in output_node.all_input_nodes if len(n.users) == 1]
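
matcher_utils.py treats the inputs of the pattern graph's output node as the values the pattern returns and anchors the subgraph match on them. A small illustration of what output_node.all_input_nodes yields for a traced pattern; the pattern function here is made up for demonstration.

    import torch
    import torch.fx as fx


    def pattern(x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x + 1)


    pattern_gm = fx.symbolic_trace(pattern)
    output_node = next(n for n in pattern_gm.graph.nodes if n.op == "output")

    # The nodes feeding the output are what the pattern "returns"; the
    # matcher anchors its search on them (see pattern_returning_nodes above).
    returning_nodes = output_node.all_input_nodes
    print([n.name for n in returning_nodes])  # e.g. ['relu']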