Lines Matching full:smmu
412 struct acpi_iort_smmu_v3 *smmu; in iort_get_id_mapping_index() local
424 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
430 if (smmu->event_gsiv && smmu->pri_gsiv && in iort_get_id_mapping_index()
431 smmu->gerr_gsiv && smmu->sync_gsiv) in iort_get_id_mapping_index()
433 } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { in iort_get_id_mapping_index()
437 if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
443 return smmu->id_mapping_index; in iort_get_id_mapping_index()
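
The hits above belong to iort_get_id_mapping_index(): for an SMMU v3 node it decides whether a dedicated DeviceID mapping index applies and whether that index is in range. Below is a minimal, standalone sketch of that decision flow; the struct layout, flag bit, and return convention are simplified stand-ins, not the kernel's acpi_iort_smmu_v3 definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the IORT SMMUv3 node data; illustrative only. */
#define SMMU_V3_DEVICEID_VALID  (1u << 0)

struct smmu_v3_desc {
	unsigned int flags;
	unsigned int event_gsiv, pri_gsiv, gerr_gsiv, sync_gsiv;
	unsigned int id_mapping_index;
};

/* Return the DeviceID mapping index, or -1 when none applies. */
static int get_id_mapping_index(const struct smmu_v3_desc *smmu,
				unsigned int mapping_count)
{
	/* All queues use wired GSIVs, so no MSI DeviceID mapping is needed. */
	if (smmu->event_gsiv && smmu->pri_gsiv &&
	    smmu->gerr_gsiv && smmu->sync_gsiv)
		return -1;

	/* Firmware did not flag the DeviceID mapping index as valid. */
	if (!(smmu->flags & SMMU_V3_DEVICEID_VALID))
		return -1;

	/* The index must refer to one of the node's existing ID mappings. */
	if (smmu->id_mapping_index >= mapping_count) {
		fprintf(stderr, "device ID mapping index out of range\n");
		return -1;
	}

	return (int)smmu->id_mapping_index;
}
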
536 * as NC (named component) -> SMMU -> ITS. If the type is matched, in iort_node_map_platform_id()
556 * device (such as SMMU, PMCG), its iort node already cached in iort_find_dev_node()
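
The two comment fragments above describe how a platform device's ID is mapped through a chain of IORT nodes (e.g. Named Component -> SMMU -> ITS) until a node of the requested type is reached, and how platform devices such as the SMMU or PMCG already have their IORT node cached. A rough, self-contained sketch of that walk, using placeholder types that only illustrate the idea:

/* Placeholder node and mapping types; not the ACPI IORT structures. */
enum node_type { NODE_NAMED_COMPONENT, NODE_SMMU, NODE_ITS };

struct iort_like_node {
	enum node_type type;
	struct iort_like_node *output;   /* node this one's IDs map to, if any */
};

/*
 * Follow the mapping chain from 'node' until a node of the requested type
 * is reached; the real code also rewrites the ID through each mapping entry.
 */
static struct iort_like_node *map_to_type(struct iort_like_node *node,
					  enum node_type target)
{
	while (node && node->type != target)
		node = node->output;
	return node;
}
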
888 struct acpi_iort_node *smmu, in iort_get_rmrs() argument
981 struct acpi_iort_node *smmu = NULL; in iort_node_get_rmr_info() local
1002 * Go through the ID mappings and see if we have a match for SMMU in iort_node_get_rmr_info()
1032 iort_get_rmrs(node, smmu, sids, num_sids, head); in iort_node_get_rmr_info()
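
The RMR hits (iort_get_rmrs() and iort_node_get_rmr_info()) show the reserved-memory path: the RMR node's ID mappings are scanned for entries whose output reference is the SMMU of interest, the matching stream IDs are collected, and only then are the reserved-region descriptors handed over. A simplified sketch of that scan with placeholder types; the "+ 1" reflects the assumption that the IORT ID-count field stores the range length minus one.

/* Placeholder ID-mapping entry; not the ACPI IORT structure. */
struct id_map_entry {
	unsigned int input_base;
	unsigned int id_count;          /* assumed: number of IDs minus one */
	const void  *output_ref;        /* node the IDs map to */
};

/* Count the stream IDs in map[] that target the given SMMU node. */
static unsigned int count_sids_for_smmu(const struct id_map_entry *map,
					unsigned int nr_mappings,
					const void *smmu_node)
{
	unsigned int i, num_sids = 0;

	for (i = 0; i < nr_mappings; i++) {
		if (map[i].output_ref != smmu_node)
			continue;
		num_sids += map[i].id_count + 1;
	}
	return num_sids;
}
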
1093 struct acpi_iort_smmu_v3 *smmu; in iort_get_msi_resv_iommu() local
1095 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; in iort_get_msi_resv_iommu()
1096 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) in iort_get_msi_resv_iommu()
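
These hits come from iort_get_msi_resv_iommu(), which keys off the SMMU v3 model field to decide whether the platform (here, HiSilicon Hi161x) needs an extra MSI reservation. A trivial sketch of a model-based quirk check of this kind, with made-up model constants:

/* Illustrative model identifiers; the real values come from the IORT spec. */
enum smmu_v3_model { MODEL_GENERIC, MODEL_HISILICON_HI161X, MODEL_CAVIUM_CN99XX };

/* Does this SMMU implementation need a fixed MSI doorbell reservation? */
static int needs_hw_msi_reservation(enum smmu_v3_model model)
{
	return model == MODEL_HISILICON_HI161X;
}
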
1208 pr_warn("IORT node type %u does not describe an SMMU\n", type); in iort_iommu_driver_enabled()
1237 /* If there's no SMMU driver at all, give up now */ in iort_iommu_xlate()
1246 * If the SMMU drivers are enabled but not loaded/probed in iort_iommu_xlate()
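
The fragments from iort_iommu_driver_enabled() and iort_iommu_xlate() describe a three-way outcome: translation fails outright when no SMMU driver is built in, and is retried later (probe deferral) when the driver is enabled but its device has not been probed yet. A condensed sketch of that decision, using plain errno values rather than the kernel's fwspec plumbing; the EPROBE_DEFER definition is copied here only because it is not in the C library headers.

#include <errno.h>

#ifndef EPROBE_DEFER
#define EPROBE_DEFER 517     /* kernel-internal errno for "retry probe later" */
#endif

/*
 * Decide how an IOMMU translation request should be resolved:
 *   0              - the SMMU is probed, go ahead and translate
 *   -EPROBE_DEFER  - driver enabled but not yet probed, retry later
 *   -ENODEV        - no SMMU driver at all, give up now
 */
static int xlate_outcome(int driver_enabled, int device_probed)
{
	if (!driver_enabled)
		return -ENODEV;
	if (!device_probed)
		return -EPROBE_DEFER;
	return 0;
}
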
1457 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_count_resources() local
1462 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_count_resources()
1464 if (smmu->event_gsiv) in arm_smmu_v3_count_resources()
1467 if (smmu->pri_gsiv) in arm_smmu_v3_count_resources()
1470 if (smmu->gerr_gsiv) in arm_smmu_v3_count_resources()
1473 if (smmu->sync_gsiv) in arm_smmu_v3_count_resources()
1479 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_is_combined_irq() argument
1485 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_is_combined_irq()
1489 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking in arm_smmu_v3_is_combined_irq()
1492 return smmu->event_gsiv == smmu->pri_gsiv && in arm_smmu_v3_is_combined_irq()
1493 smmu->event_gsiv == smmu->gerr_gsiv && in arm_smmu_v3_is_combined_irq()
1494 smmu->event_gsiv == smmu->sync_gsiv; in arm_smmu_v3_is_combined_irq()
1497 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_resource_size() argument
1501 * which doesn't support the page 1 SMMU register space. in arm_smmu_v3_resource_size()
1503 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_resource_size()
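
The arm_smmu_v3_count_resources(), arm_smmu_v3_is_combined_irq(), and arm_smmu_v3_resource_size() hits cover the Cavium ThunderX2 (CN99xx) quirks: that implementation cannot signal SMMU events via MSIs, so all four queue interrupts may be wired to a single combined line, and its register window is half the usual size because page 1 is not implemented. A standalone sketch of both checks; the sizes and model field are illustrative stand-ins.

#include <stdbool.h>

#define SZ_64K   0x10000UL
#define SZ_128K  0x20000UL

struct smmu_v3_quirks {
	unsigned int event_gsiv, pri_gsiv, gerr_gsiv, sync_gsiv;
	int model_is_cn99xx;            /* stand-in for the IORT model field */
};

/* All four queue interrupts share one wired line on CN99xx-style parts. */
static bool is_combined_irq(const struct smmu_v3_quirks *s)
{
	if (!s->model_is_cn99xx)
		return false;
	return s->event_gsiv == s->pri_gsiv &&
	       s->event_gsiv == s->gerr_gsiv &&
	       s->event_gsiv == s->sync_gsiv;
}

/* Register window: one 64 KiB page when page 1 is missing, two otherwise. */
static unsigned long resource_size_for(const struct smmu_v3_quirks *s)
{
	return s->model_is_cn99xx ? SZ_64K : SZ_128K;
}
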
1512 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_init_resources() local
1516 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_init_resources()
1518 res[num_res].start = smmu->base_address; in arm_smmu_v3_init_resources()
1519 res[num_res].end = smmu->base_address + in arm_smmu_v3_init_resources()
1520 arm_smmu_v3_resource_size(smmu) - 1; in arm_smmu_v3_init_resources()
1524 if (arm_smmu_v3_is_combined_irq(smmu)) { in arm_smmu_v3_init_resources()
1525 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1526 acpi_iort_register_irq(smmu->event_gsiv, "combined", in arm_smmu_v3_init_resources()
1531 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1532 acpi_iort_register_irq(smmu->event_gsiv, "eventq", in arm_smmu_v3_init_resources()
1536 if (smmu->pri_gsiv) in arm_smmu_v3_init_resources()
1537 acpi_iort_register_irq(smmu->pri_gsiv, "priq", in arm_smmu_v3_init_resources()
1541 if (smmu->gerr_gsiv) in arm_smmu_v3_init_resources()
1542 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", in arm_smmu_v3_init_resources()
1546 if (smmu->sync_gsiv) in arm_smmu_v3_init_resources()
1547 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", in arm_smmu_v3_init_resources()
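
The arm_smmu_v3_init_resources() block builds one MEM resource for the register window, then either a single "combined" interrupt or up to four individually named queue interrupts (eventq, priq, gerror, cmdq-sync), each registered only if its GSIV is non-zero. A compact sketch of that pattern with a hypothetical register_irq() helper in place of acpi_iort_register_irq():

#include <stdio.h>

/* Hypothetical helper standing in for acpi_iort_register_irq(). */
static void register_irq(unsigned int gsiv, const char *name)
{
	if (gsiv)
		printf("IRQ %u -> %s\n", gsiv, name);
}

struct smmu_v3_wires {
	unsigned int event_gsiv, pri_gsiv, gerr_gsiv, sync_gsiv;
	int combined;                   /* see the combined-IRQ check above */
};

static void init_irq_resources(const struct smmu_v3_wires *s)
{
	if (s->combined) {
		/* One wired line services every queue. */
		register_irq(s->event_gsiv, "combined");
		return;
	}
	register_irq(s->event_gsiv, "eventq");
	register_irq(s->pri_gsiv,   "priq");
	register_irq(s->gerr_gsiv,  "gerror");
	register_irq(s->sync_gsiv,  "cmdq-sync");
}
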
1556 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_dma_configure() local
1560 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_dma_configure()
1562 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? in arm_smmu_v3_dma_configure()
1579 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_set_proximity() local
1581 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_set_proximity()
1582 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { in arm_smmu_v3_set_proximity()
1583 int dev_node = pxm_to_node(smmu->pxm); in arm_smmu_v3_set_proximity()
1589 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", in arm_smmu_v3_set_proximity()
1590 smmu->base_address, in arm_smmu_v3_set_proximity()
1591 smmu->pxm); in arm_smmu_v3_set_proximity()
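
The arm_smmu_v3_dma_configure() and arm_smmu_v3_set_proximity() hits show two node properties being propagated to the platform device: the COHACC override flag selects coherent versus non-coherent DMA, and a valid proximity domain (PXM) is converted to a NUMA node for the SMMU. A simplified sketch of both decisions; the flag bits and node-mapping helper are placeholders.

/* Placeholder flag bits; the real ones are defined by the IORT spec. */
#define F_COHACC_OVERRIDE  (1u << 0)
#define F_PXM_VALID        (1u << 1)

enum dma_attr { DMA_NON_COHERENT, DMA_COHERENT };

static enum dma_attr dma_attr_from_flags(unsigned int flags)
{
	return (flags & F_COHACC_OVERRIDE) ? DMA_COHERENT : DMA_NON_COHERENT;
}

/* Map a firmware proximity domain to a NUMA node, if the field is valid. */
static int numa_node_for(unsigned int flags, unsigned int pxm,
			 int (*pxm_to_node)(unsigned int))
{
	if (!(flags & F_PXM_VALID))
		return -1;              /* NUMA_NO_NODE in the kernel */
	return pxm_to_node(pxm);
}
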
1601 struct acpi_iort_smmu *smmu; in arm_smmu_count_resources() local
1603 /* Retrieve SMMU specific data */ in arm_smmu_count_resources()
1604 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_count_resources()
1614 return smmu->context_interrupt_count + 2; in arm_smmu_count_resources()
1620 struct acpi_iort_smmu *smmu; in arm_smmu_init_resources() local
1624 /* Retrieve SMMU specific data */ in arm_smmu_init_resources()
1625 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_init_resources()
1627 res[num_res].start = smmu->base_address; in arm_smmu_init_resources()
1628 res[num_res].end = smmu->base_address + smmu->span - 1; in arm_smmu_init_resources()
1632 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); in arm_smmu_init_resources()
1637 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, in arm_smmu_init_resources()
1641 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); in arm_smmu_init_resources()
1642 for (i = 0; i < smmu->context_interrupt_count; i++) { in arm_smmu_init_resources()
1646 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, in arm_smmu_init_resources()
1654 struct acpi_iort_smmu *smmu; in arm_smmu_dma_configure() local
1657 /* Retrieve SMMU specific data */ in arm_smmu_dma_configure()
1658 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_dma_configure()
1660 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? in arm_smmu_dma_configure()
1663 /* We expect the dma masks to be equivalent for SMMU set-ups */ in arm_smmu_dma_configure()
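
The SMMU v1/v2 path mirrors the v3 one: the resource count is the number of context interrupts plus two (the register window and the global fault interrupt, registered above as "arm-smmu-global"), the interrupt arrays live at fixed offsets inside the node, and coherent DMA is selected by the COHERENT_WALK flag. A minimal sketch of the resource-count arithmetic and the coherency check, with placeholder fields:

#define SMMU_COHERENT_WALK  (1u << 0)   /* placeholder flag bit */

struct smmu_v2_desc {
	unsigned int flags;
	unsigned int context_interrupt_count;
};

/* One MEM resource, one global fault IRQ, plus one IRQ per context bank. */
static unsigned int count_resources(const struct smmu_v2_desc *smmu)
{
	return smmu->context_interrupt_count + 2;
}

/* Coherent page-table walks imply coherent DMA for the SMMU itself. */
static int dma_is_coherent(const struct smmu_v2_desc *smmu)
{
	return !!(smmu->flags & SMMU_COHERENT_WALK);
}
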
1721 /* HiSilicon Hip10/11 Platform uses the same SMMU IP as Hip09 */
1759 .name = "arm-smmu-v3",
1767 .name = "arm-smmu",
1774 .name = "arm-smmu-v3-pmcg",
1905 * If we detect a RC->SMMU mapping, make sure in iort_enable_acs()