Lines Matching +full:ats +full:- +full:supported
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
17 #include <linux/dma-direct.h>
21 #include <linux/pci-ats.h>
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
37 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
38 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
39 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
40 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
48 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
49 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
54 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
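A quick worked example of the two macros above, assuming VTD_PAGE_SHIFT == 12 (4KiB VT-d pages); the values are illustrative only:

	u64 max_addr = __DOMAIN_MAX_ADDR(48);	/* (1ULL << 48) - 1 == 0xffffffffffff */
	u64 max_pfn  = __DOMAIN_MAX_PFN(48);	/* (1ULL << 36) - 1 == 0xfffffffff */
	/* DOMAIN_MAX_PFN() further clamps the PFN against (unsigned long)-1. */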
61 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
76 if (!(re->lo & 1)) in root_entry_lctp()
79 return re->lo & VTD_PAGE_MASK; in root_entry_lctp()
88 if (!(re->hi & 1)) in root_entry_uctp()
91 return re->hi & VTD_PAGE_MASK; in root_entry_uctp()
100 if (*rid_lhs < PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
101 return -1; in device_rid_cmp_key()
103 if (*rid_lhs > PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
113 u16 key = PCI_DEVID(info->bus, info->devfn); in device_rid_cmp()
119 * Looks up an IOMMU-probed device using its source ID.
135 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
136 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key); in device_rbtree_find()
139 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
141 return info ? info->dev : NULL; in device_rbtree_find()
150 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
151 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp); in device_rbtree_insert()
152 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
154 return -EEXIST; in device_rbtree_insert()
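The rbtree above is keyed by the 16-bit PCI source ID. A minimal sketch of that key, assuming the usual PCI_DEVID() packing from <linux/pci.h>:

	u16 rid = ((u16)bus << 8) | devfn;	/* bus in bits 15:8, devfn in bits 7:0 */
	/* device_rid_cmp_key() compares such RIDs, so device_rbtree_find() can
	 * resolve the device behind a given source ID in O(log n). */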
161 struct intel_iommu *iommu = info->iommu; in device_rbtree_remove()
164 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
165 rb_erase(&info->node, &iommu->device_rbtree); in device_rbtree_remove()
166 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
192 u8 atc_required:1; /* ATS is required */
222 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
227 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
234 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
236 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
242 return -EINVAL; in intel_iommu_setup()
262 pr_info("Disable supported super page\n"); in intel_iommu_setup()
271 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
274 pr_notice("Unknown option - '%s'\n", str); in intel_iommu_setup()
288 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
294 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
295 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
302 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
303 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
306 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
310 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
322 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
341 * get a smaller supported agaw for IOMMUs that don't support the default agaw.
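For orientation, a sketch of the agaw/width arithmetic these helpers assume (9 address bits per page-table level):

	width = 30 + agaw * 9;			/* agaw 1 -> 39, 2 -> 48, 3 -> 57 bits */
	agaw  = DIV_ROUND_UP(width - 30, 9);
	/* Hence BIT(2) in the SAGAW computed above is the 48-bit/4-level width
	 * and BIT(3) the 57-bit/5-level width, advertised for first-level
	 * tables only when cap_fl5lp_support() is set. */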
351 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
354 /* Return the super pagesize bitmap if supported. */
360 * 1-level super page supports page size of 2MiB, 2-level super page in domain_super_pgsize_bitmap()
363 if (domain->iommu_superpage == 1) in domain_super_pgsize_bitmap()
365 else if (domain->iommu_superpage == 2) in domain_super_pgsize_bitmap()
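A hedged sketch of the resulting page-size bitmap, including the base 4KiB size that paging_domain_alloc() adds later:

	unsigned long bitmap = SZ_4K;
	if (domain->iommu_superpage == 1)
		bitmap |= SZ_2M;		/* 1-level super page */
	else if (domain->iommu_superpage == 2)
		bitmap |= SZ_2M | SZ_1G;	/* 2-level super page */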
374 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
385 entry = &root->lo; in iommu_context_addr()
388 devfn -= 0x80; in iommu_context_addr()
389 entry = &root->hi; in iommu_context_addr()
400 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_context_addr()
413 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
414 * sub-hierarchy of a candidate PCI-PCI bridge
415 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
416 * @bridge: the candidate PCI-PCI bridge
418 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
431 if (pbridge->subordinate && in is_downstream_to_pci_bridge()
432 pbridge->subordinate->number <= pdev->bus->number && in is_downstream_to_pci_bridge()
433 pbridge->subordinate->busn_res.end >= pdev->bus->number) in is_downstream_to_pci_bridge()
450 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
453 dev_info(&pdev->dev, "failed to run vt-d quirk\n"); in quirk_ioat_snb_local_iommu()
460 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
461 …pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"… in quirk_ioat_snb_local_iommu()
471 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
477 if (pdev->vendor == PCI_VENDOR_ID_INTEL && in iommu_is_dummy()
478 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB && in iommu_is_dummy()
506 dev = &pf_pdev->dev; in device_lookup_iommu()
507 segment = pci_domain_nr(pdev->bus); in device_lookup_iommu()
509 dev = &ACPI_COMPANION(dev)->dev; in device_lookup_iommu()
513 if (pdev && segment != drhd->segment) in device_lookup_iommu()
516 for_each_active_dev_scope(drhd->devices, in device_lookup_iommu()
517 drhd->devices_cnt, i, tmp) { in device_lookup_iommu()
523 if (pdev && pdev->is_virtfn) in device_lookup_iommu()
527 *bus = drhd->devices[i].bus; in device_lookup_iommu()
528 *devfn = drhd->devices[i].devfn; in device_lookup_iommu()
537 if (pdev && drhd->include_all) { in device_lookup_iommu()
540 *bus = pdev->bus->number; in device_lookup_iommu()
541 *devfn = pdev->devfn; in device_lookup_iommu()
559 if (!domain->iommu_coherency) in domain_flush_cache()
568 if (!iommu->root_entry) in free_context_table()
584 iommu_free_page(iommu->root_entry); in free_context_table()
585 iommu->root_entry = NULL; in free_context_table()
599 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
602 pr_info("page table not present at level %d\n", level - 1); in pgtable_walk()
610 level--; in pgtable_walk()
626 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
629 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
633 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
637 rt_entry->hi, rt_entry->lo); in dmar_fault_dump_ptes()
639 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
649 ctx_entry->hi, ctx_entry->lo); in dmar_fault_dump_ptes()
657 level = agaw_to_level(ctx_entry->hi & 7); in dmar_fault_dump_ptes()
658 pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
668 dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
670 /* For request-without-pasid, get the pasid from context entry */ in dmar_fault_dump_ptes()
676 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
686 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
687 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
695 level = pte->val[2] & BIT_ULL(2) ? 5 : 4; in dmar_fault_dump_ptes()
696 pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
698 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
699 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
712 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
719 parent = domain->pgd; in pfn_to_dma_pte()
734 tmp_page = iommu_alloc_page_node(domain->nid, gfp); in pfn_to_dma_pte()
741 if (domain->use_first_level) in pfn_to_dma_pte()
745 if (!try_cmpxchg64(&pte->val, &tmp, pteval)) in pfn_to_dma_pte()
755 level--; in pfn_to_dma_pte()
770 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
773 parent = domain->pgd; in dma_pfn_level_pte()
791 total--; in dma_pfn_level_pte()
823 (void *)pte - (void *)first_pte); in dma_pte_clear_range()
847 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
857 last_pfn < level_pfn + level_size(level) - 1)) { in dma_pte_free_level()
879 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
880 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
883 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
884 iommu_free_page(domain->pgd); in dma_pte_free_pagetable()
885 domain->pgd = NULL; in dma_pte_free_pagetable()
892 know the hardware page-walk will no longer touch them.
902 list_add_tail(&pg->lru, freelist); in dma_pte_list_pagetables()
910 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_list_pagetables()
933 last_pfn >= level_pfn + level_size(level) - 1) { in dma_pte_clear_level()
937 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
945 dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
956 (void *)++last_pte - (void *)first_pte); in dma_pte_clear_level()
970 dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
971 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
974 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
975 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
976 list_add_tail(&pgd_page->lru, freelist); in domain_unmap()
977 domain->pgd = NULL; in domain_unmap()
986 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
989 iommu->name); in iommu_alloc_root_entry()
990 return -ENOMEM; in iommu_alloc_root_entry()
994 iommu->root_entry = root; in iommu_alloc_root_entry()
1005 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1009 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1010 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1012 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1018 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1024 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1027 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1030 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1038 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1041 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1042 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1048 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1071 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1072 iommu->name, type); in __iommu_flush_context()
1077 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1078 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1084 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1090 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1109 iommu->name, type); in __iommu_flush_iotlb()
1113 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1116 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1119 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1120 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1126 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1144 spin_lock_irqsave(&domain->lock, flags); in domain_lookup_dev_info()
1145 list_for_each_entry(info, &domain->devices, link) { in domain_lookup_dev_info()
1146 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1147 info->devfn == devfn) { in domain_lookup_dev_info()
1148 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1152 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1160 * check because it applies only to the built-in QAT devices and it doesn't
1166 if (pdev->vendor != PCI_VENDOR_ID_INTEL) in dev_needs_extra_dtlb_flush()
1169 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
1179 if (!dev_is_pci(info->dev)) in iommu_enable_pci_caps()
1182 pdev = to_pci_dev(info->dev); in iommu_enable_pci_caps()
1183 if (info->ats_supported && pci_ats_page_aligned(pdev) && in iommu_enable_pci_caps()
1185 info->ats_enabled = 1; in iommu_enable_pci_caps()
1192 if (!dev_is_pci(info->dev)) in iommu_disable_pci_caps()
1195 pdev = to_pci_dev(info->dev); in iommu_disable_pci_caps()
1197 if (info->ats_enabled) { in iommu_disable_pci_caps()
1199 info->ats_enabled = 0; in iommu_disable_pci_caps()
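The enable/disable pair above wraps the generic ATS helpers from <linux/pci-ats.h>; roughly (a sketch, not the verbatim driver code):

	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
	    !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
		info->ats_enabled = 1;		/* enable path */
	...
	if (info->ats_enabled) {
		pci_disable_ats(pdev);		/* disable path */
		info->ats_enabled = 0;
	}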
1213 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1216 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1217 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1219 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1225 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1233 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1234 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1235 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1241 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1249 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1250 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1253 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1254 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1255 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1261 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1268 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1269 pr_debug("%s: Number of Domains supported <%d>\n", in iommu_init_domains()
1270 iommu->name, ndomains); in iommu_init_domains()
1272 spin_lock_init(&iommu->lock); in iommu_init_domains()
1274 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1275 if (!iommu->domain_ids) in iommu_init_domains()
1276 return -ENOMEM; in iommu_init_domains()
1280 * with domain-id 0, hence we need to pre-allocate it. We also in iommu_init_domains()
1281 * use domain-id 0 as a marker for non-allocated domain-id, so in iommu_init_domains()
1284 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1287 * VT-d spec rev3.0 (section 6.2.3.1) requires that each pasid in iommu_init_domains()
1288 * entry for first-level or pass-through translation modes should in iommu_init_domains()
1290 * second-level or nested translation. We reserve a domain id for in iommu_init_domains()
1294 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
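Net effect of the two set_bit() calls: domain ID 0 (the Caching Mode / not-yet-allocated marker) and FLPT_DEFAULT_DID stay permanently reserved, so the allocator in domain_attach_iommu() can simply take the first clear bit:

	num = find_first_zero_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
	/* num can never be 0 or FLPT_DEFAULT_DID */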
1301 if (!iommu->domain_ids) in disable_dmar_iommu()
1308 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1312 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1318 if (iommu->domain_ids) { in free_dmar_iommu()
1319 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1320 iommu->domain_ids = NULL; in free_dmar_iommu()
1323 if (iommu->copied_tables) { in free_dmar_iommu()
1324 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1325 iommu->copied_tables = NULL; in free_dmar_iommu()
1331 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1346 if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) in first_level_by_default()
1347 return ecap_flts(iommu->ecap); in first_level_by_default()
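The XOR above covers the asymmetric capability case; as a truth table:

	/*
	 *  ecap_flts  ecap_slts   first_level_by_default()
	 *      1          0       first level (only choice)
	 *      0          1       second level (only choice)
	 *      1          1       fall through to the default policy
	 *      0          0       fall through
	 */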
1356 int num, ret = -ENOSPC; in domain_attach_iommu()
1358 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_attach_iommu()
1363 return -ENOMEM; in domain_attach_iommu()
1365 spin_lock(&iommu->lock); in domain_attach_iommu()
1366 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1368 curr->refcnt++; in domain_attach_iommu()
1369 spin_unlock(&iommu->lock); in domain_attach_iommu()
1374 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1375 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1377 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1381 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1382 info->refcnt = 1; in domain_attach_iommu()
1383 info->did = num; in domain_attach_iommu()
1384 info->iommu = iommu; in domain_attach_iommu()
1385 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1388 ret = xa_err(curr) ? : -EBUSY; in domain_attach_iommu()
1392 spin_unlock(&iommu->lock); in domain_attach_iommu()
1396 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1398 spin_unlock(&iommu->lock); in domain_attach_iommu()
1407 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_detach_iommu()
1410 spin_lock(&iommu->lock); in domain_detach_iommu()
1411 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1412 if (--info->refcnt == 0) { in domain_detach_iommu()
1413 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1414 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1415 domain->nid = NUMA_NO_NODE; in domain_detach_iommu()
1418 spin_unlock(&iommu->lock); in domain_detach_iommu()
1423 if (domain->pgd) { in domain_exit()
1426 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); in domain_exit()
1430 if (WARN_ON(!list_empty(&domain->devices))) in domain_exit()
1433 kfree(domain->qi_batch); in domain_exit()
1439 * in-flight DMA and copied pgtable, but there is no unmapping
1441 * the newly-mapped device. For kdump, at this point, the device
1443 * in-flight DMA will exist, and we don't need to worry anymore
1455 assert_spin_locked(&iommu->lock); in copied_context_tear_down()
1460 if (did_old < cap_ndoms(iommu->cap)) { in copied_context_tear_down()
1461 iommu->flush.flush_context(iommu, did_old, in copied_context_tear_down()
1465 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1473 * It's a non-present to present mapping. If hardware doesn't cache
1474 * non-present entries we only need to flush the write-buffer. If it
1475 * _does_ cache non-present entries, then it does so in the special
1481 if (cap_caching_mode(iommu->cap)) { in context_present_cache_flush()
1482 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1486 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
1500 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
1507 spin_lock(&iommu->lock); in domain_context_mapping_one()
1508 ret = -ENOMEM; in domain_context_mapping_one()
1521 if (info && info->ats_supported) in domain_context_mapping_one()
1527 context_set_address_width(context, domain->agaw); in domain_context_mapping_one()
1531 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1537 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1545 struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev); in domain_context_mapping_cb()
1546 struct intel_iommu *iommu = info->iommu; in domain_context_mapping_cb()
1557 struct intel_iommu *iommu = info->iommu; in domain_context_mapping()
1558 u8 bus = info->bus, devfn = info->devfn; in domain_context_mapping()
1574 support = domain->iommu_superpage; in hardware_largepage_caps()
1588 support--; in hardware_largepage_caps()
1612 start_pfn + lvl_pages - 1, in switch_to_super_page()
1637 if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1))) in __domain_mapping()
1638 return -EINVAL; in __domain_mapping()
1641 return -EINVAL; in __domain_mapping()
1643 if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) { in __domain_mapping()
1644 …pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a … in __domain_mapping()
1645 return -EINVAL; in __domain_mapping()
1650 if (domain->use_first_level) { in __domain_mapping()
1656 domain->has_mappings = true; in __domain_mapping()
1670 return -ENOMEM; in __domain_mapping()
1683 end_pfn = iov_pfn + pages_to_remove - 1; in __domain_mapping()
1694 if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) { in __domain_mapping()
1699 dumps--; in __domain_mapping()
1705 nr_pages -= lvl_pages; in __domain_mapping()
1726 (void *)pte - (void *)first_pte); in __domain_mapping()
1736 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one()
1740 spin_lock(&iommu->lock); in domain_context_clear_one()
1743 spin_unlock(&iommu->lock); in domain_context_clear_one()
1750 spin_unlock(&iommu->lock); in domain_context_clear_one()
1796 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
1799 level = agaw_to_level(domain->agaw); in domain_setup_first_level()
1801 return -EINVAL; in domain_setup_first_level()
1806 if (domain->force_snooping) in domain_setup_first_level()
1818 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device()
1826 info->domain = domain; in dmar_domain_attach_device()
1827 spin_lock_irqsave(&domain->lock, flags); in dmar_domain_attach_device()
1828 list_add(&info->link, &domain->devices); in dmar_domain_attach_device()
1829 spin_unlock_irqrestore(&domain->lock, flags); in dmar_domain_attach_device()
1836 else if (domain->use_first_level) in dmar_domain_attach_device()
1860 * device_rmrr_is_relaxable - Test whether the RMRR of this device
1891 struct intel_iommu *iommu = info->iommu; in device_def_domain_type()
1897 if (!ecap_pass_through(iommu->ecap)) in device_def_domain_type()
1915 * (for example, while enabling interrupt-remapping) then in intel_iommu_init_qi()
1918 if (!iommu->qi) { in intel_iommu_init_qi()
1922 dmar_fault(-1, iommu); in intel_iommu_init_qi()
1924 * Disable queued invalidation if supported and already enabled in intel_iommu_init_qi()
1934 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
1935 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
1937 iommu->name); in intel_iommu_init_qi()
1939 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
1940 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
1941 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
1991 ret = -ENOMEM; in copy_context_table()
1997 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); in copy_context_table()
2011 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2012 set_bit(did, iommu->domain_ids); in copy_context_table()
2039 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2050 return -EINVAL; in copy_translation_tables()
2052 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2053 if (!iommu->copied_tables) in copy_translation_tables()
2054 return -ENOMEM; in copy_translation_tables()
2058 return -EINVAL; in copy_translation_tables()
2062 return -ENOMEM; in copy_translation_tables()
2064 /* This is too big for the stack - allocate it from slab */ in copy_translation_tables()
2066 ret = -ENOMEM; in copy_translation_tables()
2076 iommu->name, bus); in copy_translation_tables()
2081 spin_lock(&iommu->lock); in copy_translation_tables()
2090 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2097 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2100 spin_unlock(&iommu->lock); in copy_translation_tables()
2104 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2121 if (drhd->ignored) { in init_dmars()
2129 * than the smallest supported. in init_dmars()
2132 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2150 iommu->name); in init_dmars()
2163 pr_info("Translation already enabled - trying to copy translation structures\n"); in init_dmars()
2169 * enabled - but failed to copy over the in init_dmars()
2170 * old root-entry table. Try to proceed in init_dmars()
2172 * allocating a clean root-entry table. in init_dmars()
2177 iommu->name); in init_dmars()
2182 iommu->name); in init_dmars()
2209 if (drhd->ignored) { in init_dmars()
2221 if (ecap_prs(iommu->ecap)) { in init_dmars()
2256 if (!drhd->include_all) { in init_no_remapping_devices()
2257 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2258 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2261 if (i == drhd->devices_cnt) in init_no_remapping_devices()
2262 drhd->ignored = 1; in init_no_remapping_devices()
2267 if (drhd->include_all) in init_no_remapping_devices()
2270 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2271 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2274 if (i < drhd->devices_cnt) in init_no_remapping_devices()
2279 drhd->gfx_dedicated = 1; in init_no_remapping_devices()
2281 drhd->ignored = 1; in init_no_remapping_devices()
2293 if (iommu->qi) { in init_iommu_hw()
2301 if (drhd->ignored) { in init_iommu_hw()
2326 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2328 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2344 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2346 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2347 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2348 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2349 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2350 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2351 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2352 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2353 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2355 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2376 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2378 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2379 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2380 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2381 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2382 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2383 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2384 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2385 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2387 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
2407 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || in rmrr_sanity_check()
2408 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || in rmrr_sanity_check()
2409 rmrr->end_address <= rmrr->base_address || in rmrr_sanity_check()
2411 return -EINVAL; in rmrr_sanity_check()
2424 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" in dmar_parse_one_rmrr()
2426 rmrr->base_address, rmrr->end_address, in dmar_parse_one_rmrr()
2437 rmrru->hdr = header; in dmar_parse_one_rmrr()
2439 rmrru->base_address = rmrr->base_address; in dmar_parse_one_rmrr()
2440 rmrru->end_address = rmrr->end_address; in dmar_parse_one_rmrr()
2442 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), in dmar_parse_one_rmrr()
2443 ((void *)rmrr) + rmrr->header.length, in dmar_parse_one_rmrr()
2444 &rmrru->devices_cnt); in dmar_parse_one_rmrr()
2445 if (rmrru->devices_cnt && rmrru->devices == NULL) in dmar_parse_one_rmrr()
2448 list_add(&rmrru->list, &dmar_rmrr_units); in dmar_parse_one_rmrr()
2454 return -ENOMEM; in dmar_parse_one_rmrr()
2464 tmp = (struct acpi_dmar_atsr *)atsru->hdr; in dmar_find_atsr()
2465 if (atsr->segment != tmp->segment) in dmar_find_atsr()
2467 if (atsr->header.length != tmp->header.length) in dmar_find_atsr()
2469 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
2489 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); in dmar_parse_one_atsr()
2491 return -ENOMEM; in dmar_parse_one_atsr()
2498 atsru->hdr = (void *)(atsru + 1); in dmar_parse_one_atsr()
2499 memcpy(atsru->hdr, hdr, hdr->length); in dmar_parse_one_atsr()
2500 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
2501 if (!atsru->include_all) { in dmar_parse_one_atsr()
2502 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), in dmar_parse_one_atsr()
2503 (void *)atsr + atsr->header.length, in dmar_parse_one_atsr()
2504 &atsru->devices_cnt); in dmar_parse_one_atsr()
2505 if (atsru->devices_cnt && atsru->devices == NULL) { in dmar_parse_one_atsr()
2507 return -ENOMEM; in dmar_parse_one_atsr()
2511 list_add_rcu(&atsru->list, &dmar_atsr_units); in dmar_parse_one_atsr()
2518 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); in intel_iommu_free_atsr()
2530 list_del_rcu(&atsru->list); in dmar_release_one_atsr()
2550 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { in dmar_check_one_atsr()
2551 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, in dmar_check_one_atsr()
2553 return -EBUSY; in dmar_check_one_atsr()
2566 tmp = (struct acpi_dmar_satc *)satcu->hdr; in dmar_find_satc()
2567 if (satc->segment != tmp->segment) in dmar_find_satc()
2569 if (satc->header.length != tmp->header.length) in dmar_find_satc()
2571 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
2591 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL); in dmar_parse_one_satc()
2593 return -ENOMEM; in dmar_parse_one_satc()
2595 satcu->hdr = (void *)(satcu + 1); in dmar_parse_one_satc()
2596 memcpy(satcu->hdr, hdr, hdr->length); in dmar_parse_one_satc()
2597 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
2598 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1), in dmar_parse_one_satc()
2599 (void *)satc + satc->header.length, in dmar_parse_one_satc()
2600 &satcu->devices_cnt); in dmar_parse_one_satc()
2601 if (satcu->devices_cnt && !satcu->devices) { in dmar_parse_one_satc()
2603 return -ENOMEM; in dmar_parse_one_satc()
2605 list_add_rcu(&satcu->list, &dmar_satc_units); in dmar_parse_one_satc()
2612 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add()
2618 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
2629 if (dmaru->ignored) { in intel_iommu_add()
2641 if (ecap_prs(iommu->ecap)) { in intel_iommu_add()
2667 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug()
2672 return -EINVAL; in dmar_iommu_hotplug()
2691 list_del(&rmrru->list); in intel_iommu_free_dmars()
2692 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); in intel_iommu_free_dmars()
2697 list_del(&atsru->list); in intel_iommu_free_dmars()
2701 list_del(&satcu->list); in intel_iommu_free_dmars()
2702 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt); in intel_iommu_free_dmars()
2718 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_find_matched_satc_unit()
2719 if (satc->segment != pci_domain_nr(dev->bus)) in dmar_find_matched_satc_unit()
2721 for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) in dmar_find_matched_satc_unit()
2745 * This device supports ATS as it is in the SATC table. in dmar_ats_supported()
2746 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
2748 * ATS, hence OS should not enable this device ATS in dmar_ats_supported()
2751 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
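Reading that return expression as a policy table (the rationale is the comment above: in legacy mode the OS must not enable ATS for a SATC-listed device itself):

	/*
	 *  atc_required  sm_supported(iommu)   dmar_ats_supported()
	 *       0               any            true
	 *       1                1             true
	 *       1                0             false
	 */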
2753 for (bus = dev->bus; bus; bus = bus->parent) { in dmar_ats_supported()
2754 bridge = bus->self; in dmar_ats_supported()
2755 /* If it's an integrated device, allow ATS */ in dmar_ats_supported()
2758 /* Connected via non-PCIe: no ATS */ in dmar_ats_supported()
2769 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_ats_supported()
2770 if (atsr->segment != pci_domain_nr(dev->bus)) in dmar_ats_supported()
2773 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) in dmar_ats_supported()
2774 if (tmp == &bridge->dev) in dmar_ats_supported()
2777 if (atsru->include_all) in dmar_ats_supported()
2801 rmrr = container_of(rmrru->hdr, in dmar_iommu_notify_scope_dev()
2803 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2805 ((void *)rmrr) + rmrr->header.length, in dmar_iommu_notify_scope_dev()
2806 rmrr->segment, rmrru->devices, in dmar_iommu_notify_scope_dev()
2807 rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2810 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2811 dmar_remove_dev_scope(info, rmrr->segment, in dmar_iommu_notify_scope_dev()
2812 rmrru->devices, rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2817 if (atsru->include_all) in dmar_iommu_notify_scope_dev()
2820 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_iommu_notify_scope_dev()
2821 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2823 (void *)atsr + atsr->header.length, in dmar_iommu_notify_scope_dev()
2824 atsr->segment, atsru->devices, in dmar_iommu_notify_scope_dev()
2825 atsru->devices_cnt); in dmar_iommu_notify_scope_dev()
2830 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2831 if (dmar_remove_dev_scope(info, atsr->segment, in dmar_iommu_notify_scope_dev()
2832 atsru->devices, atsru->devices_cnt)) in dmar_iommu_notify_scope_dev()
2837 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_iommu_notify_scope_dev()
2838 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2840 (void *)satc + satc->header.length, in dmar_iommu_notify_scope_dev()
2841 satc->segment, satcu->devices, in dmar_iommu_notify_scope_dev()
2842 satcu->devices_cnt); in dmar_iommu_notify_scope_dev()
2847 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2848 if (dmar_remove_dev_scope(info, satc->segment, in dmar_iommu_notify_scope_dev()
2849 satcu->devices, satcu->devices_cnt)) in dmar_iommu_notify_scope_dev()
2879 iommu = drhd->iommu; in intel_iommu_shutdown()
2900 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
2910 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
2918 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
2926 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
2934 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
2943 bitmap_weight(iommu->domain_ids, in domains_used_show()
2944 cap_ndoms(iommu->cap))); in domains_used_show()
2959 .name = "intel-iommu",
2973 if (pdev->external_facing) { in has_external_pci()
2987 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
2990 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3005 /* To avoid a -Wunused-but-set-variable warning. */ in probe_acpi_namespace_devices()
3011 for_each_active_dev_scope(drhd->devices, in probe_acpi_namespace_devices()
3012 drhd->devices_cnt, i, dev) { in probe_acpi_namespace_devices()
3016 if (dev->bus != &acpi_bus_type) in probe_acpi_namespace_devices()
3021 mutex_lock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3023 &adev->physical_node_list, node) { in probe_acpi_namespace_devices()
3024 ret = iommu_probe_device(pn->dev); in probe_acpi_namespace_devices()
3028 mutex_unlock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3045 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3055 int ret = -ENODEV; in intel_iommu_init()
3141 * page-selective invalidations that are required for efficient in intel_iommu_init()
3144 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3146 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3151 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3153 "%s", iommu->name); in intel_iommu_init()
3160 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3171 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3199 * NB - intel-iommu lacks any sort of reference counting for the users of
3206 if (!dev_is_pci(info->dev)) { in domain_context_clear()
3207 domain_context_clear_one(info, info->bus, info->devfn); in domain_context_clear()
3211 pci_for_each_dma_alias(to_pci_dev(info->dev), in domain_context_clear()
3223 struct intel_iommu *iommu = info->iommu; in device_block_translation()
3226 if (info->domain) in device_block_translation()
3227 cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); in device_block_translation()
3238 if (!info->domain) in device_block_translation()
3241 spin_lock_irqsave(&info->domain->lock, flags); in device_block_translation()
3242 list_del(&info->link); in device_block_translation()
3243 spin_unlock_irqrestore(&info->domain->lock, flags); in device_block_translation()
3245 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3246 info->domain = NULL; in device_block_translation()
3274 return cap_fl1gp_support(iommu->cap) ? 2 : 1; in iommu_superpage_capability()
3276 return fls(cap_super_page_val(iommu->cap)); in iommu_superpage_capability()
3282 struct intel_iommu *iommu = info->iommu; in paging_domain_alloc()
3288 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3290 INIT_LIST_HEAD(&domain->devices); in paging_domain_alloc()
3291 INIT_LIST_HEAD(&domain->dev_pasids); in paging_domain_alloc()
3292 INIT_LIST_HEAD(&domain->cache_tags); in paging_domain_alloc()
3293 spin_lock_init(&domain->lock); in paging_domain_alloc()
3294 spin_lock_init(&domain->cache_lock); in paging_domain_alloc()
3295 xa_init(&domain->iommu_array); in paging_domain_alloc()
3297 domain->nid = dev_to_node(dev); in paging_domain_alloc()
3298 domain->use_first_level = first_stage; in paging_domain_alloc()
3301 addr_width = agaw_to_width(iommu->agaw); in paging_domain_alloc()
3302 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_alloc()
3303 addr_width = cap_mgaw(iommu->cap); in paging_domain_alloc()
3304 domain->gaw = addr_width; in paging_domain_alloc()
3305 domain->agaw = iommu->agaw; in paging_domain_alloc()
3306 domain->max_addr = __DOMAIN_MAX_ADDR(addr_width); in paging_domain_alloc()
3309 domain->iommu_coherency = iommu_paging_structure_coherency(iommu); in paging_domain_alloc()
3312 domain->domain.pgsize_bitmap = SZ_4K; in paging_domain_alloc()
3313 domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); in paging_domain_alloc()
3314 domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); in paging_domain_alloc()
3317 * IOVA aperture: First-level translation restricts the input-address in paging_domain_alloc()
3319 * as address bit [N-1], where N is 48-bits with 4-level paging and in paging_domain_alloc()
3320 * 57-bits with 5-level paging). Hence, skip bit [N-1]. in paging_domain_alloc()
3322 domain->domain.geometry.force_aperture = true; in paging_domain_alloc()
3323 domain->domain.geometry.aperture_start = 0; in paging_domain_alloc()
3325 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in paging_domain_alloc()
3327 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in paging_domain_alloc()
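A worked example of the aperture split above for gaw == 48: first-stage domains skip the canonical bit [N-1], second-stage domains get the full width:

	/* first stage:  aperture_end = __DOMAIN_MAX_ADDR(47) = (1ULL << 47) - 1 */
	/* second stage: aperture_end = __DOMAIN_MAX_ADDR(48) = (1ULL << 48) - 1 */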
3330 domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL); in paging_domain_alloc()
3331 if (!domain->pgd) { in paging_domain_alloc()
3333 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3335 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in paging_domain_alloc()
3347 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging_flags()
3354 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3356 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3358 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3366 if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) in intel_iommu_domain_alloc_paging_flags()
3367 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3376 domain = &dmar_domain->domain; in intel_iommu_domain_alloc_paging_flags()
3377 domain->type = IOMMU_DOMAIN_UNMANAGED; in intel_iommu_domain_alloc_paging_flags()
3378 domain->owner = &intel_iommu_ops; in intel_iommu_domain_alloc_paging_flags()
3379 domain->ops = intel_iommu_ops.default_domain_ops; in intel_iommu_domain_alloc_paging_flags()
3382 dmar_domain->nested_parent = true; in intel_iommu_domain_alloc_paging_flags()
3383 INIT_LIST_HEAD(&dmar_domain->s1_domains); in intel_iommu_domain_alloc_paging_flags()
3384 spin_lock_init(&dmar_domain->s1_lock); in intel_iommu_domain_alloc_paging_flags()
3388 if (dmar_domain->use_first_level) { in intel_iommu_domain_alloc_paging_flags()
3390 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3392 domain->dirty_ops = &intel_dirty_ops; in intel_iommu_domain_alloc_paging_flags()
3402 WARN_ON(dmar_domain->nested_parent && in intel_iommu_domain_free()
3403 !list_empty(&dmar_domain->s1_domains)); in intel_iommu_domain_free()
3411 struct intel_iommu *iommu = info->iommu; in paging_domain_compatible()
3414 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in paging_domain_compatible()
3415 return -EPERM; in paging_domain_compatible()
3417 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in paging_domain_compatible()
3418 return -EINVAL; in paging_domain_compatible()
3420 if (domain->dirty_ops && !ssads_supported(iommu)) in paging_domain_compatible()
3421 return -EINVAL; in paging_domain_compatible()
3423 if (dmar_domain->iommu_coherency != in paging_domain_compatible()
3425 return -EINVAL; in paging_domain_compatible()
3427 if (dmar_domain->iommu_superpage != in paging_domain_compatible()
3428 iommu_superpage_capability(iommu, dmar_domain->use_first_level)) in paging_domain_compatible()
3429 return -EINVAL; in paging_domain_compatible()
3431 if (dmar_domain->use_first_level && in paging_domain_compatible()
3432 (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) in paging_domain_compatible()
3433 return -EINVAL; in paging_domain_compatible()
3436 addr_width = agaw_to_width(iommu->agaw); in paging_domain_compatible()
3437 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_compatible()
3438 addr_width = cap_mgaw(iommu->cap); in paging_domain_compatible()
3440 if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) in paging_domain_compatible()
3441 return -EINVAL; in paging_domain_compatible()
3444 context_copied(iommu, info->bus, info->devfn)) in paging_domain_compatible()
3476 if (dmar_domain->set_pte_snp) in intel_iommu_map()
3480 if (dmar_domain->max_addr < max_addr) { in intel_iommu_map()
3484 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; in intel_iommu_map()
3488 __func__, dmar_domain->gaw, max_addr); in intel_iommu_map()
3489 return -EFAULT; in intel_iommu_map()
3491 dmar_domain->max_addr = max_addr; in intel_iommu_map()
3510 return -EINVAL; in intel_iommu_map_pages()
3513 return -EINVAL; in intel_iommu_map_pages()
3531 size argument if it happens to be a large-page mapping. */ in intel_iommu_unmap()
3540 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; in intel_iommu_unmap()
3542 domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist); in intel_iommu_unmap()
3544 if (dmar_domain->max_addr == iova + size) in intel_iommu_unmap()
3545 dmar_domain->max_addr = iova; in intel_iommu_unmap()
3548 * We do not use page-selective IOTLB invalidation in flush queue, in intel_iommu_unmap()
3571 cache_tag_flush_range(to_dmar_domain(domain), gather->start, in intel_iommu_tlb_sync()
3572 gather->end, list_empty(&gather->freelist)); in intel_iommu_tlb_sync()
3573 iommu_put_pages_list(&gather->freelist); in intel_iommu_tlb_sync()
3589 VTD_PAGE_SHIFT) - 1)); in intel_iommu_iova_to_phys()
3599 assert_spin_locked(&domain->lock); in domain_support_force_snooping()
3600 list_for_each_entry(info, &domain->devices, link) { in domain_support_force_snooping()
3601 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
3614 assert_spin_locked(&domain->lock); in domain_set_force_snooping()
3616 * Second level page table supports per-PTE snoop control. The in domain_set_force_snooping()
3619 if (!domain->use_first_level) { in domain_set_force_snooping()
3620 domain->set_pte_snp = true; in domain_set_force_snooping()
3624 list_for_each_entry(info, &domain->devices, link) in domain_set_force_snooping()
3625 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
3634 if (dmar_domain->force_snooping) in intel_iommu_enforce_cache_coherency()
3637 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3639 (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { in intel_iommu_enforce_cache_coherency()
3640 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3645 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency()
3646 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3662 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
3664 return ssads_supported(info->iommu); in intel_iommu_capable()
3679 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
3680 return ERR_PTR(-ENODEV); in intel_iommu_probe_device()
3684 return ERR_PTR(-ENOMEM); in intel_iommu_probe_device()
3687 info->bus = pdev->bus->number; in intel_iommu_probe_device()
3688 info->devfn = pdev->devfn; in intel_iommu_probe_device()
3689 info->segment = pci_domain_nr(pdev->bus); in intel_iommu_probe_device()
3691 info->bus = bus; in intel_iommu_probe_device()
3692 info->devfn = devfn; in intel_iommu_probe_device()
3693 info->segment = iommu->segment; in intel_iommu_probe_device()
3696 info->dev = dev; in intel_iommu_probe_device()
3697 info->iommu = iommu; in intel_iommu_probe_device()
3699 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
3702 info->ats_supported = 1; in intel_iommu_probe_device()
3703 info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev); in intel_iommu_probe_device()
3712 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
3713 info->pfsid = pci_dev_id(pci_physfn(pdev)); in intel_iommu_probe_device()
3714 info->ats_qdep = pci_ats_queue_depth(pdev); in intel_iommu_probe_device()
3721 info->pasid_supported = features | 1; in intel_iommu_probe_device()
3724 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
3726 info->pri_supported = 1; in intel_iommu_probe_device()
3745 if (!context_copied(iommu, info->bus, info->devfn)) { in intel_iommu_probe_device()
3756 * device is undefined if you enable PASID support after ATS support. in intel_iommu_probe_device()
3760 if (info->pasid_supported && in intel_iommu_probe_device()
3761 !pci_enable_pasid(pdev, info->pasid_supported & ~1)) in intel_iommu_probe_device()
3762 info->pasid_enabled = 1; in intel_iommu_probe_device()
3764 return &iommu->iommu; in intel_iommu_probe_device()
3778 struct intel_iommu *iommu = info->iommu; in intel_iommu_release_device()
3780 if (info->pasid_enabled) { in intel_iommu_release_device()
3782 info->pasid_enabled = 0; in intel_iommu_release_device()
3785 mutex_lock(&iommu->iopf_lock); in intel_iommu_release_device()
3788 mutex_unlock(&iommu->iopf_lock); in intel_iommu_release_device()
3791 !context_copied(iommu, info->bus, info->devfn)) in intel_iommu_release_device()
3811 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, in intel_iommu_get_resv_regions()
3821 length = rmrr->end_address - rmrr->base_address + 1; in intel_iommu_get_resv_regions()
3826 resv = iommu_alloc_resv_region(rmrr->base_address, in intel_iommu_get_resv_regions()
3832 list_add_tail(&resv->list, head); in intel_iommu_get_resv_regions()
3841 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { in intel_iommu_get_resv_regions()
3846 list_add_tail(®->list, head); in intel_iommu_get_resv_regions()
3852 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, in intel_iommu_get_resv_regions()
3856 list_add_tail(®->list, head); in intel_iommu_get_resv_regions()
3872 return -EINVAL; in intel_iommu_enable_sva()
3874 iommu = info->iommu; in intel_iommu_enable_sva()
3876 return -EINVAL; in intel_iommu_enable_sva()
3878 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
3879 return -ENODEV; in intel_iommu_enable_sva()
3881 if (!info->pasid_enabled || !info->ats_enabled) in intel_iommu_enable_sva()
3882 return -EINVAL; in intel_iommu_enable_sva()
3885 * Devices having device-specific I/O fault handling should not in intel_iommu_enable_sva()
3887 * capability of device-specific IOPF. Therefore, IOMMU can only in intel_iommu_enable_sva()
3888 * default that if the device driver enables SVA on a non-PRI in intel_iommu_enable_sva()
3891 if (!info->pri_supported) in intel_iommu_enable_sva()
3895 if (!info->pri_enabled) in intel_iommu_enable_sva()
3896 return -EINVAL; in intel_iommu_enable_sva()
3903 struct intel_iommu *iommu = info->iommu; in context_flip_pri()
3904 u8 bus = info->bus, devfn = info->devfn; in context_flip_pri()
3908 spin_lock(&iommu->lock); in context_flip_pri()
3910 spin_unlock(&iommu->lock); in context_flip_pri()
3911 return -EINVAL; in context_flip_pri()
3916 spin_unlock(&iommu->lock); in context_flip_pri()
3917 return -ENODEV; in context_flip_pri()
3926 if (!ecap_coherent(iommu->ecap)) in context_flip_pri()
3929 spin_unlock(&iommu->lock); in context_flip_pri()
3941 if (!pdev || !info || !info->ats_enabled || !info->pri_supported) in intel_iommu_enable_iopf()
3942 return -ENODEV; in intel_iommu_enable_iopf()
3944 if (info->pri_enabled) in intel_iommu_enable_iopf()
3945 return -EBUSY; in intel_iommu_enable_iopf()
3947 iommu = info->iommu; in intel_iommu_enable_iopf()
3949 return -EINVAL; in intel_iommu_enable_iopf()
3952 if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev)) in intel_iommu_enable_iopf()
3953 return -EINVAL; in intel_iommu_enable_iopf()
3959 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3971 info->pri_enabled = 1; in intel_iommu_enable_iopf()
3977 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3985 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf()
3987 if (!info->pri_enabled) in intel_iommu_disable_iopf()
3988 return -EINVAL; in intel_iommu_disable_iopf()
3997 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_iopf()
4008 info->pri_enabled = 0; in intel_iommu_disable_iopf()
4024 return -ENODEV; in intel_iommu_dev_enable_feat()
4039 return -ENODEV; in intel_iommu_dev_disable_feat()
4047 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4057 if (pdev->untrusted) { in risky_device()
4060 pdev->vendor, pdev->device); in risky_device()
4070 cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1); in intel_iommu_iotlb_sync_map()
4080 struct intel_iommu *iommu = info->iommu; in domain_remove_dev_pasid()
4088 if (domain->type == IOMMU_DOMAIN_IDENTITY) in domain_remove_dev_pasid()
4092 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4093 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { in domain_remove_dev_pasid()
4094 if (curr->dev == dev && curr->pasid == pasid) { in domain_remove_dev_pasid()
4095 list_del(&curr->link_domain); in domain_remove_dev_pasid()
4100 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4116 intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); in blocking_domain_set_dev_pasid()
4128 struct intel_iommu *iommu = info->iommu; in domain_add_dev_pasid()
4135 return ERR_PTR(-ENOMEM); in domain_add_dev_pasid()
4145 dev_pasid->dev = dev; in domain_add_dev_pasid()
4146 dev_pasid->pasid = pasid; in domain_add_dev_pasid()
4147 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4148 list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); in domain_add_dev_pasid()
4149 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4165 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid()
4169 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in intel_iommu_set_dev_pasid()
4170 return -EINVAL; in intel_iommu_set_dev_pasid()
4173 return -EOPNOTSUPP; in intel_iommu_set_dev_pasid()
4175 if (domain->dirty_ops) in intel_iommu_set_dev_pasid()
4176 return -EINVAL; in intel_iommu_set_dev_pasid()
4178 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4179 return -EBUSY; in intel_iommu_set_dev_pasid()
4189 if (dmar_domain->use_first_level) in intel_iommu_set_dev_pasid()
4212 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info()
4217 return ERR_PTR(-ENOMEM); in intel_iommu_hw_info()
4219 vtd->flags = IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17; in intel_iommu_hw_info()
4220 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4221 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4229 * hold the domain->lock when calling it.
4237 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4253 spin_lock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4254 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4255 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4256 ret = device_set_dirty_tracking(&s1_domain->devices, enable); in parent_domain_set_dirty_tracking()
4257 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4261 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4265 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4266 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4267 device_set_dirty_tracking(&s1_domain->devices, in parent_domain_set_dirty_tracking()
4268 domain->dirty_tracking); in parent_domain_set_dirty_tracking()
4269 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4271 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4281 spin_lock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4282 if (dmar_domain->dirty_tracking == enable) in intel_iommu_set_dirty_tracking()
4285 ret = device_set_dirty_tracking(&dmar_domain->devices, enable); in intel_iommu_set_dirty_tracking()
4289 if (dmar_domain->nested_parent) { in intel_iommu_set_dirty_tracking()
4295 dmar_domain->dirty_tracking = enable; in intel_iommu_set_dirty_tracking()
4297 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4302 device_set_dirty_tracking(&dmar_domain->devices, in intel_iommu_set_dirty_tracking()
4303 dmar_domain->dirty_tracking); in intel_iommu_set_dirty_tracking()
4304 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4314 unsigned long end = iova + size - 1; in intel_iommu_read_and_clear_dirty()
4323 if (!dmar_domain->dirty_tracking && dirty->bitmap) in intel_iommu_read_and_clear_dirty()
4324 return -EINVAL; in intel_iommu_read_and_clear_dirty()
4354 struct intel_iommu *iommu = info->iommu; in context_setup_pass_through()
4357 spin_lock(&iommu->lock); in context_setup_pass_through()
4360 spin_unlock(&iommu->lock); in context_setup_pass_through()
4361 return -ENOMEM; in context_setup_pass_through()
4365 spin_unlock(&iommu->lock); in context_setup_pass_through()
4375 * AGAW value supported by hardware. And ASR is ignored by hardware. in context_setup_pass_through()
4377 context_set_address_width(context, iommu->msagaw); in context_setup_pass_through()
4381 if (!ecap_coherent(iommu->ecap)) in context_setup_pass_through()
4384 spin_unlock(&iommu->lock); in context_setup_pass_through()
4401 return context_setup_pass_through(dev, info->bus, info->devfn); in device_setup_pass_through()
4410 struct intel_iommu *iommu = info->iommu; in identity_domain_attach_dev()
4434 struct intel_iommu *iommu = info->iommu; in identity_domain_set_dev_pasid()
4438 return -EOPNOTSUPP; in identity_domain_set_dev_pasid()
4542 pci_info(dev, "Forcing write-buffer flush capability\n"); in quirk_iommu_rwbf()
4595 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4614 message if VT-d is actually disabled.
4635 known-broken BIOSes _don't_ actually hide it, so far. */ in check_tylersburg_isoch()
4652 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ in check_tylersburg_isoch()
4679 * Here we deal with a device TLB defect where the device may inadvertently issue ATS
4710 if (likely(!info->dtlb_extra_inval)) in quirk_extra_dev_tlb_flush()
4713 sid = PCI_DEVID(info->bus, info->devfn); in quirk_extra_dev_tlb_flush()
4715 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4718 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4728 * VT-d spec. The VT-d hardware implementation may support some but not
4733 * - 0: Command successful without any error;
4734 * - Negative: software error value;
4735 * - Nonzero positive: failure status code defined in Table 48.
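A minimal caller sketch for that return convention, assuming the (iommu, opcode, oa, ob) argument order suggested by the register writes below; the -EIO mapping is the caller's choice, not part of this file:

	ret = ecmd_submit_sync(iommu, op, oa, ob);
	if (ret < 0)		/* software error, e.g. -EBUSY or -ETIMEDOUT */
		return ret;
	if (ret)		/* non-zero hardware status code (Table 48) */
		return -EIO;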
4743 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
4744 return -ENODEV; in ecmd_submit_sync()
4746 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
4748 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
4750 ret = -EBUSY; in ecmd_submit_sync()
4756 * - There is no side effect if an ecmd doesn't require an in ecmd_submit_sync()
4758 * - It's not invoked in any critical path. The extra MMIO in ecmd_submit_sync()
4761 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
4762 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
4768 ret = -ETIMEDOUT; in ecmd_submit_sync()
4774 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()