Lines Matching +full:iommu +full:- +full:base
1 // SPDX-License-Identifier: GPL-2.0
3 #define pr_fmt(fmt) "DMAR-IR: " fmt
21 #include <asm/pci-direct.h>
24 #include "iommu.h"
26 #include "../iommu-pages.h"
29 struct intel_iommu *iommu;
36 struct intel_iommu *iommu;
43 struct intel_iommu *iommu;
68 * ->dmar_global_lock
69 * ->irq_2_ir_lock
70 * ->qi->q_lock
71 * ->iommu->register_lock
74 * in a single-threaded environment with interrupts disabled, so no need to take
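A hedged illustration of what the documented ordering implies for any path that touches the remapping table; the helper below is hypothetical, and only the two lock names come from the comment above:

static void example_ordered_irte_update(void)
{
	unsigned long flags;

	down_read(&dmar_global_lock);			/* outermost lock */
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);	/* then per-IRTE state */
	/* ... update an IRTE; a QI flush here may take qi->q_lock ... */
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	up_read(&dmar_global_lock);
}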
80 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
84 static bool ir_pre_enabled(struct intel_iommu *iommu)
86 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
89 static void clear_ir_pre_enabled(struct intel_iommu *iommu)
91 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
94 static void init_ir_status(struct intel_iommu *iommu)
98 gsts = readl(iommu->reg + DMAR_GSTS_REG);
100 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
103 static int alloc_irte(struct intel_iommu *iommu,
106 struct ir_table *table = iommu->ir_table;
112 return -1;
119 if (mask > ecap_max_handle_mask(iommu->ecap)) {
122 ecap_max_handle_mask(iommu->ecap));
123 return -1;
127 index = bitmap_find_free_region(table->bitmap,
130 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
132 irq_iommu->iommu = iommu;
133 irq_iommu->irte_index = index;
134 irq_iommu->sub_handle = 0;
135 irq_iommu->irte_mask = mask;
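The elided sizing logic just above line 119 most likely rounds a multi-MSI request up to a power of two, so that a whole aligned block of IRTEs can be reserved and later addressed via sub_handle. A hedged sketch of that step (surrounding names come from the excerpt; the exact elided code may differ):

/* Sketch: derive the allocation mask for 'count' interrupts. */
unsigned int mask = 0;

if (count > 1) {
	count = __roundup_pow_of_two(count);	/* block must be 2^n entries */
	mask  = ilog2(count);			/* log2 of block size, checked */
}						/* against ecap_max_handle_mask() */

/* bitmap_find_free_region(table->bitmap, INTR_REMAP_TABLE_ENTRIES, mask)
 * then reserves a naturally aligned run of 2^mask entries. */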
142 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
152 return qi_submit_sync(iommu, &desc, 1, 0);
158 struct intel_iommu *iommu;
164 return -1;
168 iommu = irq_iommu->iommu;
170 index = irq_iommu->irte_index + irq_iommu->sub_handle;
171 irte = &iommu->ir_table->base[index];
173 if ((irte->pst == 1) || (irte_modified->pst == 1)) {
175 * We use cmpxchg16 to atomically update the 128-bit IRTE,
180 u128 old = irte->irte;
181 WARN_ON(!try_cmpxchg128(&irte->irte, &old, irte_modified->irte));
183 WRITE_ONCE(irte->low, irte_modified->low);
184 WRITE_ONCE(irte->high, irte_modified->high);
186 __iommu_flush_cache(iommu, irte, sizeof(*irte));
188 rc = qi_flush_iec(iommu, index, 0);
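The mixed access styles above (low/high halves, a u128, and bitfields such as pst) only work because struct irte overlays all three views of the same 16 bytes. A simplified, hedged sketch of that layout (field positions omitted; the authoritative definition lives in the kernel headers):

/* Simplified: one 128-bit IRTE viewed three ways. */
struct irte_sketch {
	union {
		struct {
			__u64 low;	/* present, pst, vector, dest_id, ... */
			__u64 high;	/* sid, sq, svt, ... */
		};
		u128 irte;		/* whole entry, for try_cmpxchg128() */
	};
};

Posted entries (pst == 1) take the single 128-bit cmpxchg path so hardware never observes a half-written entry; plain remapped entries tolerate the two WRITE_ONCE() stores.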
200 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
201 return ir_hpet[i].iommu;
211 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
212 return ir_ioapic[i].iommu;
221 return drhd ? drhd->iommu->ir_domain : NULL;
227 struct intel_iommu *iommu;
230 if (irq_iommu->sub_handle)
233 iommu = irq_iommu->iommu;
234 index = irq_iommu->irte_index;
236 start = iommu->ir_table->base + index;
237 end = start + (1 << irq_iommu->irte_mask);
240 WRITE_ONCE(entry->low, 0);
241 WRITE_ONCE(entry->high, 0);
243 bitmap_release_region(iommu->ir_table->bitmap, index,
244 irq_iommu->irte_mask);
246 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
254 #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
257 * source-id qualifier
259 #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
279 irte->svt = svt;
280 irte->sq = sq;
281 irte->sid = sid;
286 * this IRTE must have a requester-id whose bus number is between or equal
302 return -1;
305 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
312 pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
313 return -1;
327 return -1;
330 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
337 pr_warn("Failed to set source-id of HPET block (%d)\n", id);
338 return -1;
362 if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
363 data->busmatch_count++;
365 data->pdev = pdev;
366 data->alias = alias;
367 data->count++;
377 return -1;
386 * device is the case of a PCIe-to-PCI bridge, where the alias is for
401 if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
403 dev->bus->number);
405 set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
406 else if (data.pdev->bus->number != dev->bus->number)
415 static int iommu_load_old_irte(struct intel_iommu *iommu)
423 /* Check whether the old ir-table has the same size as ours */
424 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
427 return -EINVAL;
435 return -ENOMEM;
438 memcpy(iommu->ir_table->base, old_ir_table, size);
440 __iommu_flush_cache(iommu, iommu->ir_table->base, size);
447 if (iommu->ir_table->base[i].present)
448 bitmap_set(iommu->ir_table->bitmap, i, 1);
457 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
463 addr = virt_to_phys((void *)iommu->ir_table->base);
465 raw_spin_lock_irqsave(&iommu->register_lock, flags);
467 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
470 /* Set interrupt-remapping table pointer */
471 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
473 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
475 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
481 if (!cap_esirtps(iommu->cap))
482 qi_global_iec(iommu);
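The value written at line 467 continues onto an elided line; per the VT-d spec it packs the table base, the x2APIC (EIM) mode bit, and a size field into DMAR_IRTA_REG. A hedged reconstruction using the driver's macro names:

/* Sketch: IRTA_REG = 4 KiB-aligned table base | EIME | log2(entries) - 1. */
u64 irta = virt_to_phys(iommu->ir_table->base) |
	   IR_X2APIC_MODE(mode) |		/* EIME, bit 11, when eim */
	   INTR_REMAP_TABLE_REG_SIZE;		/* 0xf -> 2^16 = 65536 entries */

dmar_writeq(iommu->reg + DMAR_IRTA_REG, irta);

The subsequent DMA_GCMD_SIRTP write at line 471 tells the hardware to latch this pointer.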
485 static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
490 raw_spin_lock_irqsave(&iommu->register_lock, flags);
492 /* Enable interrupt-remapping */
493 iommu->gcmd |= DMA_GCMD_IRE;
494 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
495 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
498 /* Block compatibility-format MSIs */
500 iommu->gcmd &= ~DMA_GCMD_CFI;
501 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
502 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
513 "Compatibility-format IRQs enabled despite intr remapping;\n"
516 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
519 static int intel_setup_irq_remapping(struct intel_iommu *iommu)
526 if (iommu->ir_table)
531 return -ENOMEM;
533 ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
537 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
543 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
547 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
551 iommu->ir_domain =
555 iommu);
556 if (!iommu->ir_domain) {
557 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
561 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
562 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
564 iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
566 ir_table->base = ir_table_base;
567 ir_table->bitmap = bitmap;
568 iommu->ir_table = ir_table;
574 if (!iommu->qi) {
578 dmar_fault(-1, iommu);
579 dmar_disable_qi(iommu);
581 if (dmar_enable_qi(iommu)) {
587 init_ir_status(iommu);
589 if (ir_pre_enabled(iommu)) {
592 iommu->name);
593 clear_ir_pre_enabled(iommu);
594 iommu_disable_irq_remapping(iommu);
595 } else if (iommu_load_old_irte(iommu))
597 iommu->name);
600 iommu->name);
603 iommu_set_irq_remapping(iommu, eim_mode);
608 irq_domain_remove(iommu->ir_domain);
609 iommu->ir_domain = NULL;
619 iommu->ir_table = NULL;
621 return -ENOMEM;
624 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
628 if (iommu && iommu->ir_table) {
629 if (iommu->ir_domain) {
630 fn = iommu->ir_domain->fwnode;
632 irq_domain_remove(iommu->ir_domain);
634 iommu->ir_domain = NULL;
636 iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
637 bitmap_free(iommu->ir_table->bitmap);
638 kfree(iommu->ir_table);
639 iommu->ir_table = NULL;
646 static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
651 if (!ecap_ir_support(iommu->ecap))
656 * interrupt-remapping.
658 if (!cap_esirtps(iommu->cap))
659 qi_global_iec(iommu);
661 raw_spin_lock_irqsave(&iommu->register_lock, flags);
663 sts = readl(iommu->reg + DMAR_GSTS_REG);
667 iommu->gcmd &= ~DMA_GCMD_IRE;
668 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
670 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
674 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
683 return dmar->flags & DMAR_X2APIC_OPT_OUT;
689 struct intel_iommu *iommu;
691 for_each_iommu(iommu, drhd) {
692 if (ecap_ir_support(iommu->ecap)) {
693 iommu_disable_irq_remapping(iommu);
694 intel_teardown_irq_remapping(iommu);
699 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
705 struct intel_iommu *iommu;
715 return -ENODEV;
719 return -ENODEV;
722 return -ENODEV;
730 for_each_iommu(iommu, drhd)
731 if (!ecap_ir_support(iommu->ecap))
743 for_each_iommu(iommu, drhd) {
744 if (eim && !ecap_eim_support(iommu->ecap)) {
745 pr_info("%s does not support EIM\n", iommu->name);
752 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
755 for_each_iommu(iommu, drhd) {
756 if (intel_setup_irq_remapping(iommu)) {
758 iommu->name);
767 return -ENODEV;
771 * Set Posted-Interrupts capability.
776 struct intel_iommu *iommu;
781 * 64-bit boundary, we need to use cmpxchg16b to atomically update
782 * it. We only expose posted-interrupt when X86_FEATURE_CX16
790 for_each_iommu(iommu, drhd)
791 if (!cap_pi_support(iommu->cap)) {
802 struct intel_iommu *iommu;
806 * Setup Interrupt-remapping for all the DRHD's now.
808 for_each_iommu(iommu, drhd) {
809 if (!ir_pre_enabled(iommu))
810 iommu_enable_irq_remapping(iommu);
827 return -1;
831 struct intel_iommu *iommu,
836 int count, free = -1;
838 bus = scope->bus;
840 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
843 while (--count > 0) {
848 bus = read_pci_config_byte(bus, path->device, path->function,
854 if (ir_hpet[count].iommu == iommu &&
855 ir_hpet[count].id == scope->enumeration_id)
857 else if (ir_hpet[count].iommu == NULL && free == -1)
860 if (free == -1) {
862 return -ENOSPC;
865 ir_hpet[free].iommu = iommu;
866 ir_hpet[free].id = scope->enumeration_id;
868 ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
869 pr_info("HPET id %d under DRHD base 0x%Lx\n",
870 scope->enumeration_id, drhd->address);
876 struct intel_iommu *iommu,
881 int count, free = -1;
883 bus = scope->bus;
885 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
888 while (--count > 0) {
893 bus = read_pci_config_byte(bus, path->device, path->function,
899 if (ir_ioapic[count].iommu == iommu &&
900 ir_ioapic[count].id == scope->enumeration_id)
902 else if (ir_ioapic[count].iommu == NULL && free == -1)
905 if (free == -1) {
907 return -ENOSPC;
911 ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
912 ir_ioapic[free].iommu = iommu;
913 ir_ioapic[free].id = scope->enumeration_id;
914 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
915 scope->enumeration_id, drhd->address, iommu->seq_id);
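Both scope parsers walk the ACPI device-scope path the same way: starting from the DRHD-provided bus, each intermediate path entry is resolved to the secondary bus of the bridge it names, using direct config-space reads because the PCI core is not yet initialized at this point. A hedged sketch of that walk, assembled from the excerpts above:

/* Sketch: resolve an ACPI DMAR device-scope path to its final bus. */
struct acpi_dmar_pci_path *path = (void *)(scope + 1);
int bus = scope->bus;
int count = (scope->length - sizeof(struct acpi_dmar_device_scope))
	    / sizeof(struct acpi_dmar_pci_path);

while (--count > 0) {
	/* each intermediate entry names a bridge; hop to its secondary bus */
	bus = read_pci_config_byte(bus, path->device, path->function,
				   PCI_SECONDARY_BUS);
	path++;
}
/* the final (bus, path->device, path->function) is the IOAPIC/HPET RID */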
921 struct intel_iommu *iommu)
930 end = ((void *)drhd) + header->length;
934 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
935 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
936 else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
937 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
938 start += scope->length;
944 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
949 if (ir_hpet[i].iommu == iommu)
950 ir_hpet[i].iommu = NULL;
953 if (ir_ioapic[i].iommu == iommu)
954 ir_ioapic[i].iommu = NULL;
958 * Finds the association between IOAPICs and their Interrupt-remapping
964 struct intel_iommu *iommu;
968 for_each_iommu(iommu, drhd) {
971 if (!ecap_ir_support(iommu->ecap))
974 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
982 return -ENODEV;
987 pr_err(FW_BUG "ioapic %d has no mapping iommu, "
990 return -1;
1015 struct intel_iommu *iommu = NULL;
1018 * Disable Interrupt-remapping for all the DRHD's now.
1020 for_each_iommu(iommu, drhd) {
1021 if (!ecap_ir_support(iommu->ecap))
1024 iommu_disable_irq_remapping(iommu);
1028 * Clear Posted-Interrupts capability.
1038 struct intel_iommu *iommu = NULL;
1040 for_each_iommu(iommu, drhd)
1041 if (iommu->qi)
1042 dmar_reenable_qi(iommu);
1045 * Setup Interrupt-remapping for all the DRHD's now.
1047 for_each_iommu(iommu, drhd) {
1048 if (!ecap_ir_support(iommu->ecap))
1051 /* Set up interrupt remapping for iommu. */
1052 iommu_set_irq_remapping(iommu, eim);
1053 iommu_enable_irq_remapping(iommu);
1068 return -1;
1081 if (!irq_remapping_enabled || !pci_dev_has_default_msi_parent_domain(info->dev))
1084 dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
1091 irte->present = 1;
1092 irte->dst_mode = apic->dest_mode_logical;
1094 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
1095 * actual level or edge trigger will be set up in the IO-APIC
1097 * For more details, see the comments (in io_apic.c) explaining IO-APIC
1098 * irq migration in the presence of interrupt-remapping.
1100 irte->trigger_mode = 0;
1101 irte->dlvry_mode = APIC_DELIVERY_MODE_FIXED;
1102 irte->vector = vector;
1103 irte->dest_id = IRTE_DEST(dest);
1104 irte->redir_hint = 1;
1111 irte->present = 1;
1112 irte->p_pst = 1;
1137 struct intel_ir_data *ir_data = irqd->chip_data;
1138 struct irte *irte = &ir_data->irte_entry;
1145 pr_warn("Failed to setup IRQ %d for posted mode", irqd->irq);
1154 irte_pi.pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
1155 irte_pi.pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT);
1157 modify_irte(&ir_data->irq_2_iommu, &irte_pi);
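With PDA_LOW_BIT = 26 and PDA_HIGH_BIT = 32 (the values used by the driver's posted-interrupt definitions), the two masked shifts above split a 64-byte-aligned posted-interrupt descriptor address across the IRTE's pda_l/pda_h fields: address bits 31:6 land in pda_l, bits 63:32 in pda_h, and the six zero low bits are implied by alignment. A worked example under those assumed values:

/* Sketch: packing a hypothetical PI descriptor address into pda_l/pda_h. */
u64 pid_addr = 0x100000040ULL;	/* 64-byte aligned; bits 32 and 6 set */

u64 pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
				/* = (addr >> 6) & 0x3FFFFFF = 0x1 */
u64 pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT);
				/* = addr bits 63:32          = 0x1 */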
1166 struct intel_ir_data *ir_data = irqd->chip_data;
1172 if (ir_data->irq_2_iommu.posted_vcpu && !force_host)
1175 ir_data->irq_2_iommu.posted_vcpu = false;
1177 if (ir_data->irq_2_iommu.posted_msi)
1180 modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
1185 struct intel_ir_data *ir_data = irqd->chip_data;
1186 struct irte *irte = &ir_data->irte_entry;
1193 irte->vector = cfg->vector;
1194 irte->dest_id = IRTE_DEST(cfg->dest_apicid);
1200 * Migrate the IO-APIC irq in the presence of intr-remapping.
1205 * For level triggered, we eliminate the io-apic RTE modification (with the
1206 * updated vector information), by using a virtual vector (io-apic pin number).
1208 * the interrupt-remapping table entry.
1211 * is used to migrate MSI irq's in the presence of interrupt-remapping.
1217 struct irq_data *parent = data->parent_data;
1221 ret = parent->chip->irq_set_affinity(parent, mask, force);
1239 struct intel_ir_data *ir_data = irq_data->chip_data;
1241 *msg = ir_data->msi_entry;
1246 struct intel_ir_data *ir_data = data->chip_data;
1263 dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
1268 irte_pi.p_vector = vcpu_pi_info->vector;
1269 irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
1270 (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
1271 irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
1272 ~(-1UL << PDA_HIGH_BIT);
1274 ir_data->irq_2_iommu.posted_vcpu = true;
1275 modify_irte(&ir_data->irq_2_iommu, &irte_pi);
1282 .name = "INTEL-IR",
1326 .name = "INTEL-IR-POST",
1337 msg->arch_addr_lo.dmar_base_address = X86_MSI_BASE_ADDRESS_LOW;
1338 msg->arch_addr_lo.dmar_subhandle_valid = true;
1339 msg->arch_addr_lo.dmar_format = true;
1340 msg->arch_addr_lo.dmar_index_0_14 = index & 0x7FFF;
1341 msg->arch_addr_lo.dmar_index_15 = !!(index & 0x8000);
1343 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
1345 msg->arch_data.dmar_subhandle = subhandle;
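fill_msi_msg() above builds the VT-d "remappable" MSI address: the 16-bit IRTE handle is split into bits 19:5 (index[14:0]) and bit 2 (index[15]), with the interrupt-format and subhandle-valid bits set. A hedged sketch of the same encoding done with raw shifts (bit positions per the VT-d spec; verify against the x86 MSI address layout before relying on it):

/* Sketch: raw layout of a remappable-format MSI address_lo. */
u32 remappable_msi_addr(u16 index)
{
	return 0xFEE00000u			/* base address, bits 31:20 */
	     | ((u32)(index & 0x7FFF) << 5)	/* handle[14:0], bits 19:5 */
	     | (1u << 4)			/* interrupt format = remappable */
	     | (1u << 3)			/* SHV: subhandle in MSI data */
	     | (((u32)index >> 15) << 2);	/* handle[15], bit 2 */
}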
1353 struct irte *irte = &data->irte_entry;
1355 prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
1357 switch (info->type) {
1359 /* Set source-id of interrupt request */
1360 set_ioapic_sid(irte, info->devid);
1362 info->devid, irte->present, irte->fpd, irte->dst_mode,
1363 irte->redir_hint, irte->trigger_mode, irte->dlvry_mode,
1364 irte->avail, irte->vector, irte->dest_id, irte->sid,
1365 irte->sq, irte->svt);
1366 sub_handle = info->ioapic.pin;
1369 set_hpet_sid(irte, info->devid);
1375 data->irq_2_iommu.posted_msi = 1;
1379 pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
1385 fill_msi_msg(&data->msi_entry, index, sub_handle);
1398 if (irq_data && irq_data->chip_data) {
1399 data = irq_data->chip_data;
1400 irq_iommu = &data->irq_2_iommu;
1414 struct intel_iommu *iommu = domain->host_data;
1421 if (!info || !iommu)
1422 return -EINVAL;
1423 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
1424 return -EINVAL;
1430 ret = -ENOMEM;
1435 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
1448 ret = -EINVAL;
1457 ird->irq_2_iommu = data->irq_2_iommu;
1458 ird->irq_2_iommu.sub_handle = i;
1463 irq_data->hwirq = (index << 16) + i;
1464 irq_data->chip_data = ird;
1466 ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) ||
1467 (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX)))
1468 irq_data->chip = &intel_ir_chip_post_msi;
1470 irq_data->chip = &intel_ir_chip;
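Line 1463 shows the domain's hwirq encoding: the IRTE block index in the upper bits, the per-interrupt subhandle in the low 16. A hedged pair of decode helpers (hypothetical names; only the encoding itself is taken from the excerpt):

/* Sketch: invert hwirq = (index << 16) + subhandle. */
static inline unsigned int ir_hwirq_index(irq_hw_number_t hwirq)
{
	return hwirq >> 16;		/* base IRTE of the allocated block */
}

static inline unsigned int ir_hwirq_subhandle(irq_hw_number_t hwirq)
{
	return hwirq & 0xffff;		/* offset within the block */
}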
1499 struct intel_ir_data *data = irq_data->chip_data;
1502 WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu);
1503 data->irq_2_iommu.posted_vcpu = false;
1506 modify_irte(&data->irq_2_iommu, &entry);
1513 struct intel_iommu *iommu = NULL;
1516 iommu = map_ioapic_to_iommu(fwspec->param[0]);
1518 iommu = map_hpet_to_iommu(fwspec->param[0]);
1520 return iommu && d == iommu->ir_domain;
1533 .prefix = "IR-",
1540 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1545 if (eim && !ecap_eim_support(iommu->ecap)) {
1547 iommu->reg_phys, iommu->ecap);
1548 return -ENODEV;
1551 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1553 iommu->reg_phys);
1554 return -ENODEV;
1557 /* TODO: check all IOAPICs are covered by IOMMU */
1559 /* Setup Interrupt-remapping now. */
1560 ret = intel_setup_irq_remapping(iommu);
1563 iommu->name);
1564 intel_teardown_irq_remapping(iommu);
1565 ir_remove_ioapic_hpet_scope(iommu);
1567 iommu_enable_irq_remapping(iommu);
1576 struct intel_iommu *iommu = dmaru->iommu;
1580 if (iommu == NULL)
1581 return -EINVAL;
1582 if (!ecap_ir_support(iommu->ecap))
1585 !cap_pi_support(iommu->cap))
1586 return -EBUSY;
1589 if (!iommu->ir_table)
1590 ret = dmar_ir_add(dmaru, iommu);
1592 if (iommu->ir_table) {
1593 if (!bitmap_empty(iommu->ir_table->bitmap,
1595 ret = -EBUSY;
1597 iommu_disable_irq_remapping(iommu);
1598 intel_teardown_irq_remapping(iommu);
1599 ir_remove_ioapic_hpet_scope(iommu);