Lines matching +full:0 +full:xfee00000 (the constant 0xfee00000, split by the search tokenizer) in drivers/iommu/amd/iommu.c
51 #define MSI_RANGE_START (0xfee00000)
52 #define MSI_RANGE_END (0xfeefffff)
53 #define HT_RANGE_START (0xfd00000000ULL)
54 #define HT_RANGE_END (0xffffffffffULL)
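These two windows are exactly the ranges the driver later hands out as reserved regions (IOMMU_RESV_MSI for the MSI window, IOMMU_RESV_RESERVED for the HyperTransport window; see lines 2905 and 2912 below), so no IOVA is ever allocated inside them. A minimal userspace sketch of the containment test these constants imply; addr_in_reserved_range() is a hypothetical helper, not part of the driver:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSI_RANGE_START (0xfee00000)
    #define MSI_RANGE_END   (0xfeefffff)
    #define HT_RANGE_START  (0xfd00000000ULL)
    #define HT_RANGE_END    (0xffffffffffULL)

    /* Hypothetical helper: true if addr falls inside a window the
     * driver must keep out of the IOVA space. */
    static bool addr_in_reserved_range(uint64_t addr)
    {
            return (addr >= MSI_RANGE_START && addr <= MSI_RANGE_END) ||
                   (addr >= HT_RANGE_START && addr <= HT_RANGE_END);
    }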
127 amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]); in write_dte_lower128()
150 if (!(ptr->data[0] & DTE_FLAG_V)) { in update_dte256()
155 } else if (!(new->data[0] & DTE_FLAG_V)) { in update_dte256()
160 } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) { in update_dte256()
168 } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) { in update_dte256()
217 dte->data128[0] = ptr->data128[0]; in get_dte256()
253 p->uid[0] ? p->uid : NULL)) { in get_acpihid_device_id()
327 if (devid < 0) in rlookup_amd_iommu()
374 int ret = 0; in clone_alias()
377 return 0; in clone_alias()
381 return 0; in clone_alias()
386 pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid); in clone_alias()
395 pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias); in clone_alias()
441 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1); in setup_aliases()
473 if (devid < 0) in acpihid_device_group()
497 u32 flags = 0; in pdev_get_caps()
506 if (features >= 0) { in pdev_get_caps()
525 return 0; in pdev_enable_cap_ats()
545 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
555 return 0; in pdev_enable_cap_pri()
558 return 0; in pdev_enable_cap_pri()
569 ret = 0; in pdev_enable_cap_pri()
582 dev_data->pri_enabled = 0; in pdev_disable_cap_pri()
592 return 0; in pdev_enable_cap_pasid()
596 ret = pci_enable_pasid(pdev, 0); in pdev_enable_cap_pasid()
610 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
642 if (sbdf < 0) in check_device()
664 return 0; in iommu_init_device()
667 if (sbdf < 0) in iommu_init_device()
695 return 0; in iommu_init_device()
705 if (sbdf < 0) in iommu_ignore_device()
710 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry)); in iommu_ignore_device()
730 for (i = 0; i < 4; ++i) in dump_dte_entry()
739 for (i = 0; i < 4; ++i) in dump_command()
750 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_hw_error()
751 vmg_tag = (event[1]) & 0xFFFF; in amd_iommu_report_rmp_hw_error()
753 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8); in amd_iommu_report_rmp_hw_error()
756 devid & 0xff); in amd_iommu_report_rmp_hw_error()
762 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n", in amd_iommu_report_rmp_hw_error()
766 …("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n… in amd_iommu_report_rmp_hw_error()
782 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in amd_iommu_report_rmp_fault()
783 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF; in amd_iommu_report_rmp_fault()
784 vmg_tag = (event[1]) & 0xFFFF; in amd_iommu_report_rmp_fault()
789 devid & 0xff); in amd_iommu_report_rmp_fault()
795 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", in amd_iommu_report_rmp_fault()
799 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", in amd_iommu_report_rmp_fault()
809 (((flags) & EVENT_FLAG_I) == 0)
822 devid & 0xff); in amd_iommu_report_page_fault()
836 pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n", in amd_iommu_report_page_fault()
851 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n", in amd_iommu_report_page_fault()
855 …("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\… in amd_iommu_report_page_fault()
870 int count = 0; in iommu_print_event()
876 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in iommu_print_event()
877 pasid = (event[0] & EVENT_DOMID_MASK_HI) | in iommu_print_event()
882 if (type == 0) { in iommu_print_event()
899 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", in iommu_print_event()
906 "address=0x%llx flags=0x%04x]\n", in iommu_print_event()
911 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n", in iommu_print_event()
916 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address); in iommu_print_event()
920 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n", in iommu_print_event()
924 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n", in iommu_print_event()
929 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", in iommu_print_event()
941 tag = event[1] & 0x03FF; in iommu_print_event()
942 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n", in iommu_print_event()
947 dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n", in iommu_print_event()
948 event[0], event[1], event[2], event[3]); in iommu_print_event()
958 memset(__evt, 0, 4 * sizeof(u32)); in iommu_print_event()
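An event-log entry is four 32-bit words (cleared again at line 958 once consumed). Fields wider than 32 bits are split across words: the fault address lives in event[2]/event[3], and the RMP handler at line 753 additionally masks off the reserved low bits. A self-contained sketch of that reassembly:

    #include <stdint.h>

    /* Rebuild the 64-bit address carried in words 2 and 3 of a 4-word
     * event-log entry. The low-bit mask mirrors the spa computation at
     * line 753; plain address fields are combined without it. */
    static uint64_t event_spa(const uint32_t event[4])
    {
            return ((uint64_t)event[3] << 32) | (event[2] & 0xFFFFFFF8u);
    }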
985 return 0; in amd_iommu_register_ga_log_notifier()
1022 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0) in iommu_poll_ga_log()
1136 int i = 0; in wait_on_sem()
1148 return 0; in wait_on_sem()
1175 memset(cmd, 0, sizeof(*cmd)); in build_completion_wait()
1176 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; in build_completion_wait()
1185 memset(cmd, 0, sizeof(*cmd)); in build_inv_dte()
1186 cmd->data[0] = devid; in build_inv_dte()
1225 /* Clear bits 11:0 */ in build_inv_address()
1238 memset(cmd, 0, sizeof(*cmd)); in build_inv_iommu_pages()
1246 cmd->data[0] |= pasid; in build_inv_iommu_pages()
1258 memset(cmd, 0, sizeof(*cmd)); in build_inv_iotlb_pages()
1260 cmd->data[0] = devid; in build_inv_iotlb_pages()
1261 cmd->data[0] |= (qdep & 0xff) << 24; in build_inv_iotlb_pages()
1266 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; in build_inv_iotlb_pages()
1267 cmd->data[1] |= (pasid & 0xff) << 16; in build_inv_iotlb_pages()
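Lines 1266-1267 show the PASID being split across the first two words of the INVALIDATE_IOTLB_PAGES command: bits 15:8 go to data[0] bits 23:16 and bits 7:0 to data[1] bits 23:16. A sketch of that packing plus the matching unpack, assuming a 16-bit PASID:

    #include <stdint.h>

    /* Split a PASID the way build_inv_iotlb_pages() does. */
    static void pack_iotlb_pasid(uint32_t data[4], uint32_t pasid)
    {
            data[0] |= ((pasid >> 8) & 0xff) << 16; /* PASID[15:8] */
            data[1] |= (pasid & 0xff) << 16;        /* PASID[7:0]  */
    }

    static uint32_t unpack_iotlb_pasid(const uint32_t data[4])
    {
            return (((data[0] >> 16) & 0xff) << 8) |
                    ((data[1] >> 16) & 0xff);
    }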
1277 memset(cmd, 0, sizeof(*cmd)); in build_complete_ppr()
1279 cmd->data[0] = devid; in build_complete_ppr()
1284 cmd->data[3] = tag & 0x1ff; in build_complete_ppr()
1292 memset(cmd, 0, sizeof(*cmd)); in build_inv_all()
1298 memset(cmd, 0, sizeof(*cmd)); in build_inv_irt()
1299 cmd->data[0] = devid; in build_inv_irt()
1311 unsigned int count = 0; in __iommu_queue_command_sync()
1318 if (left <= 0x20) { in __iommu_queue_command_sync()
1341 return 0; in __iommu_queue_command_sync()
1375 return 0; in iommu_completion_wait()
1432 for (devid = 0; devid <= last_bdf; ++devid) in amd_iommu_flush_dte_all()
1447 for (dom_id = 0; dom_id <= last_bdf; ++dom_id) { in amd_iommu_flush_tlb_all()
1449 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in amd_iommu_flush_tlb_all()
1461 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, in amd_iommu_flush_tlb_domid()
1495 for (devid = 0; devid <= last_bdf; devid++) in amd_iommu_flush_irt_all()
1567 ret = device_flush_iotlb(dev_data, 0, ~0UL, in device_flush_dte()
1579 int ret = 0; in domain_flush_pages_v2()
1600 int ret = 0; in domain_flush_pages_v1()
1627 int ret = 0; in __domain_flush_pages()
1675 while (size != 0) { in amd_iommu_domain_flush_pages()
1687 if (likely((unsigned long)address != 0)) in amd_iommu_domain_flush_pages()
1706 amd_iommu_domain_flush_pages(domain, 0, in amd_iommu_domain_flush_all()
1729 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, in dev_flush_pasid_all()
1809 for (i = 0; i < 512; ++i) { in free_gcr3_tbl_level1()
1824 for (i = 0; i < 512; ++i) { in free_gcr3_tbl_level2()
1841 WARN_ON_ONCE(gcr3_info->glx != 0); in free_gcr3_table()
1843 gcr3_info->glx = 0; in free_gcr3_table()
1883 if (domid <= 0) in setup_gcr3_table()
1895 return 0; in setup_gcr3_table()
1908 index = (pasid >> (9 * level)) & 0x1ff; in __get_gcr3_pte()
1911 if (level == 0) in __get_gcr3_pte()
1946 *pte = 0; in update_gcr3()
1949 return 0; in update_gcr3()
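The GCR3 table is walked like a radix page table: each level holds 512 entries (the loops at lines 1809 and 1824 free exactly that many), so every level consumes 9 PASID bits, which is the index computation at line 1908. A runnable sketch of the per-level index math for a 3-level table:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors index = (pasid >> (9 * level)) & 0x1ff in __get_gcr3_pte(). */
    static unsigned int gcr3_index(uint32_t pasid, int level)
    {
            return (pasid >> (9 * level)) & 0x1ff;
    }

    int main(void)
    {
            uint32_t pasid = 0x12345;

            for (int level = 2; level >= 0; level--)
                    printf("level %d -> index 0x%03x\n",
                           level, gcr3_index(pasid, level));
            return 0; /* prints 0x000, 0x091, 0x145 */
    }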
1975 ret = update_gcr3(dev_data, pasid, 0, false); in amd_iommu_clear_gcr3()
1987 new->data128[0] = DTE_FLAG_V; in make_clear_dte()
1988 new->data128[1] = 0; in make_clear_dte()
2011 target->data[0] |= DTE_FLAG_GV | in set_dte_gcr3_table()
2015 target->data[0] |= DTE_FLAG_GIOV; in set_dte_gcr3_table()
2046 new.data[0] |= iommu_virt_to_phys(domain->iop.root); in set_dte_entry()
2048 new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) in set_dte_entry()
2051 new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW; in set_dte_entry()
2058 WARN_ON(amd_iommu_snp_en && (domid == 0)); in set_dte_entry()
2059 new.data[0] |= DTE_FLAG_TV; in set_dte_entry()
2062 new.data[0] |= 1ULL << DEV_ENTRY_PPR; in set_dte_entry()
2065 new.data[0] |= DTE_FLAG_HAD; in set_dte_entry()
2079 new.data128[0] |= initial_dte->data128[0]; in set_dte_entry()
2126 * in v2 page table mode then update GCR3[0].
2133 int ret = 0; in init_gcr3_table()
2147 max_pasids > 0 ? max_pasids : 1); in init_gcr3_table()
2151 /* Setup GCR3[0] only if domain is setup with v2 page table mode */ in init_gcr3_table()
2155 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); in init_gcr3_table()
2168 update_gcr3(dev_data, 0, 0, false); in destroy_gcr3_table()
2181 int ret = 0; in pdom_attach_iommu()
2228 if (pdom_iommu_info->refcnt == 0) { in pdom_detach_iommu()
2247 int ret = 0; in attach_device()
2462 if (domid <= 0) { in protection_domain_alloc()
2493 return 0; in pdom_setup_pgtable()
2499 return ~0ULL; in dma_max_address()
2531 domain->domain.geometry.aperture_start = 0; in do_iommu_domain_alloc()
2568 case 0: in amd_iommu_domain_alloc_paging_flags()
2570 return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable); in amd_iommu_domain_alloc_paging_flags()
2597 return 0; in blocked_domain_attach_device()
2605 return 0; in blocked_domain_set_dev_pasid()
2656 return 0; in amd_iommu_attach_device()
2677 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2692 return 0; in amd_iommu_iotlb_sync_map()
2701 int prot = 0; in amd_iommu_map_pages()
2752 return 0; in amd_iommu_unmap_pages()
2754 r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0; in amd_iommu_unmap_pages()
2810 return 0; in amd_iommu_set_dirty_tracking()
2817 new = dte->data[0]; in amd_iommu_set_dirty_tracking()
2819 dte->data[0] = new; in amd_iommu_set_dirty_tracking()
2834 return 0; in amd_iommu_set_dirty_tracking()
2869 if (sbdf < 0) in amd_iommu_get_resv_regions()
2877 int type, prot = 0; in amd_iommu_get_resv_regions()
2905 0, IOMMU_RESV_MSI, GFP_KERNEL); in amd_iommu_get_resv_regions()
2912 0, IOMMU_RESV_RESERVED, GFP_KERNEL); in amd_iommu_get_resv_regions()
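The regions built here (unity-mapped ranges first, then the fixed MSI and HyperTransport windows) are reported through the generic iommu core, so a consumer can enumerate them with the standard API. A sketch, assuming a bound struct device *dev:

    #include <linux/device.h>
    #include <linux/iommu.h>
    #include <linux/list.h>

    /* Walk the reserved regions this callback reports, e.g. the MSI
     * window registered with IOMMU_RESV_MSI above. */
    static void dump_resv_regions(struct device *dev)
    {
            struct iommu_resv_region *region;
            LIST_HEAD(regions);

            iommu_get_resv_regions(dev, &regions);
            list_for_each_entry(region, &regions, list)
                    dev_info(dev, "resv [%pa + 0x%zx] type=%d\n",
                             &region->start, region->length, region->type);
            iommu_put_resv_regions(dev, &regions);
    }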
2953 return 0; in amd_iommu_def_domain_type()
2964 * - SNP is enabled, because it prohibits DTE[Mode]=0. in amd_iommu_def_domain_type()
2972 return 0; in amd_iommu_def_domain_type()
2989 int ret = 0; in amd_iommu_dev_enable_feature()
3005 int ret = 0; in amd_iommu_dev_disable_feature()
3140 memset(table->table, 0, in __alloc_irq_table()
3143 memset(table->table, 0, in __alloc_irq_table()
3173 return 0; in set_remap_table_entry_alias()
3259 for (index = ALIGN(table->min_index, alignment), c = 0; in alloc_irq_index()
3264 c = 0; in alloc_irq_index()
3270 for (; c != 0; --c) in alloc_irq_index()
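alloc_irq_index() is an aligned first-fit scan over the remapping table: start counting at an index aligned to the allocation size, reset the run on any allocated slot, and succeed once `count` consecutive free slots are found (the `c` bookkeeping at lines 3259-3270). A standalone sketch of the same scan over a plain bitmap, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical first-fit scan: return the start of a run of `count`
     * free slots whose start index is aligned to `alignment`, or -1. */
    static int find_free_run(const bool *allocated, size_t size,
                             size_t count, size_t alignment)
    {
            size_t c = 0;

            for (size_t index = 0; index < size; index++) {
                    if (allocated[index]) {
                            c = 0;
                            continue;
                    }
                    if (c == 0 && (index % alignment) != 0)
                            continue; /* a run must start aligned */
                    if (++c == count)
                            return (int)(index - count + 1);
            }
            return -1; /* table exhausted */
    }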
3316 return 0; in __modify_irte_ga()
3330 return 0; in modify_irte_ga()
3349 return 0; in modify_irte()
3374 irte->val = 0; in irte_prepare()
3388 irte->lo.val = 0; in irte_ga_prepare()
3389 irte->hi.val = 0; in irte_ga_prepare()
3418 irte->fields.valid = 0; in irte_deactivate()
3426 irte->lo.fields_remap.valid = 0; in irte_ga_deactivate()
3466 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3467 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_set_allocated()
3468 irte->hi.fields.vector = 0xff; in irte_ga_set_allocated()
3476 return irte->val != 0; in irte_is_allocated()
3484 return irte->hi.fields.vector != 0; in irte_ga_is_allocated()
3489 table->table[index] = 0; in irte_clear_allocated()
3497 memset(&irte->lo.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3498 memset(&irte->hi.val, 0, sizeof(u64)); in irte_ga_clear_allocated()
3528 msg->address_lo = 0; in fill_msi_msg()
3601 if (sbdf < 0) in irq_remapping_alloc()
3611 if (ret < 0) in irq_remapping_alloc()
3625 for (i = 0; i < 32; ++i) in irq_remapping_alloc()
3643 if (index < 0) { in irq_remapping_alloc()
3649 for (i = 0; i < nr_irqs; i++) { in irq_remapping_alloc()
3679 return 0; in irq_remapping_alloc()
3682 for (i--; i >= 0; i--) { in irq_remapping_alloc()
3687 for (i = 0; i < nr_irqs; i++) in irq_remapping_alloc()
3702 for (i = 0; i < nr_irqs; i++) { in irq_remapping_free()
3729 return 0; in irq_remapping_activate()
3734 return 0; in irq_remapping_activate()
3756 return 0; in irq_remapping_select()
3759 devid = get_ioapic_devid(fwspec->param[0]); in irq_remapping_select()
3761 devid = get_hpet_devid(fwspec->param[0]); in irq_remapping_select()
3763 if (devid < 0) in irq_remapping_select()
3764 return 0; in irq_remapping_select()
3765 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
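The value consumed here is really an sbdf: PCI segment in bits 31:16 and the 16-bit IOMMU device ID in bits 15:0, which is what the (devid >> 16) / (devid & 0xffff) split at line 3765 undoes (negative values signal a failed lookup, as at line 3763). A sketch with hypothetical helper names:

    #include <stdint.h>

    /* Hypothetical helpers mirroring the split at line 3765:
     * sbdf = segment[31:16] | devid[15:0]. */
    static uint32_t pack_sbdf(uint16_t seg, uint16_t devid)
    {
            return ((uint32_t)seg << 16) | devid;
    }

    static void unpack_sbdf(uint32_t sbdf, uint16_t *seg, uint16_t *devid)
    {
            *seg   = sbdf >> 16;
            *devid = sbdf & 0xffff;
    }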
3785 return 0; in amd_iommu_activate_guest_mode()
3789 entry->lo.val = 0; in amd_iommu_activate_guest_mode()
3790 entry->hi.val = 0; in amd_iommu_activate_guest_mode()
3813 return 0; in amd_iommu_deactivate_guest_mode()
3817 entry->lo.val = 0; in amd_iommu_deactivate_guest_mode()
3818 entry->hi.val = 0; in amd_iommu_deactivate_guest_mode()
3853 return 0; in amd_ir_set_vcpu_affinity()
3884 ir_data->cached_ga_tag = 0; in amd_ir_set_vcpu_affinity()
3920 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) in amd_ir_set_affinity()
3962 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, in amd_iommu_create_irq_domain()
3974 return 0; in amd_iommu_create_irq_domain()
3984 return 0; in amd_iommu_update_ga()
3989 if (cpu >= 0) { in amd_iommu_update_ga()