Lines Matching refs:dev_data

84 			  struct iommu_dev_data *dev_data);
141 static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in update_dte256() argument
146 struct dev_table_entry *ptr = &dev_table[dev_data->devid]; in update_dte256()
148 spin_lock_irqsave(&dev_data->dte_lock, flags); in update_dte256()
154 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
159 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
167 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
176 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
189 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
194 iommu_flush_dte_sync(iommu, dev_data->devid); in update_dte256()
204 spin_unlock_irqrestore(&dev_data->dte_lock, flags); in update_dte256()
207 static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, in get_dte256() argument
214 ptr = &dev_table[dev_data->devid]; in get_dte256()
216 spin_lock_irqsave(&dev_data->dte_lock, flags); in get_dte256()
219 spin_unlock_irqrestore(&dev_data->dte_lock, flags); in get_dte256()
334 struct iommu_dev_data *dev_data; in alloc_dev_data() local
337 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
338 if (!dev_data) in alloc_dev_data()
341 mutex_init(&dev_data->mutex); in alloc_dev_data()
342 spin_lock_init(&dev_data->dte_lock); in alloc_dev_data()
343 dev_data->devid = devid; in alloc_dev_data()
344 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
346 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
347 return dev_data; in alloc_dev_data()
352 struct iommu_dev_data *dev_data; in search_dev_data() local
360 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
361 if (dev_data->devid == devid) in search_dev_data()
362 return dev_data; in search_dev_data()
372 struct iommu_dev_data *dev_data, *alias_data; in clone_alias() local
384 dev_data = dev_iommu_priv_get(&pdev->dev); in clone_alias()
385 if (!dev_data) { in clone_alias()
390 get_dte256(iommu, dev_data, &new); in clone_alias()
448 struct iommu_dev_data *dev_data; in find_dev_data() local
450 dev_data = search_dev_data(iommu, devid); in find_dev_data()
452 if (dev_data == NULL) { in find_dev_data()
453 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
454 if (!dev_data) in find_dev_data()
458 dev_data->defer_attach = true; in find_dev_data()
461 return dev_data; in find_dev_data()
489 static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data) in pdev_pasid_supported() argument
491 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); in pdev_pasid_supported()
521 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_ats() local
524 if (dev_data->ats_enabled) in pdev_enable_cap_ats()
528 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { in pdev_enable_cap_ats()
531 dev_data->ats_enabled = 1; in pdev_enable_cap_ats()
532 dev_data->ats_qdep = pci_ats_queue_depth(pdev); in pdev_enable_cap_ats()
541 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_ats() local
543 if (dev_data->ats_enabled) { in pdev_disable_cap_ats()
545 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
551 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pri() local
554 if (dev_data->pri_enabled) in pdev_enable_cap_pri()
557 if (!dev_data->ats_enabled) in pdev_enable_cap_pri()
560 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { in pdev_enable_cap_pri()
566 dev_data->pri_enabled = 1; in pdev_enable_cap_pri()
567 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in pdev_enable_cap_pri()
578 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pri() local
580 if (dev_data->pri_enabled) { in pdev_disable_cap_pri()
582 dev_data->pri_enabled = 0; in pdev_disable_cap_pri()
588 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pasid() local
591 if (dev_data->pasid_enabled) in pdev_enable_cap_pasid()
594 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { in pdev_enable_cap_pasid()
598 dev_data->pasid_enabled = 1; in pdev_enable_cap_pasid()
606 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pasid() local
608 if (dev_data->pasid_enabled) { in pdev_disable_cap_pasid()
610 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
660 struct iommu_dev_data *dev_data; in iommu_init_device() local
671 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
672 if (!dev_data) in iommu_init_device()
675 dev_data->dev = dev; in iommu_init_device()
681 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
692 dev_data->flags = pdev_get_caps(to_pci_dev(dev)); in iommu_init_device()
726 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid); in dump_dte_entry() local
728 get_dte256(iommu, dev_data, &dte); in dump_dte_entry()
745 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
758 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
760 if (dev_data) { in amd_iommu_report_rmp_hw_error()
761 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
777 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
791 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
793 if (dev_data) { in amd_iommu_report_rmp_fault()
794 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
818 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
824 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
826 if (dev_data) { in amd_iommu_report_page_fault()
834 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
842 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
850 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
1515 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address, in device_flush_iotlb() argument
1518 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb()
1520 int qdep = dev_data->ats_qdep; in device_flush_iotlb()
1522 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1538 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1540 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte()
1546 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1547 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1553 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1558 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1559 if (alias != dev_data->devid) { in device_flush_dte()
1565 if (dev_data->ats_enabled) { in device_flush_dte()
1567 ret = device_flush_iotlb(dev_data, 0, ~0UL, in device_flush_dte()
1577 struct iommu_dev_data *dev_data; in domain_flush_pages_v2() local
1582 list_for_each_entry(dev_data, &pdom->dev_list, list) { in domain_flush_pages_v2()
1583 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2()
1584 u16 domid = dev_data->gcr3_info.domid; in domain_flush_pages_v2()
1626 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1640 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1642 if (!dev_data->ats_enabled) in __domain_flush_pages()
1645 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn); in __domain_flush_pages()
1710 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, in amd_iommu_dev_flush_pasid_pages() argument
1714 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages()
1717 dev_data->gcr3_info.domid, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1720 if (dev_data->ats_enabled) in amd_iommu_dev_flush_pasid_pages()
1721 device_flush_iotlb(dev_data, address, size, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1726 static void dev_flush_pasid_all(struct iommu_dev_data *dev_data, in dev_flush_pasid_all() argument
1729 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, in dev_flush_pasid_all()
1752 struct iommu_dev_data *dev_data; in amd_iommu_update_and_flush_device_table() local
1756 list_for_each_entry(dev_data, &domain->dev_list, list) { in amd_iommu_update_and_flush_device_table()
1757 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table()
1759 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1760 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1763 list_for_each_entry(dev_data, &domain->dev_list, list) in amd_iommu_update_and_flush_device_table()
1764 device_flush_dte(dev_data); in amd_iommu_update_and_flush_device_table()
1771 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
1775 dev_data = dev_iommu_priv_get(dev); in amd_iommu_complete_ppr()
1778 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
1779 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
1933 static int update_gcr3(struct iommu_dev_data *dev_data, in update_gcr3() argument
1936 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in update_gcr3()
1948 dev_flush_pasid_all(dev_data, pasid); in update_gcr3()
1952 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid, in amd_iommu_set_gcr3() argument
1955 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_set_gcr3()
1958 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_set_gcr3()
1960 ret = update_gcr3(dev_data, pasid, gcr3, true); in amd_iommu_set_gcr3()
1968 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid) in amd_iommu_clear_gcr3() argument
1970 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_clear_gcr3()
1973 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_clear_gcr3()
1975 ret = update_gcr3(dev_data, pasid, 0, false); in amd_iommu_clear_gcr3()
1983 static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr, in make_clear_dte() argument
1996 struct iommu_dev_data *dev_data, in set_dte_gcr3_table() argument
1999 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_gcr3_table()
2006 __func__, dev_data->devid, gcr3_info->glx, in set_dte_gcr3_table()
2014 if (pdom_is_v2_pgtbl_mode(dev_data->domain)) in set_dte_gcr3_table()
2028 struct iommu_dev_data *dev_data) in set_dte_entry() argument
2034 struct protection_domain *domain = dev_data->domain; in set_dte_entry()
2035 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_entry()
2036 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in set_dte_entry()
2039 domid = dev_data->gcr3_info.domid; in set_dte_entry()
2043 make_clear_dte(dev_data, dte, &new); in set_dte_entry()
2061 if (dev_data->ppr) in set_dte_entry()
2067 if (dev_data->ats_enabled) in set_dte_entry()
2077 initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); in set_dte_entry()
2083 set_dte_gcr3_table(iommu, dev_data, &new); in set_dte_entry()
2085 update_dte256(iommu, dev_data, &new); in set_dte_entry()
2100 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) in clear_dte_entry() argument
2103 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; in clear_dte_entry()
2105 make_clear_dte(dev_data, dte, &new); in clear_dte_entry()
2106 update_dte256(iommu, dev_data, &new); in clear_dte_entry()
2110 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) in dev_update_dte() argument
2112 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte()
2115 set_dte_entry(iommu, dev_data); in dev_update_dte()
2117 clear_dte_entry(iommu, dev_data); in dev_update_dte()
2119 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
2120 device_flush_dte(dev_data); in dev_update_dte()
2128 static int init_gcr3_table(struct iommu_dev_data *dev_data, in init_gcr3_table() argument
2131 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table()
2132 int max_pasids = dev_data->max_pasids; in init_gcr3_table()
2139 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) in init_gcr3_table()
2146 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
2155 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); in init_gcr3_table()
2157 free_gcr3_table(&dev_data->gcr3_info); in init_gcr3_table()
2162 static void destroy_gcr3_table(struct iommu_dev_data *dev_data, in destroy_gcr3_table() argument
2165 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in destroy_gcr3_table()
2168 update_gcr3(dev_data, 0, 0, false); in destroy_gcr3_table()
2243 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in attach_device() local
2244 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in attach_device()
2249 mutex_lock(&dev_data->mutex); in attach_device()
2251 if (dev_data->domain != NULL) { in attach_device()
2263 ret = init_gcr3_table(dev_data, domain); in attach_device()
2270 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; in attach_device()
2279 if (amd_iommu_iopf_add_device(iommu, dev_data)) in attach_device()
2286 dev_data->domain = domain; in attach_device()
2288 list_add(&dev_data->list, &domain->dev_list); in attach_device()
2292 dev_update_dte(dev_data, true); in attach_device()
2295 mutex_unlock(&dev_data->mutex); in attach_device()
2305 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in detach_device() local
2306 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device()
2307 struct protection_domain *domain = dev_data->domain; in detach_device()
2310 mutex_lock(&dev_data->mutex); in detach_device()
2318 if (WARN_ON(!dev_data->domain)) in detach_device()
2322 if (dev_data->ppr) { in detach_device()
2324 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2331 dev_update_dte(dev_data, false); in detach_device()
2336 list_del(&dev_data->list); in detach_device()
2341 destroy_gcr3_table(dev_data, domain); in detach_device()
2344 dev_data->domain = NULL; in detach_device()
2350 mutex_unlock(&dev_data->mutex); in detach_device()
2357 struct iommu_dev_data *dev_data; in amd_iommu_probe_device() local
2389 dev_data = dev_iommu_priv_get(dev); in amd_iommu_probe_device()
2391 pdev_pasid_supported(dev_data)) { in amd_iommu_probe_device()
2392 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2407 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_release_device() local
2409 WARN_ON(dev_data->domain); in amd_iommu_release_device()
2587 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in blocked_domain_attach_device() local
2589 if (dev_data->domain) in blocked_domain_attach_device()
2593 mutex_lock(&dev_data->mutex); in blocked_domain_attach_device()
2594 dev_update_dte(dev_data, false); in blocked_domain_attach_device()
2595 mutex_unlock(&dev_data->mutex); in blocked_domain_attach_device()
2646 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device() local
2655 if (dev_data->domain == domain) in amd_iommu_attach_device()
2658 dev_data->defer_attach = false; in amd_iommu_attach_device()
2667 if (dev_data->domain) in amd_iommu_attach_device()
2675 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2677 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2801 struct iommu_dev_data *dev_data; in amd_iommu_set_dirty_tracking() local
2813 list_for_each_entry(dev_data, &pdomain->dev_list, list) { in amd_iommu_set_dirty_tracking()
2814 spin_lock(&dev_data->dte_lock); in amd_iommu_set_dirty_tracking()
2815 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2816 dte = &get_dev_table(iommu)[dev_data->devid]; in amd_iommu_set_dirty_tracking()
2820 spin_unlock(&dev_data->dte_lock); in amd_iommu_set_dirty_tracking()
2823 device_flush_dte(dev_data); in amd_iommu_set_dirty_tracking()
2920 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2922 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2949 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2951 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2952 if (!dev_data) in amd_iommu_def_domain_type()
2966 if (pdev_pasid_supported(dev_data) && in amd_iommu_def_domain_type()
3089 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid); in set_dte_irq_entry() local
3091 if (dev_data) in set_dte_irq_entry()
3092 spin_lock(&dev_data->dte_lock); in set_dte_irq_entry()
3102 if (dev_data) in set_dte_irq_entry()
3103 spin_unlock(&dev_data->dte_lock); in set_dte_irq_entry()
3841 struct iommu_dev_data *dev_data; in amd_ir_set_vcpu_affinity() local
3846 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3852 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
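
For orientation, the references above to update_dte256(), get_dte256() and dev_data->dte_lock follow one recurring pattern: take the per-device dte_lock, rewrite the 256-bit device table entry indexed by dev_data->devid, flushing the DTE through the IOMMU command queue as it is updated, then drop the lock. The sketch below is illustrative only and not the driver's actual implementation: the structures are reduced, and write_dte_quadword()/flush_dte()/update_dte_sketch() are hypothetical stand-ins for the update_dte256() and iommu_flush_dte_sync() calls listed above.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Reduced 256-bit device table entry: four 64-bit quadwords. */
struct dte_sketch {
	u64 data[4];
};

/* Reduced per-device data: only the fields the pattern needs. */
struct dev_data_sketch {
	spinlock_t dte_lock;		/* serializes writers of this device's DTE */
	u16 devid;
};

/* Hypothetical helper: publish one quadword of the DTE. */
static void write_dte_quadword(struct dte_sketch *dte, int i, u64 val)
{
	WRITE_ONCE(dte->data[i], val);
}

/* Hypothetical helper: stand-in for queuing INVALIDATE_DEVTAB_ENTRY and waiting. */
static void flush_dte(u16 devid)
{
}

/* Illustrative sketch of the locked update-and-flush pattern seen in update_dte256(). */
static void update_dte_sketch(struct dev_data_sketch *dd,
			      struct dte_sketch *dte,
			      const struct dte_sketch *new)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dd->dte_lock, flags);
	for (i = 0; i < 4; i++) {
		write_dte_quadword(dte, i, new->data[i]);
		/* flush so the hardware observes each published quadword before the next */
		flush_dte(dd->devid);
	}
	spin_unlock_irqrestore(&dd->dte_lock, flags);
}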