Lines matching full:iommu in drivers/iommu/intel/pasid.c

15 #include <linux/iommu.h>
21 #include "iommu.h"
23 #include "../iommu-pages.h"
26 * Intel IOMMU system wide PASID name space:
63 dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order); in intel_pasid_alloc_table()
74 if (!ecap_coherent(info->iommu->ecap)) in intel_pasid_alloc_table()
151 entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); in intel_pasid_get_entry()
167 if (!ecap_coherent(info->iommu->ecap)) { in intel_pasid_get_entry()
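The two allocation sites above reflect the two-level PASID table layout: intel_pasid_alloc_table() allocates the whole directory up front (line 63, sized by order), while intel_pasid_get_entry() allocates leaf pages lazily with GFP_ATOMIC (line 151). Both paths flush CPU caches by hand when the IOMMU does not snoop coherently (lines 74 and 167). A minimal sketch of the leaf path; dir, dir_index, and the publish step itself are assumptions, since the search elides the non-matching lines:

    entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
    if (!entries)
            return NULL;

    /* Assumed: dir[dir_index] now points at the new leaf. Make the
     * leaf and the updated directory entry visible to a non-coherent
     * IOMMU with explicit cache-line flushes. */
    if (!ecap_coherent(info->iommu->ecap)) {
            clflush_cache_range(entries, VTD_PAGE_SIZE);
            clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
    }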
195 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu, in pasid_cache_invalidation_with_pasid() argument
206 qi_submit_sync(iommu, &desc, 1, 0); in pasid_cache_invalidation_with_pasid()
210 devtlb_invalidation_with_pasid(struct intel_iommu *iommu, in devtlb_invalidation_with_pasid() argument
234 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
236 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
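devtlb_invalidation_with_pasid() pushes an ATS invalidation to the device through the invalidation queue; the address/mask pair (0, 64 - VTD_PAGE_SHIFT) encodes a flush of the entire address range. The split between lines 234 and 236 suggests the usual dispatch on the PASID value, sketched here under the assumption that the PASID-less descriptor is used for IOMMU_NO_PASID:

    if (pasid == IOMMU_NO_PASID)
            qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0,
                               64 - VTD_PAGE_SHIFT);
    else
            qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0,
                                     64 - VTD_PAGE_SHIFT);

sid, pfsid, and qdep come from the device's ATS configuration, per the names visible at line 234.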
239 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, in intel_pasid_tear_down_entry() argument
245 spin_lock(&iommu->lock); in intel_pasid_tear_down_entry()
248 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
255 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
266 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
275 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
277 if (!ecap_coherent(iommu->ecap)) in intel_pasid_tear_down_entry()
280 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_tear_down_entry()
283 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_tear_down_entry()
285 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_tear_down_entry()
287 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_tear_down_entry()
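intel_pasid_tear_down_entry() clears the entry under iommu->lock (the unlocks at 248-266 are early exits; 275 is the success path) and then runs the invalidation sequence required when a present entry goes away: CPU cache flush if the IOMMU is not coherent, PASID cache, IOTLB, then device TLB. The choice at lines 283/285 between a PASID-based flush and a domain-selective one plausibly depends on the entry's translation type; a sketch with that branch condition as an assumption:

    if (!ecap_coherent(iommu->ecap))
            clflush_cache_range(pte, sizeof(*pte));

    pasid_cache_invalidation_with_pasid(iommu, did, pasid);

    /* Assumed condition: first-level and pass-through entries take
     * the PASID-based IOTLB flush, second-level/nested the DSI one. */
    if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
            qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
    else
            iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

    devtlb_invalidation_with_pasid(iommu, dev, pasid);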
296 static void pasid_flush_caches(struct intel_iommu *iommu, in pasid_flush_caches() argument
300 if (!ecap_coherent(iommu->ecap)) in pasid_flush_caches()
303 if (cap_caching_mode(iommu->cap)) { in pasid_flush_caches()
304 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in pasid_flush_caches()
305 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in pasid_flush_caches()
307 iommu_flush_write_buffer(iommu); in pasid_flush_caches()
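pasid_flush_caches() handles a not-present to present transition. On caching-mode hardware (typically an emulated IOMMU) even non-present entries may be cached, so a real invalidation is needed; otherwise flushing the write buffer is enough. The matched lines pin down almost the whole body; only the else-pairing of iommu_flush_write_buffer() is inferred:

    static void pasid_flush_caches(struct intel_iommu *iommu,
                                   struct pasid_entry *pte,
                                   u32 pasid, u16 did)
    {
            if (!ecap_coherent(iommu->ecap))
                    clflush_cache_range(pte, sizeof(*pte));

            if (cap_caching_mode(iommu->cap)) {
                    pasid_cache_invalidation_with_pasid(iommu, did, pasid);
                    qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
            } else {
                    iommu_flush_write_buffer(iommu);
            }
    }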
319 static void intel_pasid_flush_present(struct intel_iommu *iommu, in intel_pasid_flush_present() argument
324 if (!ecap_coherent(iommu->ecap)) in intel_pasid_flush_present()
338 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_flush_present()
339 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_flush_present()
341 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_flush_present()
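intel_pasid_flush_present() is the counterpart for modifying an entry that is already present: PASID cache first, then PASID-based IOTLB, then device TLB, matching the ordering the VT-d spec prescribes for in-place updates. A sketch; the parameter list is inferred from the call sites at lines 451, 569, 713, 739, and 906:

    static void intel_pasid_flush_present(struct intel_iommu *iommu,
                                          struct device *dev,
                                          u32 pasid, u16 did,
                                          struct pasid_entry *pte)
    {
            if (!ecap_coherent(iommu->ecap))
                    clflush_cache_range(pte, sizeof(*pte));

            pasid_cache_invalidation_with_pasid(iommu, did, pasid);
            qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
            devtlb_invalidation_with_pasid(iommu, dev, pasid);
    }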
348 static void pasid_pte_config_first_level(struct intel_iommu *iommu, in pasid_pte_config_first_level() argument
352 lockdep_assert_held(&iommu->lock); in pasid_pte_config_first_level()
366 pasid_set_address_width(pte, iommu->agaw); in pasid_pte_config_first_level()
367 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_first_level()
374 int intel_pasid_setup_first_level(struct intel_iommu *iommu, in intel_pasid_setup_first_level() argument
380 if (!ecap_flts(iommu->ecap)) { in intel_pasid_setup_first_level()
382 iommu->name); in intel_pasid_setup_first_level()
386 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_first_level()
388 iommu->name); in intel_pasid_setup_first_level()
392 spin_lock(&iommu->lock); in intel_pasid_setup_first_level()
395 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
400 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
404 pasid_pte_config_first_level(iommu, pte, pgd, did, flags); in intel_pasid_setup_first_level()
406 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
408 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_first_level()
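All the setup helpers share the locking pattern visible at lines 392-408: capability checks outside the lock, then lock, look up the entry (bailing at 395 when the table is missing, at 400 when the entry is already present), configure, unlock, flush. A sketch of intel_pasid_setup_first_level()'s tail; the bail-out conditions and error codes are assumptions:

    spin_lock(&iommu->lock);
    pte = intel_pasid_get_entry(dev, pasid);
    if (!pte) {
            spin_unlock(&iommu->lock);
            return -ENODEV;      /* assumed error code */
    }
    if (pasid_pte_is_present(pte)) {
            spin_unlock(&iommu->lock);
            return -EBUSY;       /* assumed error code */
    }

    pasid_pte_config_first_level(iommu, pte, pgd, did, flags);
    spin_unlock(&iommu->lock);

    pasid_flush_caches(iommu, pte, pasid, did);
    return 0;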
413 int intel_pasid_replace_first_level(struct intel_iommu *iommu, in intel_pasid_replace_first_level() argument
420 if (!ecap_flts(iommu->ecap)) { in intel_pasid_replace_first_level()
422 iommu->name); in intel_pasid_replace_first_level()
426 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { in intel_pasid_replace_first_level()
428 iommu->name); in intel_pasid_replace_first_level()
432 pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags); in intel_pasid_replace_first_level()
434 spin_lock(&iommu->lock); in intel_pasid_replace_first_level()
437 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
442 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
449 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
451 intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); in intel_pasid_replace_first_level()
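The replace variant inverts the pattern: the new entry is composed into a local pasid_entry outside the lock (line 432), then swapped in under the lock, and the flush targets the old domain ID so stale translations tagged with it are dropped. A sketch; the present-entry check, the old-DID capture via pasid_get_domain_id(), and the error codes are assumptions:

    pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags);

    spin_lock(&iommu->lock);
    pte = intel_pasid_get_entry(dev, pasid);
    if (!pte) {
            spin_unlock(&iommu->lock);
            return -ENODEV;
    }
    if (!pasid_pte_is_present(pte)) {
            spin_unlock(&iommu->lock);
            return -EINVAL;
    }

    old_did = pasid_get_domain_id(pte);   /* assumed helper */
    *pte = new_pte;
    spin_unlock(&iommu->lock);

    intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
    return 0;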
460 static void pasid_pte_config_second_level(struct intel_iommu *iommu, in pasid_pte_config_second_level() argument
465 lockdep_assert_held(&iommu->lock); in pasid_pte_config_second_level()
473 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_second_level()
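Only two lines of pasid_pte_config_second_level() match the search, but together with the call sites at lines 515 and 548 (which pass pgd_val and domain->agaw) they outline the entry layout. A sketch built from the pasid_set_* helpers declared in pasid.h; every call not shown in the listing is an assumption:

    static void pasid_pte_config_second_level(struct intel_iommu *iommu,
                                              struct pasid_entry *pte,
                                              u64 pgd_val, int agaw, u16 did,
                                              bool dirty_tracking)
    {
            lockdep_assert_held(&iommu->lock);

            pasid_clear_entry(pte);
            pasid_set_domain_id(pte, did);
            pasid_set_slptr(pte, pgd_val);     /* second-level page table */
            pasid_set_address_width(pte, agaw);
            pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
            pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
            if (dirty_tracking)
                    pasid_set_ssade(pte);
            pasid_set_present(pte);
    }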
480 int intel_pasid_setup_second_level(struct intel_iommu *iommu, in intel_pasid_setup_second_level() argument
493 if (!ecap_slts(iommu->ecap)) { in intel_pasid_setup_second_level()
495 iommu->name); in intel_pasid_setup_second_level()
501 did = domain_id_iommu(domain, iommu); in intel_pasid_setup_second_level()
503 spin_lock(&iommu->lock); in intel_pasid_setup_second_level()
506 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
511 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
515 pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw, in intel_pasid_setup_second_level()
517 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
519 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_second_level()
524 int intel_pasid_replace_second_level(struct intel_iommu *iommu, in intel_pasid_replace_second_level() argument
538 if (!ecap_slts(iommu->ecap)) { in intel_pasid_replace_second_level()
540 iommu->name); in intel_pasid_replace_second_level()
546 did = domain_id_iommu(domain, iommu); in intel_pasid_replace_second_level()
548 pasid_pte_config_second_level(iommu, &new_pte, pgd_val, in intel_pasid_replace_second_level()
552 spin_lock(&iommu->lock); in intel_pasid_replace_second_level()
555 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
560 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
567 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
569 intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); in intel_pasid_replace_second_level()
578 int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, in intel_pasid_setup_dirty_tracking() argument
585 spin_lock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
589 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
599 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
608 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
616 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
618 if (!ecap_coherent(iommu->ecap)) in intel_pasid_setup_dirty_tracking()
635 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_setup_dirty_tracking()
637 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_setup_dirty_tracking()
639 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_setup_dirty_tracking()
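intel_pasid_setup_dirty_tracking() edits a live entry in place: the unlocks at 589-608 are bail-outs (no entry, incompatible translation type), 616 the success path. After toggling the SSADE bit the full flush sequence runs so that cached translations pick up the new dirty-tracking state. A sketch of the tail; the pasid_set_ssade()/pasid_clear_ssade() step is inferred from the function's purpose:

    if (enabled)
            pasid_set_ssade(pte);
    else
            pasid_clear_ssade(pte);
    spin_unlock(&iommu->lock);

    if (!ecap_coherent(iommu->ecap))
            clflush_cache_range(pte, sizeof(*pte));

    pasid_cache_invalidation_with_pasid(iommu, did, pasid);
    iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
    devtlb_invalidation_with_pasid(iommu, dev, pasid);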
647 static void pasid_pte_config_pass_through(struct intel_iommu *iommu, in pasid_pte_config_pass_through() argument
650 lockdep_assert_held(&iommu->lock); in pasid_pte_config_pass_through()
654 pasid_set_address_width(pte, iommu->agaw); in pasid_pte_config_pass_through()
657 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_pass_through()
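A pass-through entry carries no page-table pointer at all: it needs only a domain ID, the IOMMU's own address width (line 654), and the PT translation type. A sketch of pasid_pte_config_pass_through(); the helpers not visible in the listing are assumptions:

    static void pasid_pte_config_pass_through(struct intel_iommu *iommu,
                                              struct pasid_entry *pte, u16 did)
    {
            lockdep_assert_held(&iommu->lock);

            pasid_clear_entry(pte);
            pasid_set_domain_id(pte, did);
            pasid_set_address_width(pte, iommu->agaw);
            pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
            pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
            pasid_set_present(pte);
    }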
661 int intel_pasid_setup_pass_through(struct intel_iommu *iommu, in intel_pasid_setup_pass_through() argument
667 spin_lock(&iommu->lock); in intel_pasid_setup_pass_through()
670 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
675 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
679 pasid_pte_config_pass_through(iommu, pte, did); in intel_pasid_setup_pass_through()
680 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
682 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_pass_through()
687 int intel_pasid_replace_pass_through(struct intel_iommu *iommu, in intel_pasid_replace_pass_through() argument
694 pasid_pte_config_pass_through(iommu, &new_pte, did); in intel_pasid_replace_pass_through()
696 spin_lock(&iommu->lock); in intel_pasid_replace_pass_through()
699 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
704 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
711 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
713 intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); in intel_pasid_replace_pass_through()
722 void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, in intel_pasid_setup_page_snoop_control() argument
728 spin_lock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
731 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
737 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
739 intel_pasid_flush_present(iommu, dev, pasid, did, pte); in intel_pasid_setup_page_snoop_control()
742 static void pasid_pte_config_nestd(struct intel_iommu *iommu, in pasid_pte_config_nestd() argument
750 lockdep_assert_held(&iommu->lock); in pasid_pte_config_nestd()
775 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_nestd()
784 * @iommu: IOMMU which the device belongs to
793 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, in intel_pasid_setup_nested() argument
798 u16 did = domain_id_iommu(domain, iommu); in intel_pasid_setup_nested()
806 if (!cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_nested()
818 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { in intel_pasid_setup_nested()
820 iommu->name); in intel_pasid_setup_nested()
824 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { in intel_pasid_setup_nested()
826 iommu->name); in intel_pasid_setup_nested()
830 spin_lock(&iommu->lock); in intel_pasid_setup_nested()
833 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
837 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
841 pasid_pte_config_nestd(iommu, pte, s1_cfg, s2_domain, did); in intel_pasid_setup_nested()
842 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
844 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_nested()
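Nested setup validates the userspace-supplied stage-1 configuration against hardware capabilities before touching the PASID entry: 5-level paging at line 806, supervisor requests at 818, extended access flags at 824. A sketch of the latter two checks; the message strings are paraphrased:

    if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
            pr_err_ratelimited("No supervisor request support on %s\n",
                               iommu->name);
            return -EINVAL;
    }

    if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
            pr_err_ratelimited("No extended access flag support on %s\n",
                               iommu->name);
            return -EINVAL;
    }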
849 int intel_pasid_replace_nested(struct intel_iommu *iommu, in intel_pasid_replace_nested() argument
855 u16 did = domain_id_iommu(domain, iommu); in intel_pasid_replace_nested()
863 if (!cap_fl5lp_support(iommu->cap)) { in intel_pasid_replace_nested()
875 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { in intel_pasid_replace_nested()
877 iommu->name); in intel_pasid_replace_nested()
881 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { in intel_pasid_replace_nested()
883 iommu->name); in intel_pasid_replace_nested()
887 pasid_pte_config_nestd(iommu, &new_pte, s1_cfg, s2_domain, did); in intel_pasid_replace_nested()
889 spin_lock(&iommu->lock); in intel_pasid_replace_nested()
892 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
897 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
904 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
906 intel_pasid_flush_present(iommu, dev, pasid, old_did, pte); in intel_pasid_replace_nested()
920 struct intel_iommu *iommu = info->iommu; in device_pasid_table_teardown() local
924 spin_lock(&iommu->lock); in device_pasid_table_teardown()
925 context = iommu_context_addr(iommu, bus, devfn, false); in device_pasid_table_teardown()
927 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
933 __iommu_flush_cache(iommu, context, sizeof(*context)); in device_pasid_table_teardown()
934 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
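device_pasid_table_teardown() clears the device's scalable-mode context entry under iommu->lock and flushes the cleared entry from the CPU caches before unlocking; the unlock at 927 is the entry-not-found bail-out. A sketch; context_clear_entry() is the assumed clearing step, since only the lookup and the cache flush match the search:

    spin_lock(&iommu->lock);
    context = iommu_context_addr(iommu, bus, devfn, false);
    if (!context) {
            spin_unlock(&iommu->lock);
            return;
    }

    context_clear_entry(context);   /* assumed */
    __iommu_flush_cache(iommu, context, sizeof(*context));
    spin_unlock(&iommu->lock);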
982 struct intel_iommu *iommu = info->iommu; in context_entry_set_pasid_table() local
998 __iommu_flush_cache(iommu, context, sizeof(*context)); in context_entry_set_pasid_table()
1006 struct intel_iommu *iommu = info->iommu; in device_pasid_table_setup() local
1009 spin_lock(&iommu->lock); in device_pasid_table_setup()
1010 context = iommu_context_addr(iommu, bus, devfn, true); in device_pasid_table_setup()
1012 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1016 if (context_present(context) && !context_copied(iommu, bus, devfn)) { in device_pasid_table_setup()
1017 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1021 if (context_copied(iommu, bus, devfn)) { in device_pasid_table_setup()
1023 __iommu_flush_cache(iommu, context, sizeof(*context)); in device_pasid_table_setup()
1034 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
1038 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in device_pasid_table_setup()
1039 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in device_pasid_table_setup()
1040 devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID); in device_pasid_table_setup()
1047 clear_context_copied(iommu, bus, devfn); in device_pasid_table_setup()
1051 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1059 if (cap_caching_mode(iommu->cap)) { in device_pasid_table_setup()
1060 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
1064 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); in device_pasid_table_setup()
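device_pasid_table_setup() has two flush tails. When a context entry inherited from a previous kernel is replaced (the context_copied() handling at lines 1021-1047), everything is flushed globally (lines 1034-1040). For a freshly written entry, caching-mode hardware still needs explicit context-cache and IOTLB invalidation because it may cache not-present entries (lines 1059-1064). A sketch of the caching-mode tail; the flush_context() argument values are assumptions:

    if (cap_caching_mode(iommu->cap)) {
            iommu->flush.flush_context(iommu, 0,
                                       PCI_DEVID(bus, devfn),
                                       DMA_CCMD_MASK_NOBIT,
                                       DMA_CCMD_DEVICE_INVL);
            iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
    }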
1105 qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn), in __context_flush_dev_iotlb()
1121 * IOMMU is in scalable mode and all PASID table entries of the device were
1128 struct intel_iommu *iommu = info->iommu; in intel_context_flush_present() local
1138 iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn), in intel_context_flush_present()
1146 if (!sm_supported(iommu)) { in intel_context_flush_present()
1147 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_context_flush_present()
1161 * If the IOMMU is running in scalable mode and there might in intel_context_flush_present()
1166 assert_spin_locked(&iommu->lock); in intel_context_flush_present()
1173 qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0); in intel_context_flush_present()
1174 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_context_flush_present()
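In scalable mode a context-entry change can invalidate every PASID-table entry reachable through it, so after the context-cache flush at line 1138 the function, still holding iommu->lock (line 1166), drops all PASID-cache entries for the domain and issues a domain-selective IOTLB flush; the legacy path at 1146-1147 needs only the IOTLB flush. A sketch of the scalable-mode tail; the closing device-TLB flush through __context_flush_dev_iotlb() is inferred from line 1105:

    assert_spin_locked(&iommu->lock);

    qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
    iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
    __context_flush_dev_iotlb(info);   /* inferred from line 1105 */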