Lines matching "smmu-v3": excerpt from the Arm SMMUv3 SVA driver (drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c)

// SPDX-License-Identifier: GPL-2.0

#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
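
/*
 * Context (editorial): the fragment below is from
 * arm_smmu_update_s1_domain_cd_entry(). When a stage-1 domain's CD
 * changes (in current kernels, notably when its ASID is reassigned to
 * make room for an SVA binding), every master attached to the domain has
 * its CD entry rewritten in place so the hardware observes the new value.
 */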
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;
		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target_cd);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
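
/*
 * Context (editorial): next is arm_smmu_make_sva_cd(), which builds the
 * Context Descriptor (CD) for an SVA binding. Given a live mm it points
 * TTB0 at the process page table (mm->pgd) so the SMMU walks the same
 * tables as the CPU; given mm == NULL it builds a CD under the same ASID
 * that faults every access instead.
 */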
	target->data[0] = cpu_to_le64(
		/* ... valid bit, TCR, IPS and attribute fields elided ... */
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		FIELD_PREP(CTXDESC_CD_0_ASID, asid));

	if (mm) {
		target->data[0] |= cpu_to_le64(
			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
				   64ULL - vabits_actual) /* | ... */);
		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
					      CTXDESC_CD_1_TTB0_MASK);
	} else {
		/* No mm: EPD0 disables TTB0 walks, so every access faults */
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);

		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			target->data[0] &=
				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
	}

	/* MAIR is effectively constant and global; take it from this CPU */
	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));

	/*
	 * Note that we don't bother with S1PIE on the SMMU, we just rely on
	 * our default encoding scheme matching direct permissions anyway. The
	 * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
	 * either is enabled for CPUs.
	 */
	if (system_supports_poe() || system_supports_gcs())
		dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h: this
 * is used as a threshold to replace the per-page TLBI commands issued to
 * the command queue with a single address-space (per-ASID) TLBI command,
 * when an SMMU without the range invalidation feature would otherwise
 * have to handle so many per-page TLBI commands that it risks a soft
 * lockup.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))
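
/*
 * Example (editorial): with 4K pages (PAGE_SHIFT == 12) the threshold is
 * 1 << (12 - 3) = 512 commands, so invalidating a range of 512 or more
 * pages is collapsed into a single per-ASID TLBI.
 *
 * The fragment below is from arm_smmu_mm_arch_invalidate_secondary_tlbs(),
 * the mmu_notifier op that mirrors CPU TLB invalidations to the SMMU:
 */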
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;	/* 0 selects a full-ASID invalidation */
	}

	if (!size)
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
	else
		arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid,
					    PAGE_SIZE, false, smmu_domain);
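
/*
 * Context (editorial): arm_smmu_mm_release() runs from the mmu_notifier
 * when the address space exits while DMA may still be in flight. Rather
 * than clearing the CD, which would raise C_BAD_CD events, each attached
 * entry is replaced with the "fault everything" CD that
 * arm_smmu_make_sva_cd() builds for mm == NULL:
 */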
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		struct arm_smmu_master *master = master_domain->master;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;
		arm_smmu_make_sva_cd(&target, master, NULL,
				     smmu_domain->cd.asid);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
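
/*
 * Context (editorial): arm_smmu_sva_supported() is the system-wide
 * capability check. Sharing a process page table requires a
 * cache-coherent SMMU that supports the CPU's page size, output address
 * size (OAS) and at least as many ASID bits:
 */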
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY; /* plus FEAT_VAX for 52-bit VAs */

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/* ... oas and asid_bits derived from sanitised ID_AA64MMFR0_EL1 ... */
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	if (smmu->asid_bits < asid_bits)
		return false;

	/* With KPTI each mm consumes an ASID pair, halving the usable space */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}
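
/*
 * Context (editorial): per-master checks follow. Faults are reported
 * through the event queue without tracking which stream ID raised them,
 * hence the single-stream requirement; SVA additionally needs substream
 * IDs (SSIDs) to carry PASIDs:
 */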
/* in arm_smmu_master_iopf_supported(): */
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;
	return master->stall_enabled;

/* in arm_smmu_master_sva_supported(): */
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;
	/* SSID support is mandatory for the moment */
	return master->ssid_bits;

/* in arm_smmu_master_sva_enabled(): */
	enabled = master->sva_enabled;
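
/*
 * Context (editorial): enabling SVA on a master is a two-step toggle
 * under sva_lock: hook the device into the shared IOPF queue first, then
 * set sva_enabled; disabling reverses the order. The IOPF half:
 */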
/* in arm_smmu_master_sva_enable_iopf(): */
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;
	if (!master->iopf_enabled)
		return -EINVAL;
	return iopf_queue_add_device(master->smmu->evtq.iopf, dev);

/* in arm_smmu_master_sva_disable_iopf(): */
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);

/* in arm_smmu_master_enable_sva(), under sva_lock: */
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;

/* in arm_smmu_master_disable_sva(), under sva_lock: */
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
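
/*
 * Context (editorial): arm_smmu_sva_set_dev_pasid() attaches an SVA
 * domain to one PASID of a device. It pins the mm with mmget_not_zero()
 * for the duration of the attach so that arm_smmu_mm_release() cannot
 * race with the CD installation:
 */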
	/* Prevent arm_smmu_mm_release from being called while we are attaching */
	if (!mmget_not_zero(domain->mm))
		return -EINVAL;

	arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);
	ret = arm_smmu_set_pasid(master, smmu_domain, id, &target);

	mmput(domain->mm);
	return ret;
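
/*
 * Context (editorial): teardown ordering in arm_smmu_sva_domain_free()
 * matters: the ASID is flushed from the SMMU TLBs before being released
 * for reuse, and the final free is deferred to the mmu_notifier's SRCU
 * callback:
 */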
	/* Ensure the ASID is empty in the iommu cache before allowing reuse */
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);

	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);

	/* The actual free is deferred to the mmu_notifier SRCU callback */
	mmu_notifier_put(&smmu_domain->mmu_notifier);
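
/*
 * Context (editorial): arm_smmu_sva_domain_alloc() ties it together:
 * allocate a domain, reserve an ASID (XA_LIMIT(1, ...) keeps ASID 0
 * invalid), and register the mmu_notifier that keeps the SMMU in sync
 * with the mm:
 */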
	struct arm_smmu_device *smmu = master->smmu;

	smmu_domain = arm_smmu_domain_alloc();
	if (IS_ERR(smmu_domain))
		return ERR_CAST(smmu_domain);
	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
	smmu_domain->smmu = smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		goto err_free;

	smmu_domain->cd.asid = asid;
	smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops;
	ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm);
	if (ret)
		goto err_asid;

	return &smmu_domain->domain;

err_asid:
	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);