// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <kunit/visibility.h>

#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"

static DEFINE_MUTEX(sva_lock);

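/*
 * Re-sync the stage-1 CD of every master attached to this domain: rebuild the
 * CD from the current domain state and rewrite each device's entry. Intended
 * for use when the domain's CD contents (such as its ASID) change under it.
 */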
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_master_domain *master_domain;
	struct arm_smmu_cd target_cd;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;

		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target_cd);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}

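/*
 * Translate the kernel's PAGE_SIZE into the TG0 granule encoding used in the
 * CD's TCR field. Only 4K, 16K and 64K granules exist, which the static_assert
 * below enforces.
 */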
static u64 page_size_to_cd(void)
{
	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
		      PAGE_SIZE == SZ_64K);
	if (PAGE_SIZE == SZ_64K)
		return ARM_LPAE_TCR_TG0_64K;
	if (PAGE_SIZE == SZ_16K)
		return ARM_LPAE_TCR_TG0_16K;
	return ARM_LPAE_TCR_TG0_4K;
}

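/*
 * Build a CD that shares the CPU's stage-1 page tables with the SMMU. The TCR
 * fields mirror the CPU configuration (VA size, granule, WBWA inner-shareable
 * cacheability) and TTB0 points at mm->pgd. When @mm is NULL, an EPD0
 * "fault everything" CD is built instead; see the comment in the body.
 */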
VISIBLE_IF_KUNIT
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
			  struct arm_smmu_master *master, struct mm_struct *mm,
			  u16 asid)
{
	u64 par;

	memset(target, 0, sizeof(*target));

	par = cpuid_feature_extract_unsigned_field(
		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
		ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	target->data[0] = cpu_to_le64(
		CTXDESC_CD_0_TCR_EPD1 |
#ifdef __BIG_ENDIAN
		CTXDESC_CD_0_ENDI |
#endif
		CTXDESC_CD_0_V |
		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
		CTXDESC_CD_0_AA64 |
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		CTXDESC_CD_0_R |
		CTXDESC_CD_0_A |
		CTXDESC_CD_0_ASET |
		FIELD_PREP(CTXDESC_CD_0_ASID, asid));

	/*
	 * If no MM is passed then this creates an SVA entry that faults
	 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
	 * two entry types since TTB0 is ignored by HW when EPD0 is set.
	 */
	if (mm) {
		target->data[0] |= cpu_to_le64(
			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
				   64ULL - vabits_actual) |
			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));

		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
					      CTXDESC_CD_1_TTB0_MASK);
	} else {
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);

		/*
		 * Disable stall and immediately generate an abort if stall
		 * disable is permitted. This speeds up cleanup for an unclean
		 * exit if the device is still doing a lot of DMA.
		 */
		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			target->data[0] &=
				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
	}

	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register.
	 */
	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));

	/*
	 * Note that we don't bother with S1PIE on the SMMU, we just rely on
	 * our default encoding scheme matching direct permissions anyway.
	 * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
	 * either is enabled for CPUs, just in case anyone imagines otherwise.
	 */
	if (system_supports_poe() || system_supports_gcs())
		dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. On an SMMU
 * without the range invalidation feature, a large range would otherwise be
 * invalidated with one TLBI command per page, which can soft-lock the command
 * queue; above this threshold we issue a single address-space (ASID) TLBI
 * instead.
 */
#define CMDQ_MAX_TLBI_OPS		(1 << (PAGE_SHIFT - 3))

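/*
 * mmu_notifier callback: the CPU has invalidated TLB entries for [start, end),
 * so mirror the invalidation into the SMMU TLB (by range or by ASID, depending
 * on size and hardware support) and into any device ATCs.
 */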
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_domain *smmu_domain =
		container_of(mn, struct arm_smmu_domain, mmu_notifier);
	size_t size;

	/*
	 * mm_types defines the end of a VMA as the first byte after the last
	 * address, whereas the IOMMU subsystem uses the last address of a
	 * range, so translate here by computing the size of the span directly.
	 */
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	} else {
		if (size == ULONG_MAX)
			size = 0;
	}

	if (!size)
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
	else
		arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid,
					    PAGE_SIZE, false, smmu_domain);

	arm_smmu_atc_inv_domain(smmu_domain, start, size);
}

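/*
 * mmu_notifier release callback: the mm is going away. Swap every attached CD
 * for a faulting (EPD0) one and flush the SMMU TLB and ATCs, so that any DMA
 * still in flight aborts instead of walking freed page tables.
 */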
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_domain *smmu_domain =
		container_of(mn, struct arm_smmu_domain, mmu_notifier);
	struct arm_smmu_master_domain *master_domain;
	unsigned long flags;

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
	 * but disable translation.
	 */
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd target;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;
		arm_smmu_make_sva_cd(&target, master, NULL,
				     smmu_domain->cd.asid);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct arm_smmu_domain, mmu_notifier));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs	= arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release			= arm_smmu_mm_release,
	.free_notifier			= arm_smmu_mmu_notifier_free,
};

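/*
 * Check whether this SMMU can share CPU page tables at all: it must be
 * cache-coherent, support the CPU's page size, VA range and output address
 * size, and provide at least as many ASID bits as the CPU.
 */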
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52) {
		/* We don't support LPA2 */
		if (PAGE_SIZE != SZ_64K)
			return false;
		feat_mask |= ARM_SMMU_FEAT_VAX;
	}

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

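/*
 * IOPF requires stall support, and the fault path does not record which
 * StreamID generated an event, so restrict it to single-StreamID masters.
 */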
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

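/* SVA needs both the SMMU-wide SVA feature and per-master SSID support. */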
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

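/*
 * Enable SVA for this master under sva_lock, registering it with the event
 * queue's IOPF queue when the device relies on stall-based I/O page faults.
 */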
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

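/*
 * Attach an SVA domain to one PASID of the device: build a CD that points at
 * the mm's page tables and install it in the PASID's CD table entry.
 */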
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id,
				      struct iommu_domain *old)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_cd target;
	int ret;

	/* Prevent arm_smmu_mm_release from being called while we are attaching */
	if (!mmget_not_zero(domain->mm))
		return -EINVAL;

	/*
	 * This does not need the arm_smmu_asid_lock because SVA domains never
	 * get reassigned
	 */
	arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);
	ret = arm_smmu_set_pasid(master, smmu_domain, id, &target, old);

	mmput(domain->mm);
	return ret;
}

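/*
 * Tear down an SVA domain: invalidate its ASID in the SMMU TLB, return the
 * ASID to the allocator, and drop the mmu_notifier reference (the struct is
 * freed later from the notifier's SRCU callback).
 */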
static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Ensure the ASID is empty in the iommu cache before allowing reuse.
	 */
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);

	/*
	 * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can
	 * still be called/running at this point. We allow the ASID to be
	 * reused, and if there is a race then it just suffers harmless
	 * unnecessary invalidation.
	 */
	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);

	/*
	 * Actual free is deferred to the SRCU callback
	 * arm_smmu_mmu_notifier_free()
	 */
	mmu_notifier_put(&smmu_domain->mmu_notifier);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid		= arm_smmu_sva_set_dev_pasid,
	.free			= arm_smmu_sva_domain_free
};

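/*
 * Allocate an SVA domain for @mm on @dev's SMMU: pick a free ASID, register an
 * mmu_notifier on the mm so CPU TLB invalidations and mm teardown are mirrored
 * to the device, and return the new domain.
 */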
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
					       struct mm_struct *mm)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_domain *smmu_domain;
	u32 asid;
	int ret;

	smmu_domain = arm_smmu_domain_alloc();
	if (IS_ERR(smmu_domain))
		return ERR_CAST(smmu_domain);
	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
	smmu_domain->smmu = smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		goto err_free;

	smmu_domain->cd.asid = asid;
	smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops;
	ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm);
	if (ret)
		goto err_asid;

	return &smmu_domain->domain;

err_asid:
	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
err_free:
	kfree(smmu_domain);
	return ERR_PTR(ret);
}