1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3  *
4  * Kernel side components to support tools/testing/selftests/iommu
5  */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
15 
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
20 
21 static DECLARE_FAULT_ATTR(fail_iommufd);
22 static struct dentry *dbgfs_root;
23 static struct platform_device *selftest_iommu_dev;
24 static const struct iommu_ops mock_ops;
25 static struct iommu_domain_ops domain_nested_ops;
26 
27 size_t iommufd_test_memory_limit = 65536;
28 
29 struct mock_bus_type {
30 	struct bus_type bus;
31 	struct notifier_block nb;
32 };
33 
34 static struct mock_bus_type iommufd_mock_bus_type = {
35 	.bus = {
36 		.name = "iommufd_mock",
37 	},
38 };
39 
40 static DEFINE_IDA(mock_dev_ida);
41 
42 enum {
43 	MOCK_DIRTY_TRACK = 1,
44 	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
45 	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
46 
47 	/*
48 	 * Like a real page table, alignment requires the low bits of the address
49 	 * to be zero. xarray also requires the high bit to be zero, so we store
50 	 * the pfns shifted. The upper bits are used for metadata.
51 	 */
52 	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
53 
54 	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
55 	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
56 	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
57 	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
58 	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
59 };
60 
61 /*
62  * Syzkaller has trouble randomizing the correct iova to use since it is linked
63  * to the map ioctl's output, and it has no idea about that. So, simplify things.
64  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
65  * value. This has a much smaller randomization space and syzkaller can hit it.
66  */
67 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
68 						  u64 *iova)
69 {
70 	struct syz_layout {
71 		__u32 nth_area;
72 		__u32 offset;
73 	};
74 	struct syz_layout *syz = (void *)iova;
75 	unsigned int nth = syz->nth_area;
76 	struct iopt_area *area;
77 
78 	down_read(&iopt->iova_rwsem);
79 	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
80 	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
81 		if (nth == 0) {
82 			up_read(&iopt->iova_rwsem);
83 			return iopt_area_iova(area) + syz->offset;
84 		}
85 		nth--;
86 	}
87 	up_read(&iopt->iova_rwsem);
88 
89 	return 0;
90 }
91 
92 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
93 						u64 *iova)
94 {
95 	unsigned long ret;
96 
97 	mutex_lock(&access->ioas_lock);
98 	if (!access->ioas) {
99 		mutex_unlock(&access->ioas_lock);
100 		return 0;
101 	}
102 	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
103 	mutex_unlock(&access->ioas_lock);
104 	return ret;
105 }
106 
107 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
108 				   unsigned int ioas_id, u64 *iova, u32 *flags)
109 {
110 	struct iommufd_ioas *ioas;
111 
112 	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
113 		return;
114 	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
115 
116 	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
117 	if (IS_ERR(ioas))
118 		return;
119 	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
120 	iommufd_put_object(ucmd->ictx, &ioas->obj);
121 }
122 
123 struct mock_iommu_domain {
124 	unsigned long flags;
125 	struct iommu_domain domain;
126 	struct xarray pfns;
127 };
128 
129 static inline struct mock_iommu_domain *
130 to_mock_domain(struct iommu_domain *domain)
131 {
132 	return container_of(domain, struct mock_iommu_domain, domain);
133 }
134 
135 struct mock_iommu_domain_nested {
136 	struct iommu_domain domain;
137 	struct mock_viommu *mock_viommu;
138 	struct mock_iommu_domain *parent;
139 	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
140 };
141 
142 static inline struct mock_iommu_domain_nested *
143 to_mock_nested(struct iommu_domain *domain)
144 {
145 	return container_of(domain, struct mock_iommu_domain_nested, domain);
146 }
147 
148 struct mock_viommu {
149 	struct iommufd_viommu core;
150 	struct mock_iommu_domain *s2_parent;
151 };
152 
153 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
154 {
155 	return container_of(viommu, struct mock_viommu, core);
156 }
157 
158 enum selftest_obj_type {
159 	TYPE_IDEV,
160 };
161 
162 struct mock_dev {
163 	struct device dev;
164 	unsigned long flags;
165 	int id;
166 	u32 cache[MOCK_DEV_CACHE_NUM];
167 };
168 
169 static inline struct mock_dev *to_mock_dev(struct device *dev)
170 {
171 	return container_of(dev, struct mock_dev, dev);
172 }
173 
174 struct selftest_obj {
175 	struct iommufd_object obj;
176 	enum selftest_obj_type type;
177 
178 	union {
179 		struct {
180 			struct iommufd_device *idev;
181 			struct iommufd_ctx *ictx;
182 			struct mock_dev *mock_dev;
183 		} idev;
184 	};
185 };
186 
187 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
188 {
189 	return container_of(obj, struct selftest_obj, obj);
190 }
191 
192 static int mock_domain_nop_attach(struct iommu_domain *domain,
193 				  struct device *dev)
194 {
195 	struct mock_dev *mdev = to_mock_dev(dev);
196 
197 	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
198 		return -EINVAL;
199 
200 	return 0;
201 }
202 
203 static const struct iommu_domain_ops mock_blocking_ops = {
204 	.attach_dev = mock_domain_nop_attach,
205 };
206 
207 static struct iommu_domain mock_blocking_domain = {
208 	.type = IOMMU_DOMAIN_BLOCKED,
209 	.ops = &mock_blocking_ops,
210 };
211 
212 static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
213 {
214 	struct iommu_test_hw_info *info;
215 
216 	info = kzalloc(sizeof(*info), GFP_KERNEL);
217 	if (!info)
218 		return ERR_PTR(-ENOMEM);
219 
220 	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
221 	*length = sizeof(*info);
222 	*type = IOMMU_HW_INFO_TYPE_SELFTEST;
223 
224 	return info;
225 }
226 
227 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
228 					  bool enable)
229 {
230 	struct mock_iommu_domain *mock = to_mock_domain(domain);
231 	unsigned long flags = mock->flags;
232 
233 	if (enable && !domain->dirty_ops)
234 		return -EINVAL;
235 
236 	/* No change? */
237 	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
238 		return 0;
239 
240 	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
241 
242 	mock->flags = flags;
243 	return 0;
244 }
245 
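/*
 * Check each MOCK_IO_PAGE_SIZE slot covering [iova, iova + page_size) for
 * MOCK_PFN_DIRTY_IOVA. Returns true if any slot was dirty; the dirty bit is
 * cleared unless IOMMU_DIRTY_NO_CLEAR is set in flags.
 */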
246 static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
247 				      unsigned long iova, size_t page_size,
248 				      unsigned long flags)
249 {
250 	unsigned long cur, end = iova + page_size - 1;
251 	bool dirty = false;
252 	void *ent, *old;
253 
254 	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
255 		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
256 		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
257 			continue;
258 
259 		dirty = true;
260 		/* Clear dirty */
261 		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
262 			unsigned long val;
263 
264 			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
265 			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
266 				       xa_mk_value(val), GFP_KERNEL);
267 			WARN_ON_ONCE(ent != old);
268 		}
269 	}
270 
271 	return dirty;
272 }
273 
274 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
275 					    unsigned long iova, size_t size,
276 					    unsigned long flags,
277 					    struct iommu_dirty_bitmap *dirty)
278 {
279 	struct mock_iommu_domain *mock = to_mock_domain(domain);
280 	unsigned long end = iova + size;
281 	void *ent;
282 
283 	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
284 		return -EINVAL;
285 
286 	do {
287 		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
288 		unsigned long head;
289 
290 		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
291 		if (!ent) {
292 			iova += pgsize;
293 			continue;
294 		}
295 
296 		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
297 			pgsize = MOCK_HUGE_PAGE_SIZE;
298 		head = iova & ~(pgsize - 1);
299 
300 		/* Clear dirty */
301 		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
302 			iommu_dirty_bitmap_record(dirty, iova, pgsize);
303 		iova += pgsize;
304 	} while (iova < end);
305 
306 	return 0;
307 }
308 
309 static const struct iommu_dirty_ops dirty_ops = {
310 	.set_dirty_tracking = mock_domain_set_dirty_tracking,
311 	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
312 };
313 
314 static struct mock_iommu_domain_nested *
315 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
316 {
317 	struct mock_iommu_domain_nested *mock_nested;
318 	struct iommu_hwpt_selftest user_cfg;
319 	int rc, i;
320 
321 	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
322 		return ERR_PTR(-EOPNOTSUPP);
323 
324 	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
325 					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
326 	if (rc)
327 		return ERR_PTR(rc);
328 
329 	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
330 	if (!mock_nested)
331 		return ERR_PTR(-ENOMEM);
332 	mock_nested->domain.ops = &domain_nested_ops;
333 	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
334 	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
335 		mock_nested->iotlb[i] = user_cfg.iotlb;
336 	return mock_nested;
337 }
338 
339 static struct iommu_domain *
340 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
341 			 u32 flags, const struct iommu_user_data *user_data)
342 {
343 	struct mock_iommu_domain_nested *mock_nested;
344 	struct mock_iommu_domain *mock_parent;
345 
346 	if (flags)
347 		return ERR_PTR(-EOPNOTSUPP);
348 	if (!parent || parent->ops != mock_ops.default_domain_ops)
349 		return ERR_PTR(-EINVAL);
350 
351 	mock_parent = to_mock_domain(parent);
352 	if (!mock_parent)
353 		return ERR_PTR(-EINVAL);
354 
355 	mock_nested = __mock_domain_alloc_nested(user_data);
356 	if (IS_ERR(mock_nested))
357 		return ERR_CAST(mock_nested);
358 	mock_nested->parent = mock_parent;
359 	return &mock_nested->domain;
360 }
361 
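/*
 * Allocate an UNMANAGED mock paging domain. MOCK_FLAGS_DEVICE_HUGE_IOVA adds
 * the huge page size to the pgsize_bitmap, and IOMMU_HWPT_ALLOC_DIRTY_TRACKING
 * attaches the mock dirty_ops (rejected for MOCK_FLAGS_DEVICE_NO_DIRTY devices).
 */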
362 static struct iommu_domain *
363 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
364 			       const struct iommu_user_data *user_data)
365 {
366 	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
367 	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
368 				 IOMMU_HWPT_ALLOC_NEST_PARENT;
369 	struct mock_dev *mdev = to_mock_dev(dev);
370 	bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
371 	struct mock_iommu_domain *mock;
372 
373 	if (user_data)
374 		return ERR_PTR(-EOPNOTSUPP);
375 	if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
376 		return ERR_PTR(-EOPNOTSUPP);
377 
378 	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
379 	if (!mock)
380 		return ERR_PTR(-ENOMEM);
381 	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
382 	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
383 	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
384 	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
385 		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
386 	mock->domain.ops = mock_ops.default_domain_ops;
387 	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
388 	xa_init(&mock->pfns);
389 
390 	if (has_dirty_flag)
391 		mock->domain.dirty_ops = &dirty_ops;
392 	return &mock->domain;
393 }
394 
395 static void mock_domain_free(struct iommu_domain *domain)
396 {
397 	struct mock_iommu_domain *mock = to_mock_domain(domain);
398 
399 	WARN_ON(!xa_empty(&mock->pfns));
400 	kfree(mock);
401 }
402 
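/*
 * Emulate a page table with an xarray: one entry per MOCK_IO_PAGE_SIZE page
 * holding the shifted pfn plus metadata bits. The first and last entries of a
 * mapping are tagged MOCK_PFN_START_IOVA/MOCK_PFN_LAST_IOVA so unmap can check
 * range boundaries; already-stored entries are erased if a store fails.
 */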
403 static int mock_domain_map_pages(struct iommu_domain *domain,
404 				 unsigned long iova, phys_addr_t paddr,
405 				 size_t pgsize, size_t pgcount, int prot,
406 				 gfp_t gfp, size_t *mapped)
407 {
408 	struct mock_iommu_domain *mock = to_mock_domain(domain);
409 	unsigned long flags = MOCK_PFN_START_IOVA;
410 	unsigned long start_iova = iova;
411 
412 	/*
413 	 * xarray does not reliably work with fault injection because it does a
414 	 * retry allocation, so put our own failure point.
415 	 */
416 	if (iommufd_should_fail())
417 		return -ENOENT;
418 
419 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
420 	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
421 	for (; pgcount; pgcount--) {
422 		size_t cur;
423 
424 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
425 			void *old;
426 
427 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
428 				flags = MOCK_PFN_LAST_IOVA;
429 			if (pgsize != MOCK_IO_PAGE_SIZE) {
430 				flags |= MOCK_PFN_HUGE_IOVA;
431 			}
432 			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
433 				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
434 						   flags),
435 				       gfp);
436 			if (xa_is_err(old)) {
437 				for (; start_iova != iova;
438 				     start_iova += MOCK_IO_PAGE_SIZE)
439 					xa_erase(&mock->pfns,
440 						 start_iova /
441 							 MOCK_IO_PAGE_SIZE);
442 				return xa_err(old);
443 			}
444 			WARN_ON(old);
445 			iova += MOCK_IO_PAGE_SIZE;
446 			paddr += MOCK_IO_PAGE_SIZE;
447 			*mapped += MOCK_IO_PAGE_SIZE;
448 			flags = 0;
449 		}
450 	}
451 	return 0;
452 }
453 
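/*
 * Erase the xarray entries backing the unmapped range, using the START/LAST
 * metadata bits to check that the unmap lines up with an earlier map (see the
 * comment below for the huge page exception).
 */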
454 static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
455 				      unsigned long iova, size_t pgsize,
456 				      size_t pgcount,
457 				      struct iommu_iotlb_gather *iotlb_gather)
458 {
459 	struct mock_iommu_domain *mock = to_mock_domain(domain);
460 	bool first = true;
461 	size_t ret = 0;
462 	void *ent;
463 
464 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
465 	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
466 
467 	for (; pgcount; pgcount--) {
468 		size_t cur;
469 
470 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
471 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
472 
473 			/*
474 			 * iommufd generates unmaps that must be a strict
475 			 * superset of the maps performed, so every
476 			 * starting/ending IOVA should have been an iova passed
477 			 * to map.
478 			 *
479 			 * This simple logic doesn't work when the HUGE_PAGE is
480 			 * turned on since the core code will automatically
481 			 * switch between the two page sizes creating a break in
482 			 * the unmap calls. The break can land in the middle of
483 			 * contiguous IOVA.
484 			 */
485 			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
486 				if (first) {
487 					WARN_ON(ent && !(xa_to_value(ent) &
488 							 MOCK_PFN_START_IOVA));
489 					first = false;
490 				}
491 				if (pgcount == 1 &&
492 				    cur + MOCK_IO_PAGE_SIZE == pgsize)
493 					WARN_ON(ent && !(xa_to_value(ent) &
494 							 MOCK_PFN_LAST_IOVA));
495 			}
496 
497 			iova += MOCK_IO_PAGE_SIZE;
498 			ret += MOCK_IO_PAGE_SIZE;
499 		}
500 	}
501 	return ret;
502 }
503 
504 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
505 					    dma_addr_t iova)
506 {
507 	struct mock_iommu_domain *mock = to_mock_domain(domain);
508 	void *ent;
509 
510 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
511 	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
512 	WARN_ON(!ent);
513 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
514 }
515 
516 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
517 {
518 	struct mock_dev *mdev = to_mock_dev(dev);
519 
520 	switch (cap) {
521 	case IOMMU_CAP_CACHE_COHERENCY:
522 		return true;
523 	case IOMMU_CAP_DIRTY_TRACKING:
524 		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
525 	default:
526 		break;
527 	}
528 
529 	return false;
530 }
531 
532 static struct iopf_queue *mock_iommu_iopf_queue;
533 
534 static struct mock_iommu_device {
535 	struct iommu_device iommu_dev;
536 	struct completion complete;
537 	refcount_t users;
538 } mock_iommu;
539 
540 static struct iommu_device *mock_probe_device(struct device *dev)
541 {
542 	if (dev->bus != &iommufd_mock_bus_type.bus)
543 		return ERR_PTR(-ENODEV);
544 	return &mock_iommu.iommu_dev;
545 }
546 
547 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
548 				      struct iommu_page_response *msg)
549 {
550 }
551 
552 static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
553 {
554 	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
555 		return -ENODEV;
556 
557 	return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
558 }
559 
560 static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
561 {
562 	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
563 		return -ENODEV;
564 
565 	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
566 
567 	return 0;
568 }
569 
570 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
571 {
572 	struct mock_iommu_device *mock_iommu = container_of(
573 		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
574 
575 	if (refcount_dec_and_test(&mock_iommu->users))
576 		complete(&mock_iommu->complete);
577 
578 	/* iommufd core frees mock_viommu and viommu */
579 }
580 
581 static struct iommu_domain *
582 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
583 				const struct iommu_user_data *user_data)
584 {
585 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
586 	struct mock_iommu_domain_nested *mock_nested;
587 
588 	if (flags)
589 		return ERR_PTR(-EOPNOTSUPP);
590 
591 	mock_nested = __mock_domain_alloc_nested(user_data);
592 	if (IS_ERR(mock_nested))
593 		return ERR_CAST(mock_nested);
594 	mock_nested->mock_viommu = mock_viommu;
595 	mock_nested->parent = mock_viommu->s2_parent;
596 	return &mock_nested->domain;
597 }
598 
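/*
 * Copy the user invalidation array, then for each entry look up the mock
 * device by vdev_id and zero the selected device cache entry (or all entries
 * when IOMMU_TEST_INVALIDATE_FLAG_ALL is set). array->entry_num is updated to
 * the number of commands actually processed.
 */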
599 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
600 					struct iommu_user_data_array *array)
601 {
602 	struct iommu_viommu_invalidate_selftest *cmds;
603 	struct iommu_viommu_invalidate_selftest *cur;
604 	struct iommu_viommu_invalidate_selftest *end;
605 	int rc;
606 
607 	/* A zero-length array is allowed to validate the array type */
608 	if (array->entry_num == 0 &&
609 	    array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
610 		array->entry_num = 0;
611 		return 0;
612 	}
613 
614 	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
615 	if (!cmds)
616 		return -ENOMEM;
617 	cur = cmds;
618 	end = cmds + array->entry_num;
619 
620 	static_assert(sizeof(*cmds) == 3 * sizeof(u32));
621 	rc = iommu_copy_struct_from_full_user_array(
622 		cmds, sizeof(*cmds), array,
623 		IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
624 	if (rc)
625 		goto out;
626 
627 	while (cur != end) {
628 		struct mock_dev *mdev;
629 		struct device *dev;
630 		int i;
631 
632 		if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
633 			rc = -EOPNOTSUPP;
634 			goto out;
635 		}
636 
637 		if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
638 			rc = -EINVAL;
639 			goto out;
640 		}
641 
642 		xa_lock(&viommu->vdevs);
643 		dev = iommufd_viommu_find_dev(viommu,
644 					      (unsigned long)cur->vdev_id);
645 		if (!dev) {
646 			xa_unlock(&viommu->vdevs);
647 			rc = -EINVAL;
648 			goto out;
649 		}
650 		mdev = container_of(dev, struct mock_dev, dev);
651 
652 		if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
653 			/* Invalidate all cache entries and ignore cache_id */
654 			for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
655 				mdev->cache[i] = 0;
656 		} else {
657 			mdev->cache[cur->cache_id] = 0;
658 		}
659 		xa_unlock(&viommu->vdevs);
660 
661 		cur++;
662 	}
663 out:
664 	array->entry_num = cur - cmds;
665 	kfree(cmds);
666 	return rc;
667 }
668 
669 static struct iommufd_viommu_ops mock_viommu_ops = {
670 	.destroy = mock_viommu_destroy,
671 	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
672 	.cache_invalidate = mock_viommu_cache_invalidate,
673 };
674 
675 static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
676 						struct iommu_domain *domain,
677 						struct iommufd_ctx *ictx,
678 						unsigned int viommu_type)
679 {
680 	struct mock_iommu_device *mock_iommu =
681 		iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
682 	struct mock_viommu *mock_viommu;
683 
684 	if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
685 		return ERR_PTR(-EOPNOTSUPP);
686 
687 	mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
688 					   &mock_viommu_ops);
689 	if (IS_ERR(mock_viommu))
690 		return ERR_CAST(mock_viommu);
691 
692 	refcount_inc(&mock_iommu->users);
693 	return &mock_viommu->core;
694 }
695 
696 static const struct iommu_ops mock_ops = {
697 	/*
698 	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
699 	 * because it is zero.
700 	 */
701 	.default_domain = &mock_blocking_domain,
702 	.blocked_domain = &mock_blocking_domain,
703 	.owner = THIS_MODULE,
704 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
705 	.hw_info = mock_domain_hw_info,
706 	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
707 	.domain_alloc_nested = mock_domain_alloc_nested,
708 	.capable = mock_domain_capable,
709 	.device_group = generic_device_group,
710 	.probe_device = mock_probe_device,
711 	.page_response = mock_domain_page_response,
712 	.dev_enable_feat = mock_dev_enable_feat,
713 	.dev_disable_feat = mock_dev_disable_feat,
714 	.user_pasid_table = true,
715 	.viommu_alloc = mock_viommu_alloc,
716 	.default_domain_ops =
717 		&(struct iommu_domain_ops){
718 			.free = mock_domain_free,
719 			.attach_dev = mock_domain_nop_attach,
720 			.map_pages = mock_domain_map_pages,
721 			.unmap_pages = mock_domain_unmap_pages,
722 			.iova_to_phys = mock_domain_iova_to_phys,
723 		},
724 };
725 
726 static void mock_domain_free_nested(struct iommu_domain *domain)
727 {
728 	kfree(to_mock_nested(domain));
729 }
730 
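/*
 * Zero the selected mock IOTLB entries of the nested domain according to the
 * user-provided invalidation array, reporting the number of entries processed
 * back through array->entry_num.
 */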
731 static int
732 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
733 				  struct iommu_user_data_array *array)
734 {
735 	struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
736 	struct iommu_hwpt_invalidate_selftest inv;
737 	u32 processed = 0;
738 	int i = 0, j;
739 	int rc = 0;
740 
741 	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
742 		rc = -EINVAL;
743 		goto out;
744 	}
745 
746 	for ( ; i < array->entry_num; i++) {
747 		rc = iommu_copy_struct_from_user_array(&inv, array,
748 						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
749 						       i, iotlb_id);
750 		if (rc)
751 			break;
752 
753 		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
754 			rc = -EOPNOTSUPP;
755 			break;
756 		}
757 
758 		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
759 			rc = -EINVAL;
760 			break;
761 		}
762 
763 		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
764 			/* Invalidate all mock iotlb entries and ignore iotlb_id */
765 			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
766 				mock_nested->iotlb[j] = 0;
767 		} else {
768 			mock_nested->iotlb[inv.iotlb_id] = 0;
769 		}
770 
771 		processed++;
772 	}
773 
774 out:
775 	array->entry_num = processed;
776 	return rc;
777 }
778 
779 static struct iommu_domain_ops domain_nested_ops = {
780 	.free = mock_domain_free_nested,
781 	.attach_dev = mock_domain_nop_attach,
782 	.cache_invalidate_user = mock_domain_cache_invalidate_user,
783 };
784 
785 static inline struct iommufd_hw_pagetable *
786 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
787 {
788 	struct iommufd_object *obj;
789 
790 	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
791 	if (IS_ERR(obj))
792 		return ERR_CAST(obj);
793 	return container_of(obj, struct iommufd_hw_pagetable, obj);
794 }
795 
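/* Look up a HWPT_PAGING object and check it wraps a mock paging domain */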
796 static inline struct iommufd_hw_pagetable *
797 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
798 		 struct mock_iommu_domain **mock)
799 {
800 	struct iommufd_hw_pagetable *hwpt;
801 
802 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
803 	if (IS_ERR(hwpt))
804 		return hwpt;
805 	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
806 	    hwpt->domain->ops != mock_ops.default_domain_ops) {
807 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
808 		return ERR_PTR(-EINVAL);
809 	}
810 	*mock = to_mock_domain(hwpt->domain);
811 	return hwpt;
812 }
813 
814 static inline struct iommufd_hw_pagetable *
815 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
816 			struct mock_iommu_domain_nested **mock_nested)
817 {
818 	struct iommufd_hw_pagetable *hwpt;
819 
820 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
821 	if (IS_ERR(hwpt))
822 		return hwpt;
823 	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
824 	    hwpt->domain->ops != &domain_nested_ops) {
825 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
826 		return ERR_PTR(-EINVAL);
827 	}
828 	*mock_nested = to_mock_nested(hwpt->domain);
829 	return hwpt;
830 }
831 
832 static void mock_dev_release(struct device *dev)
833 {
834 	struct mock_dev *mdev = to_mock_dev(dev);
835 
836 	ida_free(&mock_dev_ida, mdev->id);
837 	kfree(mdev);
838 }
839 
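/*
 * Allocate and register a mock device on the iommufd_mock bus with an
 * IDA-assigned id and default cache values. It is freed by mock_dev_release()
 * when the last device reference is put.
 */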
840 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
841 {
842 	struct mock_dev *mdev;
843 	int rc, i;
844 
845 	if (dev_flags &
846 	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
847 		return ERR_PTR(-EINVAL);
848 
849 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
850 	if (!mdev)
851 		return ERR_PTR(-ENOMEM);
852 
853 	device_initialize(&mdev->dev);
854 	mdev->flags = dev_flags;
855 	mdev->dev.release = mock_dev_release;
856 	mdev->dev.bus = &iommufd_mock_bus_type.bus;
857 	for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
858 		mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
859 
860 	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
861 	if (rc < 0)
862 		goto err_put;
863 	mdev->id = rc;
864 
865 	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
866 	if (rc)
867 		goto err_put;
868 
869 	rc = device_add(&mdev->dev);
870 	if (rc)
871 		goto err_put;
872 	return mdev;
873 
874 err_put:
875 	put_device(&mdev->dev);
876 	return ERR_PTR(rc);
877 }
878 
879 static void mock_dev_destroy(struct mock_dev *mdev)
880 {
881 	device_unregister(&mdev->dev);
882 }
883 
884 bool iommufd_selftest_is_mock_dev(struct device *dev)
885 {
886 	return dev->release == mock_dev_release;
887 }
888 
889 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
890 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
891 				    struct iommu_test_cmd *cmd)
892 {
893 	struct iommufd_device *idev;
894 	struct selftest_obj *sobj;
895 	u32 pt_id = cmd->id;
896 	u32 dev_flags = 0;
897 	u32 idev_id;
898 	int rc;
899 
900 	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
901 	if (IS_ERR(sobj))
902 		return PTR_ERR(sobj);
903 
904 	sobj->idev.ictx = ucmd->ictx;
905 	sobj->type = TYPE_IDEV;
906 
907 	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
908 		dev_flags = cmd->mock_domain_flags.dev_flags;
909 
910 	sobj->idev.mock_dev = mock_dev_create(dev_flags);
911 	if (IS_ERR(sobj->idev.mock_dev)) {
912 		rc = PTR_ERR(sobj->idev.mock_dev);
913 		goto out_sobj;
914 	}
915 
916 	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
917 				   &idev_id);
918 	if (IS_ERR(idev)) {
919 		rc = PTR_ERR(idev);
920 		goto out_mdev;
921 	}
922 	sobj->idev.idev = idev;
923 
924 	rc = iommufd_device_attach(idev, &pt_id);
925 	if (rc)
926 		goto out_unbind;
927 
928 	/* Userspace must destroy the device_id to destroy the object */
929 	cmd->mock_domain.out_hwpt_id = pt_id;
930 	cmd->mock_domain.out_stdev_id = sobj->obj.id;
931 	cmd->mock_domain.out_idev_id = idev_id;
932 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
933 	if (rc)
934 		goto out_detach;
935 	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
936 	return 0;
937 
938 out_detach:
939 	iommufd_device_detach(idev);
940 out_unbind:
941 	iommufd_device_unbind(idev);
942 out_mdev:
943 	mock_dev_destroy(sobj->idev.mock_dev);
944 out_sobj:
945 	iommufd_object_abort(ucmd->ictx, &sobj->obj);
946 	return rc;
947 }
948 
949 /* Replace the mock domain with a manually allocated hw_pagetable */
950 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
951 					    unsigned int device_id, u32 pt_id,
952 					    struct iommu_test_cmd *cmd)
953 {
954 	struct iommufd_object *dev_obj;
955 	struct selftest_obj *sobj;
956 	int rc;
957 
958 	/*
959 	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
960 	 * it doesn't race with detach, which is not allowed.
961 	 */
962 	dev_obj =
963 		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
964 	if (IS_ERR(dev_obj))
965 		return PTR_ERR(dev_obj);
966 
967 	sobj = to_selftest_obj(dev_obj);
968 	if (sobj->type != TYPE_IDEV) {
969 		rc = -EINVAL;
970 		goto out_dev_obj;
971 	}
972 
973 	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
974 	if (rc)
975 		goto out_dev_obj;
976 
977 	cmd->mock_domain_replace.pt_id = pt_id;
978 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
979 
980 out_dev_obj:
981 	iommufd_put_object(ucmd->ictx, dev_obj);
982 	return rc;
983 }
984 
985 /* Add an additional reserved IOVA to the IOAS */
986 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
987 				     unsigned int mockpt_id,
988 				     unsigned long start, size_t length)
989 {
990 	struct iommufd_ioas *ioas;
991 	int rc;
992 
993 	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
994 	if (IS_ERR(ioas))
995 		return PTR_ERR(ioas);
996 	down_write(&ioas->iopt.iova_rwsem);
997 	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
998 	up_write(&ioas->iopt.iova_rwsem);
999 	iommufd_put_object(ucmd->ictx, &ioas->obj);
1000 	return rc;
1001 }
1002 
1003 /* Check that every pfn under each iova matches the pfn under a user VA */
1004 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1005 				    unsigned int mockpt_id, unsigned long iova,
1006 				    size_t length, void __user *uptr)
1007 {
1008 	struct iommufd_hw_pagetable *hwpt;
1009 	struct mock_iommu_domain *mock;
1010 	uintptr_t end;
1011 	int rc;
1012 
1013 	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
1014 	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
1015 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1016 		return -EINVAL;
1017 
1018 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1019 	if (IS_ERR(hwpt))
1020 		return PTR_ERR(hwpt);
1021 
1022 	for (; length; length -= MOCK_IO_PAGE_SIZE) {
1023 		struct page *pages[1];
1024 		unsigned long pfn;
1025 		long npages;
1026 		void *ent;
1027 
1028 		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1029 					     pages);
1030 		if (npages < 0) {
1031 			rc = npages;
1032 			goto out_put;
1033 		}
1034 		if (WARN_ON(npages != 1)) {
1035 			rc = -EFAULT;
1036 			goto out_put;
1037 		}
1038 		pfn = page_to_pfn(pages[0]);
1039 		put_page(pages[0]);
1040 
1041 		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
1042 		if (!ent ||
1043 		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
1044 			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1045 			rc = -EINVAL;
1046 			goto out_put;
1047 		}
1048 		iova += MOCK_IO_PAGE_SIZE;
1049 		uptr += MOCK_IO_PAGE_SIZE;
1050 	}
1051 	rc = 0;
1052 
1053 out_put:
1054 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1055 	return rc;
1056 }
1057 
1058 /* Check that the page ref count matches, to look for missing pin/unpins */
1059 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1060 				      void __user *uptr, size_t length,
1061 				      unsigned int refs)
1062 {
1063 	uintptr_t end;
1064 
1065 	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1066 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1067 		return -EINVAL;
1068 
1069 	for (; length; length -= PAGE_SIZE) {
1070 		struct page *pages[1];
1071 		long npages;
1072 
1073 		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1074 		if (npages < 0)
1075 			return npages;
1076 		if (WARN_ON(npages != 1))
1077 			return -EFAULT;
1078 		if (!PageCompound(pages[0])) {
1079 			unsigned int count;
1080 
1081 			count = page_ref_count(pages[0]);
1082 			if (count / GUP_PIN_COUNTING_BIAS != refs) {
1083 				put_page(pages[0]);
1084 				return -EIO;
1085 			}
1086 		}
1087 		put_page(pages[0]);
1088 		uptr += PAGE_SIZE;
1089 	}
1090 	return 0;
1091 }
1092 
1093 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
1094 				       u32 mockpt_id, unsigned int iotlb_id,
1095 				       u32 iotlb)
1096 {
1097 	struct mock_iommu_domain_nested *mock_nested;
1098 	struct iommufd_hw_pagetable *hwpt;
1099 	int rc = 0;
1100 
1101 	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1102 	if (IS_ERR(hwpt))
1103 		return PTR_ERR(hwpt);
1104 
1105 	mock_nested = to_mock_nested(hwpt->domain);
1106 
1107 	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1108 	    mock_nested->iotlb[iotlb_id] != iotlb)
1109 		rc = -EINVAL;
1110 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1111 	return rc;
1112 }
1113 
1114 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1115 					unsigned int cache_id, u32 cache)
1116 {
1117 	struct iommufd_device *idev;
1118 	struct mock_dev *mdev;
1119 	int rc = 0;
1120 
1121 	idev = iommufd_get_device(ucmd, idev_id);
1122 	if (IS_ERR(idev))
1123 		return PTR_ERR(idev);
1124 	mdev = container_of(idev->dev, struct mock_dev, dev);
1125 
1126 	if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1127 		rc = -EINVAL;
1128 	iommufd_put_object(ucmd->ictx, &idev->obj);
1129 	return rc;
1130 }
1131 
1132 struct selftest_access {
1133 	struct iommufd_access *access;
1134 	struct file *file;
1135 	struct mutex lock;
1136 	struct list_head items;
1137 	unsigned int next_id;
1138 	bool destroying;
1139 };
1140 
1141 struct selftest_access_item {
1142 	struct list_head items_elm;
1143 	unsigned long iova;
1144 	size_t length;
1145 	unsigned int id;
1146 };
1147 
1148 static const struct file_operations iommfd_test_staccess_fops;
1149 
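/*
 * Resolve an access fd to its selftest_access. Takes a file reference that
 * the caller must drop with fput(staccess->file).
 */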
1150 static struct selftest_access *iommufd_access_get(int fd)
1151 {
1152 	struct file *file;
1153 
1154 	file = fget(fd);
1155 	if (!file)
1156 		return ERR_PTR(-EBADFD);
1157 
1158 	if (file->f_op != &iommfd_test_staccess_fops) {
1159 		fput(file);
1160 		return ERR_PTR(-EBADFD);
1161 	}
1162 	return file->private_data;
1163 }
1164 
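/*
 * iommufd_access_ops->unmap callback: unpin and drop every pinned item that
 * overlaps the range being unmapped.
 */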
1165 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1166 				      unsigned long length)
1167 {
1168 	unsigned long iova_last = iova + length - 1;
1169 	struct selftest_access *staccess = data;
1170 	struct selftest_access_item *item;
1171 	struct selftest_access_item *tmp;
1172 
1173 	mutex_lock(&staccess->lock);
1174 	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1175 		if (iova > item->iova + item->length - 1 ||
1176 		    iova_last < item->iova)
1177 			continue;
1178 		list_del(&item->items_elm);
1179 		iommufd_access_unpin_pages(staccess->access, item->iova,
1180 					   item->length);
1181 		kfree(item);
1182 	}
1183 	mutex_unlock(&staccess->lock);
1184 }
1185 
1186 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1187 					    unsigned int access_id,
1188 					    unsigned int item_id)
1189 {
1190 	struct selftest_access_item *item;
1191 	struct selftest_access *staccess;
1192 
1193 	staccess = iommufd_access_get(access_id);
1194 	if (IS_ERR(staccess))
1195 		return PTR_ERR(staccess);
1196 
1197 	mutex_lock(&staccess->lock);
1198 	list_for_each_entry(item, &staccess->items, items_elm) {
1199 		if (item->id == item_id) {
1200 			list_del(&item->items_elm);
1201 			iommufd_access_unpin_pages(staccess->access, item->iova,
1202 						   item->length);
1203 			mutex_unlock(&staccess->lock);
1204 			kfree(item);
1205 			fput(staccess->file);
1206 			return 0;
1207 		}
1208 	}
1209 	mutex_unlock(&staccess->lock);
1210 	fput(staccess->file);
1211 	return -ENOENT;
1212 }
1213 
1214 static int iommufd_test_staccess_release(struct inode *inode,
1215 					 struct file *filep)
1216 {
1217 	struct selftest_access *staccess = filep->private_data;
1218 
1219 	if (staccess->access) {
1220 		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1221 		iommufd_access_destroy(staccess->access);
1222 	}
1223 	mutex_destroy(&staccess->lock);
1224 	kfree(staccess);
1225 	return 0;
1226 }
1227 
1228 static const struct iommufd_access_ops selftest_access_ops_pin = {
1229 	.needs_pin_pages = 1,
1230 	.unmap = iommufd_test_access_unmap,
1231 };
1232 
1233 static const struct iommufd_access_ops selftest_access_ops = {
1234 	.unmap = iommufd_test_access_unmap,
1235 };
1236 
1237 static const struct file_operations iommfd_test_staccess_fops = {
1238 	.release = iommufd_test_staccess_release,
1239 };
1240 
1241 static struct selftest_access *iommufd_test_alloc_access(void)
1242 {
1243 	struct selftest_access *staccess;
1244 	struct file *filep;
1245 
1246 	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1247 	if (!staccess)
1248 		return ERR_PTR(-ENOMEM);
1249 	INIT_LIST_HEAD(&staccess->items);
1250 	mutex_init(&staccess->lock);
1251 
1252 	filep = anon_inode_getfile("[iommufd_test_staccess]",
1253 				   &iommfd_test_staccess_fops, staccess,
1254 				   O_RDWR);
1255 	if (IS_ERR(filep)) {
1256 		kfree(staccess);
1257 		return ERR_CAST(filep);
1258 	}
1259 	staccess->file = filep;
1260 	return staccess;
1261 }
1262 
1263 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1264 				      unsigned int ioas_id, unsigned int flags)
1265 {
1266 	struct iommu_test_cmd *cmd = ucmd->cmd;
1267 	struct selftest_access *staccess;
1268 	struct iommufd_access *access;
1269 	u32 id;
1270 	int fdno;
1271 	int rc;
1272 
1273 	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1274 		return -EOPNOTSUPP;
1275 
1276 	staccess = iommufd_test_alloc_access();
1277 	if (IS_ERR(staccess))
1278 		return PTR_ERR(staccess);
1279 
1280 	fdno = get_unused_fd_flags(O_CLOEXEC);
1281 	if (fdno < 0) {
1282 		rc = -ENOMEM;
1283 		goto out_free_staccess;
1284 	}
1285 
1286 	access = iommufd_access_create(
1287 		ucmd->ictx,
1288 		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1289 			&selftest_access_ops_pin :
1290 			&selftest_access_ops,
1291 		staccess, &id);
1292 	if (IS_ERR(access)) {
1293 		rc = PTR_ERR(access);
1294 		goto out_put_fdno;
1295 	}
1296 	rc = iommufd_access_attach(access, ioas_id);
1297 	if (rc)
1298 		goto out_destroy;
1299 	cmd->create_access.out_access_fd = fdno;
1300 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1301 	if (rc)
1302 		goto out_destroy;
1303 
1304 	staccess->access = access;
1305 	fd_install(fdno, staccess->file);
1306 	return 0;
1307 
1308 out_destroy:
1309 	iommufd_access_destroy(access);
1310 out_put_fdno:
1311 	put_unused_fd(fdno);
1312 out_free_staccess:
1313 	fput(staccess->file);
1314 	return rc;
1315 }
1316 
1317 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1318 					    unsigned int access_id,
1319 					    unsigned int ioas_id)
1320 {
1321 	struct selftest_access *staccess;
1322 	int rc;
1323 
1324 	staccess = iommufd_access_get(access_id);
1325 	if (IS_ERR(staccess))
1326 		return PTR_ERR(staccess);
1327 
1328 	rc = iommufd_access_replace(staccess->access, ioas_id);
1329 	fput(staccess->file);
1330 	return rc;
1331 }
1332 
1333 /* Check that the pages in a page array match the pages in the user VA */
1334 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1335 				    size_t npages)
1336 {
1337 	for (; npages; npages--) {
1338 		struct page *tmp_pages[1];
1339 		long rc;
1340 
1341 		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1342 		if (rc < 0)
1343 			return rc;
1344 		if (WARN_ON(rc != 1))
1345 			return -EFAULT;
1346 		put_page(tmp_pages[0]);
1347 		if (tmp_pages[0] != *pages)
1348 			return -EBADE;
1349 		pages++;
1350 		uptr += PAGE_SIZE;
1351 	}
1352 	return 0;
1353 }
1354 
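/*
 * Pin user pages through the access object, verify they match the user VA,
 * and record them as an item so a later destroy or unmap callback can unpin
 * them.
 */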
1355 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1356 				     unsigned int access_id, unsigned long iova,
1357 				     size_t length, void __user *uptr,
1358 				     u32 flags)
1359 {
1360 	struct iommu_test_cmd *cmd = ucmd->cmd;
1361 	struct selftest_access_item *item;
1362 	struct selftest_access *staccess;
1363 	struct page **pages;
1364 	size_t npages;
1365 	int rc;
1366 
1367 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1368 	if (length > 16*1024*1024)
1369 		return -ENOMEM;
1370 
1371 	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1372 		return -EOPNOTSUPP;
1373 
1374 	staccess = iommufd_access_get(access_id);
1375 	if (IS_ERR(staccess))
1376 		return PTR_ERR(staccess);
1377 
1378 	if (staccess->access->ops != &selftest_access_ops_pin) {
1379 		rc = -EOPNOTSUPP;
1380 		goto out_put;
1381 	}
1382 
1383 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1384 		iova = iommufd_test_syz_conv_iova(staccess->access,
1385 					&cmd->access_pages.iova);
1386 
1387 	npages = (ALIGN(iova + length, PAGE_SIZE) -
1388 		  ALIGN_DOWN(iova, PAGE_SIZE)) /
1389 		 PAGE_SIZE;
1390 	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1391 	if (!pages) {
1392 		rc = -ENOMEM;
1393 		goto out_put;
1394 	}
1395 
1396 	/*
1397 	 * Drivers will need to think very carefully about this locking. The
1398 	 * core code can do multiple unmaps instantaneously after
1399 	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1400 	 * the range is unpinned. This simple implementation puts a global lock
1401 	 * around the pin, which may not suit drivers that want this to be a
1402 	 * performance path. Drivers that get this wrong will trigger WARN_ON
1403 	 * races and cause EDEADLOCK failures to userspace.
1404 	 */
1405 	mutex_lock(&staccess->lock);
1406 	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1407 				      flags & MOCK_FLAGS_ACCESS_WRITE);
1408 	if (rc)
1409 		goto out_unlock;
1410 
1411 	/* For syzkaller allow uptr to be NULL to skip this check */
1412 	if (uptr) {
1413 		rc = iommufd_test_check_pages(
1414 			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1415 			npages);
1416 		if (rc)
1417 			goto out_unaccess;
1418 	}
1419 
1420 	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1421 	if (!item) {
1422 		rc = -ENOMEM;
1423 		goto out_unaccess;
1424 	}
1425 
1426 	item->iova = iova;
1427 	item->length = length;
1428 	item->id = staccess->next_id++;
1429 	list_add_tail(&item->items_elm, &staccess->items);
1430 
1431 	cmd->access_pages.out_access_pages_id = item->id;
1432 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1433 	if (rc)
1434 		goto out_free_item;
1435 	goto out_unlock;
1436 
1437 out_free_item:
1438 	list_del(&item->items_elm);
1439 	kfree(item);
1440 out_unaccess:
1441 	iommufd_access_unpin_pages(staccess->access, iova, length);
1442 out_unlock:
1443 	mutex_unlock(&staccess->lock);
1444 	kvfree(pages);
1445 out_put:
1446 	fput(staccess->file);
1447 	return rc;
1448 }
1449 
1450 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1451 				  unsigned int access_id, unsigned long iova,
1452 				  size_t length, void __user *ubuf,
1453 				  unsigned int flags)
1454 {
1455 	struct iommu_test_cmd *cmd = ucmd->cmd;
1456 	struct selftest_access *staccess;
1457 	void *tmp;
1458 	int rc;
1459 
1460 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1461 	if (length > 16*1024*1024)
1462 		return -ENOMEM;
1463 
1464 	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1465 		      MOCK_FLAGS_ACCESS_SYZ))
1466 		return -EOPNOTSUPP;
1467 
1468 	staccess = iommufd_access_get(access_id);
1469 	if (IS_ERR(staccess))
1470 		return PTR_ERR(staccess);
1471 
1472 	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1473 	if (!tmp) {
1474 		rc = -ENOMEM;
1475 		goto out_put;
1476 	}
1477 
1478 	if (flags & MOCK_ACCESS_RW_WRITE) {
1479 		if (copy_from_user(tmp, ubuf, length)) {
1480 			rc = -EFAULT;
1481 			goto out_free;
1482 		}
1483 	}
1484 
1485 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1486 		iova = iommufd_test_syz_conv_iova(staccess->access,
1487 				&cmd->access_rw.iova);
1488 
1489 	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1490 	if (rc)
1491 		goto out_free;
1492 	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1493 		if (copy_to_user(ubuf, tmp, length)) {
1494 			rc = -EFAULT;
1495 			goto out_free;
1496 		}
1497 	}
1498 
1499 out_free:
1500 	kvfree(tmp);
1501 out_put:
1502 	fput(staccess->file);
1503 	return rc;
1504 }
1505 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1506 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1507 	      __IOMMUFD_ACCESS_RW_SLOW_PATH);
1508 
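/*
 * Mark the pfns selected by the user-provided bitmap as dirty in the mock
 * domain so a following read-and-clear can be verified. Dirty tracking must
 * already be enabled on the domain; the number of entries marked is returned
 * in out_nr_dirty.
 */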
1509 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1510 			      unsigned long iova, size_t length,
1511 			      unsigned long page_size, void __user *uptr,
1512 			      u32 flags)
1513 {
1514 	unsigned long i, max;
1515 	struct iommu_test_cmd *cmd = ucmd->cmd;
1516 	struct iommufd_hw_pagetable *hwpt;
1517 	struct mock_iommu_domain *mock;
1518 	int rc, count = 0;
1519 	void *tmp;
1520 
1521 	if (!page_size || !length || iova % page_size || length % page_size ||
1522 	    !uptr)
1523 		return -EINVAL;
1524 
1525 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1526 	if (IS_ERR(hwpt))
1527 		return PTR_ERR(hwpt);
1528 
1529 	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
1530 		rc = -EINVAL;
1531 		goto out_put;
1532 	}
1533 
1534 	max = length / page_size;
1535 	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1536 		       GFP_KERNEL_ACCOUNT);
1537 	if (!tmp) {
1538 		rc = -ENOMEM;
1539 		goto out_put;
1540 	}
1541 
1542 	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1543 		rc = -EFAULT;
1544 		goto out_free;
1545 	}
1546 
1547 	for (i = 0; i < max; i++) {
1548 		unsigned long cur = iova + i * page_size;
1549 		void *ent, *old;
1550 
1551 		if (!test_bit(i, (unsigned long *)tmp))
1552 			continue;
1553 
1554 		ent = xa_load(&mock->pfns, cur / page_size);
1555 		if (ent) {
1556 			unsigned long val;
1557 
1558 			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
1559 			old = xa_store(&mock->pfns, cur / page_size,
1560 				       xa_mk_value(val), GFP_KERNEL);
1561 			WARN_ON_ONCE(ent != old);
1562 			count++;
1563 		}
1564 	}
1565 
1566 	cmd->dirty.out_nr_dirty = count;
1567 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1568 out_free:
1569 	kvfree(tmp);
1570 out_put:
1571 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1572 	return rc;
1573 }
1574 
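/*
 * Build an I/O page fault from the user command and report it on the mock
 * device via iommu_report_device_fault().
 */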
1575 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1576 				     struct iommu_test_cmd *cmd)
1577 {
1578 	struct iopf_fault event = { };
1579 	struct iommufd_device *idev;
1580 
1581 	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1582 	if (IS_ERR(idev))
1583 		return PTR_ERR(idev);
1584 
1585 	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1586 	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1587 		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1588 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
1589 	event.fault.prm.addr = cmd->trigger_iopf.addr;
1590 	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1591 	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1592 	event.fault.prm.perm = cmd->trigger_iopf.perm;
1593 
1594 	iommu_report_device_fault(idev->dev, &event);
1595 	iommufd_put_object(ucmd->ictx, &idev->obj);
1596 
1597 	return 0;
1598 }
1599 
1600 void iommufd_selftest_destroy(struct iommufd_object *obj)
1601 {
1602 	struct selftest_obj *sobj = to_selftest_obj(obj);
1603 
1604 	switch (sobj->type) {
1605 	case TYPE_IDEV:
1606 		iommufd_device_detach(sobj->idev.idev);
1607 		iommufd_device_unbind(sobj->idev.idev);
1608 		mock_dev_destroy(sobj->idev.mock_dev);
1609 		break;
1610 	}
1611 }
1612 
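/* Dispatch an IOMMU_TEST_OP_* selftest command to its handler */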
1613 int iommufd_test(struct iommufd_ucmd *ucmd)
1614 {
1615 	struct iommu_test_cmd *cmd = ucmd->cmd;
1616 
1617 	switch (cmd->op) {
1618 	case IOMMU_TEST_OP_ADD_RESERVED:
1619 		return iommufd_test_add_reserved(ucmd, cmd->id,
1620 						 cmd->add_reserved.start,
1621 						 cmd->add_reserved.length);
1622 	case IOMMU_TEST_OP_MOCK_DOMAIN:
1623 	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
1624 		return iommufd_test_mock_domain(ucmd, cmd);
1625 	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
1626 		return iommufd_test_mock_domain_replace(
1627 			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
1628 	case IOMMU_TEST_OP_MD_CHECK_MAP:
1629 		return iommufd_test_md_check_pa(
1630 			ucmd, cmd->id, cmd->check_map.iova,
1631 			cmd->check_map.length,
1632 			u64_to_user_ptr(cmd->check_map.uptr));
1633 	case IOMMU_TEST_OP_MD_CHECK_REFS:
1634 		return iommufd_test_md_check_refs(
1635 			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
1636 			cmd->check_refs.length, cmd->check_refs.refs);
1637 	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
1638 		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
1639 						   cmd->check_iotlb.id,
1640 						   cmd->check_iotlb.iotlb);
1641 	case IOMMU_TEST_OP_DEV_CHECK_CACHE:
1642 		return iommufd_test_dev_check_cache(ucmd, cmd->id,
1643 						    cmd->check_dev_cache.id,
1644 						    cmd->check_dev_cache.cache);
1645 	case IOMMU_TEST_OP_CREATE_ACCESS:
1646 		return iommufd_test_create_access(ucmd, cmd->id,
1647 						  cmd->create_access.flags);
1648 	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
1649 		return iommufd_test_access_replace_ioas(
1650 			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
1651 	case IOMMU_TEST_OP_ACCESS_PAGES:
1652 		return iommufd_test_access_pages(
1653 			ucmd, cmd->id, cmd->access_pages.iova,
1654 			cmd->access_pages.length,
1655 			u64_to_user_ptr(cmd->access_pages.uptr),
1656 			cmd->access_pages.flags);
1657 	case IOMMU_TEST_OP_ACCESS_RW:
1658 		return iommufd_test_access_rw(
1659 			ucmd, cmd->id, cmd->access_rw.iova,
1660 			cmd->access_rw.length,
1661 			u64_to_user_ptr(cmd->access_rw.uptr),
1662 			cmd->access_rw.flags);
1663 	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
1664 		return iommufd_test_access_item_destroy(
1665 			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
1666 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
1667 		/* Protect _batch_init(), cannot be less than elmsz */
1668 		if (cmd->memory_limit.limit <
1669 		    sizeof(unsigned long) + sizeof(u32))
1670 			return -EINVAL;
1671 		iommufd_test_memory_limit = cmd->memory_limit.limit;
1672 		return 0;
1673 	case IOMMU_TEST_OP_DIRTY:
1674 		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
1675 					  cmd->dirty.length,
1676 					  cmd->dirty.page_size,
1677 					  u64_to_user_ptr(cmd->dirty.uptr),
1678 					  cmd->dirty.flags);
1679 	case IOMMU_TEST_OP_TRIGGER_IOPF:
1680 		return iommufd_test_trigger_iopf(ucmd, cmd);
1681 	default:
1682 		return -EOPNOTSUPP;
1683 	}
1684 }
1685 
1686 bool iommufd_should_fail(void)
1687 {
1688 	return should_fail(&fail_iommufd, 1);
1689 }
1690 
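/*
 * Register the fail_iommufd fault-injection debugfs attribute, the selftest
 * platform device, the mock bus and the mock IOMMU device, and allocate the
 * IOPF queue used by the fault tests.
 */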
1691 int __init iommufd_test_init(void)
1692 {
1693 	struct platform_device_info pdevinfo = {
1694 		.name = "iommufd_selftest_iommu",
1695 	};
1696 	int rc;
1697 
1698 	dbgfs_root =
1699 		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
1700 
1701 	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
1702 	if (IS_ERR(selftest_iommu_dev)) {
1703 		rc = PTR_ERR(selftest_iommu_dev);
1704 		goto err_dbgfs;
1705 	}
1706 
1707 	rc = bus_register(&iommufd_mock_bus_type.bus);
1708 	if (rc)
1709 		goto err_platform;
1710 
1711 	rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
1712 				    &selftest_iommu_dev->dev, NULL, "%s",
1713 				    dev_name(&selftest_iommu_dev->dev));
1714 	if (rc)
1715 		goto err_bus;
1716 
1717 	rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
1718 				  &iommufd_mock_bus_type.bus,
1719 				  &iommufd_mock_bus_type.nb);
1720 	if (rc)
1721 		goto err_sysfs;
1722 
1723 	refcount_set(&mock_iommu.users, 1);
1724 	init_completion(&mock_iommu.complete);
1725 
1726 	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
1727 
1728 	return 0;
1729 
1730 err_sysfs:
1731 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
1732 err_bus:
1733 	bus_unregister(&iommufd_mock_bus_type.bus);
1734 err_platform:
1735 	platform_device_unregister(selftest_iommu_dev);
1736 err_dbgfs:
1737 	debugfs_remove_recursive(dbgfs_root);
1738 	return rc;
1739 }
1740 
1741 static void iommufd_test_wait_for_users(void)
1742 {
1743 	if (refcount_dec_and_test(&mock_iommu.users))
1744 		return;
1745 	/*
1746 	 * Time out waiting for iommu device user count to become 0.
1747 	 *
1748 	 * Note that this is just making an example here, since the selftest is
1749 	 * built into the iommufd module, i.e. it only unplugs the iommu device
1750 	 * when unloading the module. So, it is expected that this WARN_ON will
1751 	 * not trigger, as long as any iommufd FDs are open.
1752 	 */
1753 	WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
1754 					     msecs_to_jiffies(10000)));
1755 }
1756 
1757 void iommufd_test_exit(void)
1758 {
1759 	if (mock_iommu_iopf_queue) {
1760 		iopf_queue_free(mock_iommu_iopf_queue);
1761 		mock_iommu_iopf_queue = NULL;
1762 	}
1763 
1764 	iommufd_test_wait_for_users();
1765 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
1766 	iommu_device_unregister_bus(&mock_iommu.iommu_dev,
1767 				    &iommufd_mock_bus_type.bus,
1768 				    &iommufd_mock_bus_type.nb);
1769 	bus_unregister(&iommufd_mock_bus_type.bus);
1770 	platform_device_unregister(selftest_iommu_dev);
1771 	debugfs_remove_recursive(dbgfs_root);
1772 }
1773