1 /*
2  * Copyright © 2009-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <[email protected]>
25  *    Chris Wilson <[email protected]>
26  *    Tvrtko Ursulin <[email protected]>
27  *
28  */
29 
30 /** @file gem_userptr_blits.c
31  *
32  * This is a test of doing many blits using a mixture of normal system pages
33  * and uncached linear buffers, with a working set larger than the
34  * aperture size.
35  *
36  * The goal is simply to ensure that the basics work.
37  */
38 
39 #include "igt.h"
40 #include <stdlib.h>
41 #include <stdio.h>
42 #include <string.h>
43 #include <fcntl.h>
44 #include <inttypes.h>
45 #include <errno.h>
46 #include <setjmp.h>
47 #include <sys/stat.h>
48 #include <sys/time.h>
49 #include <sys/mman.h>
50 #include <glib.h>
51 #include <signal.h>
52 #include <pthread.h>
53 #include <time.h>
54 
55 #include <linux/memfd.h>
56 
57 #include "drm.h"
58 #include "i915_drm.h"
59 
60 #include "intel_bufmgr.h"
61 
62 #include "eviction_common.c"
63 
64 #ifndef PAGE_SIZE
65 #define PAGE_SIZE 4096
66 #endif
67 
68 #define LOCAL_EXEC_OBJECT_SUPPORTS_48B (1 << 3)
69 
70 static uint32_t userptr_flags = LOCAL_I915_USERPTR_UNSYNCHRONIZED;
71 
72 #define WIDTH 512
73 #define HEIGHT 512
74 
75 static uint32_t linear[WIDTH*HEIGHT];
76 
77 static void gem_userptr_test_unsynchronized(void)
78 {
79 	userptr_flags = LOCAL_I915_USERPTR_UNSYNCHRONIZED;
80 }
81 
82 static void gem_userptr_test_synchronized(void)
83 {
84 	userptr_flags = 0;
85 }
86 
87 static void gem_userptr_sync(int fd, uint32_t handle)
88 {
89 	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
90 }
91 
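/*
 * Build and submit a single XY_SRC_COPY_BLT batch that copies the
 * WIDTH x HEIGHT dword surface from src to dst (on the BLT ring where
 * one exists), returning the raw execbuf result so callers can assert
 * on expected faults as well as on success.
 */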
92 static int copy(int fd, uint32_t dst, uint32_t src)
93 {
94 	uint32_t batch[12];
95 	struct drm_i915_gem_relocation_entry reloc[2];
96 	struct drm_i915_gem_exec_object2 obj[3];
97 	struct drm_i915_gem_execbuffer2 exec;
98 	uint32_t handle;
99 	int ret, i=0;
100 
101 	batch[i++] = XY_SRC_COPY_BLT_CMD |
102 		  XY_SRC_COPY_BLT_WRITE_ALPHA |
103 		  XY_SRC_COPY_BLT_WRITE_RGB;
104 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
105 		batch[i - 1] |= 8;
106 	else
107 		batch[i - 1] |= 6;
108 
109 	batch[i++] = (3 << 24) | /* 32 bits */
110 		  (0xcc << 16) | /* copy ROP */
111 		  WIDTH*4;
112 	batch[i++] = 0; /* dst x1,y1 */
113 	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
114 	batch[i++] = 0; /* dst reloc */
115 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
116 		batch[i++] = 0;
117 	batch[i++] = 0; /* src x1,y1 */
118 	batch[i++] = WIDTH*4;
119 	batch[i++] = 0; /* src reloc */
120 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
121 		batch[i++] = 0;
122 	batch[i++] = MI_BATCH_BUFFER_END;
123 	batch[i++] = MI_NOOP;
124 
125 	handle = gem_create(fd, 4096);
126 	gem_write(fd, handle, 0, batch, sizeof(batch));
127 
128 	reloc[0].target_handle = dst;
129 	reloc[0].delta = 0;
130 	reloc[0].offset = 4 * sizeof(batch[0]);
131 	reloc[0].presumed_offset = 0;
132 	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
133 	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
134 
135 	reloc[1].target_handle = src;
136 	reloc[1].delta = 0;
137 	reloc[1].offset = 7 * sizeof(batch[0]);
138 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
139 		reloc[1].offset += sizeof(batch[0]);
140 	reloc[1].presumed_offset = 0;
141 	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
142 	reloc[1].write_domain = 0;
143 
144 	memset(&exec, 0, sizeof(exec));
145 	memset(obj, 0, sizeof(obj));
146 
147 	obj[exec.buffer_count].handle = dst;
148 	obj[exec.buffer_count].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
149 	exec.buffer_count++;
150 
151 	if (src != dst) {
152 		obj[exec.buffer_count].handle = src;
153 		obj[exec.buffer_count].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
154 		exec.buffer_count++;
155 	}
156 
157 	obj[exec.buffer_count].handle = handle;
158 	obj[exec.buffer_count].relocation_count = 2;
159 	obj[exec.buffer_count].relocs_ptr = to_user_pointer(reloc);
160 	obj[exec.buffer_count].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
161 	exec.buffer_count++;
162 	exec.buffers_ptr = to_user_pointer(obj);
163 	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
164 
165 	ret = __gem_execbuf(fd, &exec);
166 	gem_close(fd, handle);
167 
168 	return ret;
169 }
170 
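/*
 * Same blitter copy as above, but with every handle in all_bo[] added
 * to the execbuf so the whole working set must be resident for the
 * blit; this is the copy hook used by the shared eviction tests.
 */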
171 static int
172 blit(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
173 {
174 	uint32_t batch[12];
175 	struct drm_i915_gem_relocation_entry reloc[2];
176 	struct drm_i915_gem_exec_object2 *obj;
177 	struct drm_i915_gem_execbuffer2 exec;
178 	uint32_t handle;
179 	int n, ret, i=0;
180 
181 	batch[i++] = XY_SRC_COPY_BLT_CMD |
182 		  XY_SRC_COPY_BLT_WRITE_ALPHA |
183 		  XY_SRC_COPY_BLT_WRITE_RGB;
184 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
185 		batch[i - 1] |= 8;
186 	else
187 		batch[i - 1] |= 6;
188 	batch[i++] = (3 << 24) | /* 32 bits */
189 		  (0xcc << 16) | /* copy ROP */
190 		  WIDTH*4;
191 	batch[i++] = 0; /* dst x1,y1 */
192 	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
193 	batch[i++] = 0; /* dst reloc */
194 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
195 		batch[i++] = 0;
196 	batch[i++] = 0; /* src x1,y1 */
197 	batch[i++] = WIDTH*4;
198 	batch[i++] = 0; /* src reloc */
199 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
200 		batch[i++] = 0;
201 	batch[i++] = MI_BATCH_BUFFER_END;
202 	batch[i++] = MI_NOOP;
203 
204 	handle = gem_create(fd, 4096);
205 	gem_write(fd, handle, 0, batch, sizeof(batch));
206 
207 	reloc[0].target_handle = dst;
208 	reloc[0].delta = 0;
209 	reloc[0].offset = 4 * sizeof(batch[0]);
210 	reloc[0].presumed_offset = 0;
211 	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
212 	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
213 
214 	reloc[1].target_handle = src;
215 	reloc[1].delta = 0;
216 	reloc[1].offset = 7 * sizeof(batch[0]);
217 	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
218 		reloc[1].offset += sizeof(batch[0]);
219 	reloc[1].presumed_offset = 0;
220 	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
221 	reloc[1].write_domain = 0;
222 
223 	memset(&exec, 0, sizeof(exec));
224 	obj = calloc(n_bo + 1, sizeof(*obj));
225 	for (n = 0; n < n_bo; n++) {
226 		obj[n].handle = all_bo[n];
227 		obj[n].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
228 	}
229 	obj[n].handle = handle;
230 	obj[n].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
231 	obj[n].relocation_count = 2;
232 	obj[n].relocs_ptr = to_user_pointer(reloc);
233 
234 	exec.buffers_ptr = to_user_pointer(obj);
235 	exec.buffer_count = n_bo + 1;
236 	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
237 
238 	ret = __gem_execbuf(fd, &exec);
239 	gem_close(fd, handle);
240 	free(obj);
241 
242 	return ret;
243 }
244 
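/*
 * Emit a single MI_STORE_DWORD_IMM writing value at the given byte
 * offset inside target, with the command layout adjusted for the
 * running generation (and I915_EXEC_SECURE on gen < 6).
 */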
245 static void store_dword(int fd, uint32_t target,
246 			uint32_t offset, uint32_t value)
247 {
248 	const int gen = intel_gen(intel_get_drm_devid(fd));
249 	struct drm_i915_gem_exec_object2 obj[2];
250 	struct drm_i915_gem_relocation_entry reloc;
251 	struct drm_i915_gem_execbuffer2 execbuf;
252 	uint32_t batch[16];
253 	int i;
254 
255 	memset(&execbuf, 0, sizeof(execbuf));
256 	execbuf.buffers_ptr = to_user_pointer(obj);
257 	execbuf.buffer_count = ARRAY_SIZE(obj);
258 	execbuf.flags = 0;
259 	if (gen < 6)
260 		execbuf.flags |= I915_EXEC_SECURE;
261 
262 	memset(obj, 0, sizeof(obj));
263 	obj[0].handle = target;
264 	obj[1].handle = gem_create(fd, 4096);
265 
266 	memset(&reloc, 0, sizeof(reloc));
267 	reloc.target_handle = obj[0].handle;
268 	reloc.presumed_offset = 0;
269 	reloc.offset = sizeof(uint32_t);
270 	reloc.delta = offset;
271 	reloc.read_domains = I915_GEM_DOMAIN_RENDER;
272 	reloc.write_domain = I915_GEM_DOMAIN_RENDER;
273 	obj[1].relocs_ptr = to_user_pointer(&reloc);
274 	obj[1].relocation_count = 1;
275 
276 	i = 0;
277 	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
278 	if (gen >= 8) {
279 		batch[++i] = offset;
280 		batch[++i] = 0;
281 	} else if (gen >= 4) {
282 		batch[++i] = 0;
283 		batch[++i] = offset;
284 		reloc.offset += sizeof(uint32_t);
285 	} else {
286 		batch[i]--;
287 		batch[++i] = offset;
288 	}
289 	batch[++i] = value;
290 	batch[++i] = MI_BATCH_BUFFER_END;
291 	gem_write(fd, obj[1].handle, 0, batch, sizeof(batch));
292 	gem_execbuf(fd, &execbuf);
293 	gem_close(fd, obj[1].handle);
294 }
295 
296 static uint32_t
297 create_userptr(int fd, uint32_t val, uint32_t *ptr)
298 {
299 	uint32_t handle;
300 	int i;
301 
302 	gem_userptr(fd, ptr, sizeof(linear), 0, userptr_flags, &handle);
303 	igt_assert(handle != 0);
304 
305 	/* Fill the BO with dwords starting at val */
306 	for (i = 0; i < WIDTH*HEIGHT; i++)
307 		ptr[i] = val++;
308 
309 	return handle;
310 }
311 
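/*
 * Book-keeping that maps GEM handles back to the CPU pointer and size
 * of their userptr backing store, so the eviction helpers can clear
 * and compare buffers through the CPU mapping.
 */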
312 static void **handle_ptr_map;
313 static unsigned *handle_size_map;
314 static unsigned int num_handle_map;
315 
316 static void reset_handle_ptr(void)
317 {
318 	if (num_handle_map == 0)
319 		return;
320 
321 	free(handle_ptr_map);
322 	handle_ptr_map = NULL;
323 
324 	free(handle_size_map);
325 	handle_size_map = NULL;
326 
327 	num_handle_map = 0;
328 }
329 
330 static void add_handle_ptr(uint32_t handle, void *ptr, int size)
331 {
332 	if (handle >= num_handle_map) {
333 		int max = (4096 + handle) & -4096;
334 
335 		handle_ptr_map = realloc(handle_ptr_map,
336 					 max * sizeof(void*));
337 		igt_assert(handle_ptr_map);
338 		memset(handle_ptr_map + num_handle_map, 0,
339 		       (max - num_handle_map) * sizeof(void*));
340 
341 		handle_size_map = realloc(handle_size_map,
342 					  max * sizeof(unsigned));
343 		igt_assert(handle_size_map);
344 		memset(handle_size_map + num_handle_map, 0,
345 		       (max - num_handle_map) * sizeof(unsigned));
346 
347 		num_handle_map = max;
348 	}
349 
350 	handle_ptr_map[handle] = ptr;
351 	handle_size_map[handle] = size;
352 }
353 
354 static void *get_handle_ptr(uint32_t handle)
355 {
356 	igt_assert(handle < num_handle_map);
357 	return handle_ptr_map[handle];
358 }
359 
360 static void free_handle_ptr(uint32_t handle)
361 {
362 	igt_assert(handle < num_handle_map);
363 	igt_assert(handle_ptr_map[handle]);
364 
365 	munmap(handle_ptr_map[handle], handle_size_map[handle]);
366 	handle_ptr_map[handle] = NULL;
367 }
368 
369 static uint32_t create_userptr_bo(int fd, uint64_t size)
370 {
371 	void *ptr;
372 	uint32_t handle;
373 
374 	ptr = mmap(NULL, size,
375 		   PROT_READ | PROT_WRITE,
376 		   MAP_ANONYMOUS | MAP_SHARED,
377 		   -1, 0);
378 	igt_assert(ptr != MAP_FAILED);
379 
380 	gem_userptr(fd, (uint32_t *)ptr, size, 0, userptr_flags, &handle);
381 	add_handle_ptr(handle, ptr, size);
382 
383 	return handle;
384 }
385 
386 static void flink_userptr_bo(uint32_t old_handle, uint32_t new_handle)
387 {
388 	igt_assert(old_handle < num_handle_map);
389 	igt_assert(handle_ptr_map[old_handle]);
390 
391 	add_handle_ptr(new_handle,
392 		       handle_ptr_map[old_handle],
393 		       handle_size_map[old_handle]);
394 }
395 
396 static void clear(int fd, uint32_t handle, uint64_t size)
397 {
398 	void *ptr = get_handle_ptr(handle);
399 
400 	igt_assert(ptr != NULL);
401 
402 	memset(ptr, 0, size);
403 }
404 
405 static void free_userptr_bo(int fd, uint32_t handle)
406 {
407 	gem_close(fd, handle);
408 	free_handle_ptr(handle);
409 }
410 
411 static uint32_t
412 create_bo(int fd, uint32_t val)
413 {
414 	uint32_t handle;
415 	int i;
416 
417 	handle = gem_create(fd, sizeof(linear));
418 
419 	/* Fill the BO with dwords starting at val */
420 	for (i = 0; i < WIDTH*HEIGHT; i++)
421 		linear[i] = val++;
422 	gem_write(fd, handle, 0, linear, sizeof(linear));
423 
424 	return handle;
425 }
426 
427 static void
428 check_cpu(uint32_t *ptr, uint32_t val)
429 {
430 	int i;
431 
432 	for (i = 0; i < WIDTH*HEIGHT; i++) {
433 		igt_assert_f(ptr[i] == val,
434 			     "Expected 0x%08x, found 0x%08x "
435 			     "at offset 0x%08x\n",
436 			     val, ptr[i], i * 4);
437 		val++;
438 	}
439 }
440 
441 static void
442 check_gpu(int fd, uint32_t handle, uint32_t val)
443 {
444 	gem_read(fd, handle, 0, linear, sizeof(linear));
445 	check_cpu(linear, val);
446 }
447 
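/*
 * Probe for userptr support by trying to create an unsynchronized
 * userptr object; the global flags are restored before returning.
 */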
448 static int has_userptr(int fd)
449 {
450 	uint32_t handle = 0;
451 	void *ptr;
452 	uint32_t oldflags;
453 	int ret;
454 
455 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
456 	oldflags = userptr_flags;
457 	gem_userptr_test_unsynchronized();
458 	ret = __gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
459 	userptr_flags = oldflags;
460 	if (ret != 0) {
461 		free(ptr);
462 		return 0;
463 	}
464 
465 	gem_close(fd, handle);
466 	free(ptr);
467 
468 	return handle != 0;
469 }
470 
471 static int test_input_checking(int fd)
472 {
473 	struct local_i915_gem_userptr userptr;
474 	int ret;
475 
476 	/* Invalid flags. */
477 	memset(&userptr, 0, sizeof(userptr));
478 	userptr.user_ptr = 0;
479 	userptr.user_size = 0;
480 	userptr.flags = ~0;
481 	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
482 	igt_assert_neq(ret, 0);
483 
484 	/* Too big. */
485 	memset(&userptr, 0, sizeof(userptr));
486 	userptr.user_ptr = 0;
487 	userptr.user_size = ~0;
488 	userptr.flags = 0;
489 	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
490 	igt_assert_neq(ret, 0);
491 
492 	/* Both wrong. */
493 	memset(&userptr, 0, sizeof(userptr));
494 	userptr.user_ptr = 0;
495 	userptr.user_size = ~0;
496 	userptr.flags = ~0;
497 	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
498 	igt_assert_neq(ret, 0);
499 
500 	/* Zero user_size. */
501 	memset(&userptr, 0, sizeof(userptr));
502 	userptr.user_ptr = 0;
503 	userptr.user_size = 0;
504 	userptr.flags = 0;
505 	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
506 	igt_assert_neq(ret, 0);
507 
508 	return 0;
509 }
510 
511 static int test_access_control(int fd)
512 {
513 	/* CAP_SYS_ADMIN is needed for UNSYNCHRONIZED mappings. */
514 	gem_userptr_test_unsynchronized();
515 	igt_require(has_userptr(fd));
516 
517 	igt_fork(child, 1) {
518 		void *ptr;
519 		int ret;
520 		uint32_t handle;
521 
522 		igt_drop_root();
523 
524 		igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
525 
526 		ret = __gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
527 		if (ret == 0)
528 			gem_close(fd, handle);
529 		free(ptr);
530 		igt_assert_eq(ret, -EPERM);
531 	}
532 
533 	igt_waitchildren();
534 
535 	return 0;
536 }
537 
538 static int test_invalid_null_pointer(int fd)
539 {
540 	uint32_t handle;
541 
542 	/* NULL pointer. */
543 	gem_userptr(fd, NULL, PAGE_SIZE, 0, userptr_flags, &handle);
544 
545 	igt_assert_neq(copy(fd, handle, handle), 0); /* QQQ Precise errno? */
546 	gem_close(fd, handle);
547 
548 	return 0;
549 }
550 
551 static int test_invalid_gtt_mapping(int fd)
552 {
553 	struct drm_i915_gem_mmap_gtt arg;
554 	uint32_t handle;
555 	char *gtt, *map;
556 
557 	/* Anonymous mapping to find a hole */
558 	map = mmap(NULL, sizeof(linear) + 2 * PAGE_SIZE,
559 		   PROT_READ | PROT_WRITE,
560 		   MAP_PRIVATE | MAP_ANONYMOUS,
561 		   -1, 0);
562 	igt_assert(map != MAP_FAILED);
563 
564 	gem_userptr(fd, map, sizeof(linear) + 2 * PAGE_SIZE, 0, userptr_flags, &handle);
565 	igt_assert_eq(copy(fd, handle, handle), 0);
566 	gem_close(fd, handle);
567 
568 	gem_userptr(fd, map, PAGE_SIZE, 0, userptr_flags, &handle);
569 	igt_assert_eq(copy(fd, handle, handle), 0);
570 	gem_close(fd, handle);
571 
572 	gem_userptr(fd, map + sizeof(linear) + PAGE_SIZE, PAGE_SIZE, 0, userptr_flags, &handle);
573 	igt_assert_eq(copy(fd, handle, handle), 0);
574 	gem_close(fd, handle);
575 
576 	/* GTT mapping */
577 	memset(&arg, 0, sizeof(arg));
578 	arg.handle = create_bo(fd, 0);
579 	do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
580 	gtt = mmap(map + PAGE_SIZE, sizeof(linear),
581 		   PROT_READ | PROT_WRITE,
582 		   MAP_SHARED | MAP_FIXED,
583 		   fd, arg.offset);
584 	igt_assert(gtt == map + PAGE_SIZE);
585 	gem_close(fd, arg.handle);
586 	igt_assert(((unsigned long)gtt & (PAGE_SIZE - 1)) == 0);
587 	igt_assert((sizeof(linear) & (PAGE_SIZE - 1)) == 0);
588 
589 	gem_userptr(fd, gtt, sizeof(linear), 0, userptr_flags, &handle);
590 	igt_assert_eq(copy(fd, handle, handle), -EFAULT);
591 	gem_close(fd, handle);
592 
593 	gem_userptr(fd, gtt, PAGE_SIZE, 0, userptr_flags, &handle);
594 	igt_assert_eq(copy(fd, handle, handle), -EFAULT);
595 	gem_close(fd, handle);
596 
597 	gem_userptr(fd, gtt + sizeof(linear) - PAGE_SIZE, PAGE_SIZE, 0, userptr_flags, &handle);
598 	igt_assert_eq(copy(fd, handle, handle), -EFAULT);
599 	gem_close(fd, handle);
600 
601 	/* boundaries */
602 	gem_userptr(fd, map, 2*PAGE_SIZE, 0, userptr_flags, &handle);
603 	igt_assert_eq(copy(fd, handle, handle), -EFAULT);
604 	gem_close(fd, handle);
605 
606 	gem_userptr(fd, map + sizeof(linear), 2*PAGE_SIZE, 0, userptr_flags, &handle);
607 	igt_assert_eq(copy(fd, handle, handle), -EFAULT);
608 	gem_close(fd, handle);
609 
610 	munmap(map, sizeof(linear) + 2*PAGE_SIZE);
611 
612 	return 0;
613 }
614 
615 #define PE_GTT_MAP 0x1
616 #define PE_BUSY 0x2
617 static void test_process_exit(int fd, int flags)
618 {
619 	if (flags & PE_GTT_MAP)
620 		igt_require(gem_has_llc(fd));
621 
622 	igt_fork(child, 1) {
623 		uint32_t handle;
624 
625 		handle = create_userptr_bo(fd, sizeof(linear));
626 
627 		if (flags & PE_GTT_MAP) {
628 			uint32_t *ptr = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
629 			if (ptr)
630 				*ptr = 0;
631 		}
632 
633 		if (flags & PE_BUSY)
634 			igt_assert_eq(copy(fd, handle, handle), 0);
635 	}
636 	igt_waitchildren();
637 }
638 
639 static void test_forked_access(int fd)
640 {
641 	uint32_t handle1 = 0, handle2 = 0;
642 	void *ptr1 = NULL, *ptr2 = NULL;
643 	int ret;
644 
645 	ret = posix_memalign(&ptr1, PAGE_SIZE, sizeof(linear));
646 #ifdef MADV_DONTFORK
647 	ret |= madvise(ptr1, sizeof(linear), MADV_DONTFORK);
648 #endif
649 	gem_userptr(fd, ptr1, sizeof(linear), 0, userptr_flags, &handle1);
650 	igt_assert(ptr1);
651 	igt_assert(handle1);
652 
653 	ret = posix_memalign(&ptr2, PAGE_SIZE, sizeof(linear));
654 #ifdef MADV_DONTFORK
655 	ret |= madvise(ptr2, sizeof(linear), MADV_DONTFORK);
656 #endif
657 	gem_userptr(fd, ptr2, sizeof(linear), 0, userptr_flags, &handle2);
658 	igt_assert(ptr2);
659 	igt_assert(handle2);
660 
661 	memset(ptr1, 0x1, sizeof(linear));
662 	memset(ptr2, 0x2, sizeof(linear));
663 
664 	igt_fork(child, 1)
665 		igt_assert_eq(copy(fd, handle1, handle2), 0);
666 	igt_waitchildren();
667 
668 	gem_userptr_sync(fd, handle1);
669 	gem_userptr_sync(fd, handle2);
670 
671 	gem_close(fd, handle1);
672 	gem_close(fd, handle2);
673 
674 	igt_assert(memcmp(ptr1, ptr2, sizeof(linear)) == 0);
675 
676 #ifdef MADV_DOFORK
677 	ret = madvise(ptr1, sizeof(linear), MADV_DOFORK);
678 	igt_assert_eq(ret, 0);
679 #endif
680 	free(ptr1);
681 
682 #ifdef MADV_DOFORK
683 	ret = madvise(ptr2, sizeof(linear), MADV_DOFORK);
684 	igt_assert_eq(ret, 0);
685 #endif
686 	free(ptr2);
687 }
688 
689 #define MAP_FIXED_INVALIDATE_OVERLAP	(1<<0)
690 #define MAP_FIXED_INVALIDATE_BUSY	(1<<1)
691 #define MAP_FIXED_INVALIDATE_GET_PAGES	(1<<2)
692 #define ALL_MAP_FIXED_INVALIDATE (MAP_FIXED_INVALIDATE_OVERLAP | \
693 				  MAP_FIXED_INVALIDATE_BUSY | \
694 				  MAP_FIXED_INVALIDATE_GET_PAGES)
695 
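/*
 * Repeatedly mmap(MAP_FIXED) a GTT mapping of a fresh BO over parts of
 * a live userptr range, forcing mmu-notifier invalidation while the
 * userptr object is optionally busy or has its pages pinned.
 */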
696 static int test_map_fixed_invalidate(int fd, uint32_t flags)
697 {
698 	const size_t ptr_size = sizeof(linear) + 2*PAGE_SIZE;
699 	const int num_handles = (flags & MAP_FIXED_INVALIDATE_OVERLAP) ? 2 : 1;
700 	uint32_t handle[num_handles];
701 	uint32_t *ptr;
702 
703 	ptr = mmap(NULL, ptr_size,
704 		   PROT_READ | PROT_WRITE,
705 		   MAP_SHARED | MAP_ANONYMOUS,
706 		   -1, 0);
707 	igt_assert(ptr != MAP_FAILED);
708 
709 	for (int i = 0; i < num_handles; i++)
710 		handle[i] = create_userptr(fd, 0, ptr + PAGE_SIZE/sizeof(*ptr));
711 
712 	for (char *fixed = (char *)ptr, *end = fixed + ptr_size;
713 	     fixed + 2*PAGE_SIZE <= end;
714 	     fixed += PAGE_SIZE) {
715 		struct drm_i915_gem_mmap_gtt mmap_gtt;
716 		uint32_t *map;
717 
718 		map = mmap(ptr, ptr_size, PROT_READ | PROT_WRITE,
719 			   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED,
720 			   -1, 0);
721 		igt_assert(map != MAP_FAILED);
722 		igt_assert(map == ptr);
723 
724 		memset(&mmap_gtt, 0, sizeof(mmap_gtt));
725 		mmap_gtt.handle = gem_create(fd, 2*PAGE_SIZE);
726 		do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt);
727 
728 		if (flags & MAP_FIXED_INVALIDATE_GET_PAGES)
729 			igt_assert_eq(__gem_set_domain(fd, handle[0],
730 						       I915_GEM_DOMAIN_GTT,
731 						       I915_GEM_DOMAIN_GTT),
732 				      0);
733 
734 		if (flags & MAP_FIXED_INVALIDATE_BUSY)
735 			igt_assert_eq(copy(fd, handle[0], handle[num_handles-1]), 0);
736 
737 		map = mmap(fixed, 2*PAGE_SIZE,
738 			   PROT_READ | PROT_WRITE,
739 			   MAP_SHARED | MAP_FIXED,
740 			   fd, mmap_gtt.offset);
741 		igt_assert(map != MAP_FAILED);
742 		igt_assert(map == (uint32_t *)fixed);
743 
744 		gem_set_tiling(fd, mmap_gtt.handle, I915_TILING_NONE, 0);
745 		*map = 0xdead;
746 
747 		if (flags & MAP_FIXED_INVALIDATE_GET_PAGES) {
748 			igt_assert_eq(__gem_set_domain(fd, handle[0],
749 						       I915_GEM_DOMAIN_GTT,
750 						       I915_GEM_DOMAIN_GTT),
751 				      -EFAULT);
752 
753 			/* Errors are permanent, so we have to recreate */
754 			gem_close(fd, handle[0]);
755 			handle[0] = create_userptr(fd, 0, ptr + PAGE_SIZE/sizeof(*ptr));
756 		}
757 
758 		gem_set_tiling(fd, mmap_gtt.handle, I915_TILING_Y, 512 * 4);
759 		*(uint32_t*)map = 0xbeef;
760 
761 		gem_close(fd, mmap_gtt.handle);
762 	}
763 
764 	for (int i = 0; i < num_handles; i++)
765 		gem_close(fd, handle[i]);
766 	munmap(ptr, ptr_size);
767 
768 	return 0;
769 }
770 
771 static int test_forbidden_ops(int fd)
772 {
773 	struct drm_i915_gem_pread gem_pread;
774 	struct drm_i915_gem_pwrite gem_pwrite;
775 	uint32_t handle;
776 	void *ptr;
777 
778 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
779 	gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
780 
781 	/* pread/pwrite are not always forbidden, but when they
782 	 * are, they should fail with EINVAL.
783 	 */
784 
785 	memset(&gem_pread, 0, sizeof(gem_pread));
786 	gem_pread.handle = handle;
787 	gem_pread.offset = 0;
788 	gem_pread.size = PAGE_SIZE;
789 	gem_pread.data_ptr = to_user_pointer(ptr);
790 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread))
791 		igt_assert_eq(errno, EINVAL);
792 
793 	memset(&gem_pwrite, 0, sizeof(gem_pwrite));
794 	gem_pwrite.handle = handle;
795 	gem_pwrite.offset = 0;
796 	gem_pwrite.size = PAGE_SIZE;
797 	gem_pwrite.data_ptr = to_user_pointer(ptr);
798 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite))
799 		igt_assert_eq(errno, EINVAL);
800 
801 	gem_close(fd, handle);
802 	free(ptr);
803 
804 	return 0;
805 }
806 
807 static void test_relocations(int fd)
808 {
809 	struct drm_i915_gem_relocation_entry *reloc;
810 	struct drm_i915_gem_exec_object2 obj;
811 	struct drm_i915_gem_execbuffer2 exec;
812 	unsigned size;
813 	void *ptr;
814 	int i;
815 
816 	size = PAGE_SIZE + ALIGN(sizeof(*reloc)*256, PAGE_SIZE);
817 
818 	memset(&obj, 0, sizeof(obj));
819 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, size) == 0);
820 	gem_userptr(fd, ptr, size, 0, userptr_flags, &obj.handle);
821 	if (!gem_has_llc(fd))
822 		gem_set_caching(fd, obj.handle, 0);
823 	*(uint32_t *)ptr = MI_BATCH_BUFFER_END;
824 
825 	reloc = (typeof(reloc))((char *)ptr + PAGE_SIZE);
826 	obj.relocs_ptr = to_user_pointer(reloc);
827 	obj.relocation_count = 256;
828 
829 	memset(reloc, 0, 256*sizeof(*reloc));
830 	for (i = 0; i < 256; i++) {
831 		reloc[i].offset = 2048 - 4*i;
832 		reloc[i].target_handle = obj.handle;
833 		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
834 	}
835 
836 	memset(&exec, 0, sizeof(exec));
837 	exec.buffers_ptr = to_user_pointer(&obj);
838 	exec.buffer_count = 1;
839 	gem_execbuf(fd, &exec);
840 
841 	gem_sync(fd, obj.handle);
842 	gem_close(fd, obj.handle);
843 	free(ptr);
844 }
845 
846 static unsigned char counter;
847 
848 static void (* volatile orig_sigbus)(int sig, siginfo_t *info, void *param);
849 static volatile unsigned long sigbus_start;
850 static volatile long sigbus_cnt = -1;
851 
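/*
 * Return a CPU-readable mapping of handle's contents: a GTT mmap on
 * LLC platforms, otherwise a CPU mmap of a temporary copy made with
 * the blitter.
 */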
852 static void *umap(int fd, uint32_t handle)
853 {
854 	void *ptr;
855 
856 	if (gem_has_llc(fd)) {
857 		ptr = gem_mmap__gtt(fd, handle, sizeof(linear),
858 				    PROT_READ | PROT_WRITE);
859 	} else {
860 		uint32_t tmp = gem_create(fd, sizeof(linear));
861 		igt_assert_eq(copy(fd, tmp, handle), 0);
862 		ptr = gem_mmap__cpu(fd, tmp, 0, sizeof(linear), PROT_READ);
863 		gem_close(fd, tmp);
864 	}
865 
866 	return ptr;
867 }
868 
869 static void
870 check_bo(int fd1, uint32_t handle1, int is_userptr, int fd2, uint32_t handle2)
871 {
872 	unsigned char *ptr1, *ptr2;
873 	unsigned long size = sizeof(linear);
874 
875 	ptr2 = umap(fd2, handle2);
876 	if (is_userptr)
877 		ptr1 = is_userptr > 0 ? get_handle_ptr(handle1) : ptr2;
878 	else
879 		ptr1 = umap(fd1, handle1);
880 
881 	igt_assert(ptr1);
882 	igt_assert(ptr2);
883 
884 	sigbus_start = (unsigned long)ptr2;
885 	igt_assert(memcmp(ptr1, ptr2, sizeof(linear)) == 0);
886 
887 	if (gem_has_llc(fd1)) {
888 		counter++;
889 		memset(ptr1, counter, size);
890 		memset(ptr2, counter, size);
891 	}
892 
893 	if (!is_userptr)
894 		munmap(ptr1, sizeof(linear));
895 	munmap(ptr2, sizeof(linear));
896 }
897 
898 static int export_handle(int fd, uint32_t handle, int *outfd)
899 {
900 	struct drm_prime_handle args;
901 	int ret;
902 
903 	args.handle = handle;
904 	args.flags = DRM_CLOEXEC;
905 	args.fd = -1;
906 
907 	ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
908 	if (ret)
909 		ret = errno;
910 	*outfd = args.fd;
911 
912 	return ret;
913 }
914 
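/*
 * SIGBUS handler for the dmabuf test: once the userptr backing store
 * has been freed, touching the imported mapping faults. Replace the
 * faulting range with fresh anonymous memory so the comparison can
 * continue, and count the faults in sigbus_cnt.
 */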
915 static void sigbus(int sig, siginfo_t *info, void *param)
916 {
917 	unsigned long ptr = (unsigned long)info->si_addr;
918 	void *addr;
919 
920 	if (ptr >= sigbus_start &&
921 	    ptr < sigbus_start + sizeof(linear)) {
922 		/* replace mapping to allow progress */
923 		munmap((void *)sigbus_start, sizeof(linear));
924 		addr = mmap((void *)sigbus_start, sizeof(linear),
925 			    PROT_READ | PROT_WRITE,
926 			    MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
927 		igt_assert((unsigned long)addr == sigbus_start);
928 		memset(addr, counter, sizeof(linear));
929 
930 		sigbus_cnt++;
931 		return;
932 	}
933 
934 	if (orig_sigbus)
935 		orig_sigbus(sig, info, param);
936 	igt_assert(0);
937 }
938 
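/*
 * Export a userptr BO as a dma-buf, import it on a second fd, then
 * free the userptr backing store; subsequent CPU access through the
 * import must raise SIGBUS rather than touch stale pages.
 */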
939 static int test_dmabuf(void)
940 {
941 	int fd1, fd2;
942 	uint32_t handle, handle_import;
943 	int dma_buf_fd = -1;
944 	int ret;
945 
946 	fd1 = drm_open_driver(DRIVER_INTEL);
947 
948 	handle = create_userptr_bo(fd1, sizeof(linear));
949 	memset(get_handle_ptr(handle), counter, sizeof(linear));
950 
951 	ret = export_handle(fd1, handle, &dma_buf_fd);
952 	if (userptr_flags & LOCAL_I915_USERPTR_UNSYNCHRONIZED && ret) {
953 		igt_assert(ret == EINVAL || ret == ENODEV);
954 		free_userptr_bo(fd1, handle);
955 		close(fd1);
956 		return 0;
957 	} else {
958 		igt_assert_eq(ret, 0);
959 		igt_assert_lte(0, dma_buf_fd);
960 	}
961 
962 	fd2 = drm_open_driver(DRIVER_INTEL);
963 	handle_import = prime_fd_to_handle(fd2, dma_buf_fd);
964 	check_bo(fd1, handle, 1, fd2, handle_import);
965 
966 	/* close dma_buf and check that nothing disappears. */
967 	close(dma_buf_fd);
968 	check_bo(fd1, handle, 1, fd2, handle_import);
969 
970 	/* destroy userptr object and expect SIGBUS */
971 	free_userptr_bo(fd1, handle);
972 	close(fd1);
973 
974 	if (gem_has_llc(fd2)) {
975 		struct sigaction sigact, orig_sigact;
976 
977 		memset(&sigact, 0, sizeof(sigact));
978 		sigact.sa_sigaction = sigbus;
979 		sigact.sa_flags = SA_SIGINFO;
980 		ret = sigaction(SIGBUS, &sigact, &orig_sigact);
981 		igt_assert_eq(ret, 0);
982 
983 		orig_sigbus = orig_sigact.sa_sigaction;
984 
985 		sigbus_cnt = 0;
986 		check_bo(fd2, handle_import, -1, fd2, handle_import);
987 		igt_assert(sigbus_cnt > 0);
988 
989 		ret = sigaction(SIGBUS, &orig_sigact, NULL);
990 		igt_assert_eq(ret, 0);
991 	}
992 
993 	close(fd2);
994 	reset_handle_ptr();
995 
996 	return 0;
997 }
998 
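/*
 * Emit count MI_STORE_DWORD_IMM commands in a single batch, each
 * writing a random value at a random dword-aligned offset within
 * target.
 */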
999 static void store_dword_rand(int i915, unsigned int engine,
1000 			     uint32_t target, uint64_t sz,
1001 			     int count)
1002 {
1003 	const int gen = intel_gen(intel_get_drm_devid(i915));
1004 	struct drm_i915_gem_relocation_entry *reloc;
1005 	struct drm_i915_gem_exec_object2 obj[2];
1006 	struct drm_i915_gem_execbuffer2 exec;
1007 	unsigned int batchsz;
1008 	uint32_t *batch;
1009 	int i;
1010 
1011 	batchsz = count * 16 + 4;
1012 	batchsz = ALIGN(batchsz, 4096);
1013 
1014 	reloc = calloc(sizeof(*reloc), count);
1015 
1016 	memset(obj, 0, sizeof(obj));
1017 	obj[0].handle = target;
1018 	obj[0].flags = LOCAL_EXEC_OBJECT_SUPPORTS_48B;
1019 	obj[1].handle = gem_create(i915, batchsz);
1020 	obj[1].relocation_count = count;
1021 	obj[1].relocs_ptr = to_user_pointer(reloc);
1022 
1023 	batch = gem_mmap__wc(i915, obj[1].handle, 0, batchsz, PROT_WRITE);
1024 
1025 	memset(&exec, 0, sizeof(exec));
1026 	exec.buffer_count = 2;
1027 	exec.buffers_ptr = to_user_pointer(obj);
1028 	exec.flags = engine;
1029 	if (gen < 6)
1030 		exec.flags |= I915_EXEC_SECURE;
1031 
1032 	i = 0;
1033 	for (int n = 0; n < count; n++) {
1034 		uint64_t offset;
1035 
1036 		reloc[n].target_handle = obj[0].handle;
1037 		reloc[n].delta = rand() % (sz / 4) * 4;
1038 		reloc[n].offset = (i + 1) * sizeof(uint32_t);
1039 		reloc[n].presumed_offset = obj[0].offset;
1040 		reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
1041 		reloc[n].write_domain = I915_GEM_DOMAIN_RENDER;
1042 
1043 		offset = reloc[n].presumed_offset + reloc[n].delta;
1044 
1045 		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
1046 		if (gen >= 8) {
1047 			batch[++i] = offset;
1048 			batch[++i] = offset >> 32;
1049 		} else if (gen >= 4) {
1050 			batch[++i] = 0;
1051 			batch[++i] = offset;
1052 			reloc[n].offset += sizeof(uint32_t);
1053 		} else {
1054 			batch[i]--;
1055 			batch[++i] = offset;
1056 		}
1057 		batch[++i] = rand();
1058 		i++;
1059 	}
1060 	batch[i] = MI_BATCH_BUFFER_END;
1061 	igt_assert(i * sizeof(uint32_t) < batchsz);
1062 	munmap(batch, batchsz);
1063 
1064 	gem_execbuf(i915, &exec);
1065 
1066 	gem_close(i915, obj[1].handle);
1067 	free(reloc);
1068 }
1069 
1070 static void test_readonly(int i915)
1071 {
1072 	uint64_t aperture_size;
1073 	uint32_t whandle, rhandle;
1074 	size_t sz, total;
1075 	void *pages, *space;
1076 	int memfd;
1077 
1078 	/*
1079 	 * A small batch of pages; small enough to cheaply check for stray
1080 	 * writes but large enough that we don't create too many VMA pointing
1081 	 * back to this set from the large arena. The limit on total number
1082 	 * of VMA for a process is 65,536 (at least on this kernel).
1083 	 *
1084 	 * We then write from the GPU through the large arena into the smaller
1085 	 * backing storage, which we can cheaply check to see if those writes
1086 	 * have landed (using a SHA1sum). We then repeat the same random GPU
1087 	 * writes through a read-only handle to confirm that this time the
1088 	 * writes are discarded and the backing store is left unchanged.
1089 	 */
1090 	sz = 16 << 12;
1091 	memfd = memfd_create("pages", 0);
1092 	igt_require(memfd != -1);
1093 	igt_require(ftruncate(memfd, sz) == 0);
1094 
1095 	pages = mmap(NULL, sz, PROT_WRITE, MAP_SHARED, memfd, 0);
1096 	igt_assert(pages != MAP_FAILED);
1097 
1098 	igt_require(__gem_userptr(i915, pages, sz, true, userptr_flags, &rhandle) == 0);
1099 	gem_close(i915, rhandle);
1100 
1101 	gem_userptr(i915, pages, sz, false, userptr_flags, &whandle);
1102 
1103 	/*
1104 	 * We have only a 31bit delta which we use for generating
1105 	 * the target address for MI_STORE_DWORD_IMM, so our maximum
1106 	 * usable object size is only 2GiB. For now.
1107 	 */
1108 	total = 2048ull << 20;
1109 	aperture_size = gem_aperture_size(i915) / 2;
1110 	if (aperture_size < total)
1111 		total = aperture_size;
1112 	total = total / sz * sz;
1113 	igt_info("Using a %'zuB (%'zu pages) arena onto %zu pages\n",
1114 		 total, total >> 12, sz >> 12);
1115 
1116 	/* Create an arena all pointing to the same set of pages */
1117 	space = mmap(NULL, total, PROT_READ, MAP_ANON | MAP_SHARED, -1, 0);
1118 	igt_require(space != MAP_FAILED);
1119 	for (size_t offset = 0; offset < total; offset += sz) {
1120 		igt_assert(mmap(space + offset, sz,
1121 				PROT_WRITE, MAP_SHARED | MAP_FIXED,
1122 				memfd, 0) != MAP_FAILED);
1123 		*(uint32_t *)(space + offset) = offset;
1124 	}
1125 	igt_assert_eq_u32(*(uint32_t *)pages, (uint32_t)(total - sz));
1126 	igt_assert(mlock(pages, sz) == 0);
1127 	close(memfd);
1128 
1129 	/* Check we can create a normal userptr bo wrapping the wrapper */
1130 	gem_userptr(i915, space, total, false, userptr_flags, &rhandle);
1131 	gem_set_domain(i915, rhandle, I915_GEM_DOMAIN_CPU, 0);
1132 	for (size_t offset = 0; offset < total; offset += sz)
1133 		store_dword(i915, rhandle, offset + 4, offset / sz);
1134 	gem_sync(i915, rhandle);
1135 	igt_assert_eq_u32(*(uint32_t *)(pages + 0), (uint32_t)(total - sz));
1136 	igt_assert_eq_u32(*(uint32_t *)(pages + 4), (uint32_t)(total / sz - 1));
1137 	gem_close(i915, rhandle);
1138 
1139 	/* Now enforce read-only henceforth */
1140 	igt_assert(mprotect(space, total, PROT_READ) == 0);
1141 
1142 	igt_fork(child, 1) {
1143 		unsigned int engine;
1144 		char *orig;
1145 
1146 		orig = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
1147 
1148 		gem_userptr(i915, space, total, true, userptr_flags, &rhandle);
1149 
1150 		for_each_engine(i915, engine) {
1151 			char *ref, *result;
1152 
1153 			/* First tweak the backing store through the write */
1154 			store_dword_rand(i915, engine, whandle, sz, 1024);
1155 			gem_sync(i915, whandle);
1156 			ref = g_compute_checksum_for_data(G_CHECKSUM_SHA1,
1157 							  pages, sz);
1158 
1159 			/* Check some writes did land */
1160 			igt_assert(strcmp(ref, orig));
1161 
1162 			/* Now try the same through the read-only handle */
1163 			store_dword_rand(i915, engine, rhandle, total, 1024);
1164 			gem_sync(i915, rhandle);
1165 			result = g_compute_checksum_for_data(G_CHECKSUM_SHA1,
1166 							     pages, sz);
1167 
1168 			/*
1169 			 * As the writes into the read-only GPU bo should fail,
1170 			 * the SHA1 hash of the backing store should be
1171 			 * unaffected.
1172 			 */
1173 			igt_assert(strcmp(ref, result) == 0);
1174 
1175 			g_free(result);
1176 			g_free(orig);
1177 			orig = ref;
1178 		}
1179 
1180 		gem_close(i915, rhandle);
1181 
1182 		g_free(orig);
1183 	}
1184 	igt_waitchildren();
1185 
1186 	munlock(pages, sz);
1187 	munmap(space, total);
1188 	munmap(pages, sz);
1189 }
1190 
1191 static jmp_buf sigjmp;
1192 static void sigjmp_handler(int sig)
1193 {
1194 	siglongjmp(sigjmp, sig);
1195 }
1196 
1197 static void test_readonly_mmap(int i915)
1198 {
1199 	char *original, *result;
1200 	uint32_t handle;
1201 	uint32_t sz;
1202 	void *pages;
1203 	void *ptr;
1204 	int sig;
1205 
1206 	/*
1207 	 * A quick check to ensure that we cannot circumvent the
1208 	 * read-only nature of our memory by creating a GTT mmap into
1209 	 * the pages. Imagine receiving a readonly SHM segment from
1210 	 * another process, or a readonly file mmap, it must remain readonly
1211 	 * on the GPU as well.
1212 	 */
1213 
1214 	igt_require(igt_setup_clflush());
1215 
1216 	sz = 16 << 12;
1217 	pages = mmap(NULL, sz, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
1218 	igt_assert(pages != MAP_FAILED);
1219 
1220 	igt_require(__gem_userptr(i915, pages, sz, true, userptr_flags, &handle) == 0);
1221 	gem_set_caching(i915, handle, 0);
1222 
1223 	memset(pages, 0xa5, sz);
1224 	igt_clflush_range(pages, sz);
1225 	original = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
1226 
1227 	ptr = __gem_mmap__gtt(i915, handle, sz, PROT_WRITE);
1228 	igt_assert(ptr == NULL);
1229 
1230 	ptr = gem_mmap__gtt(i915, handle, sz, PROT_READ);
1231 	gem_close(i915, handle);
1232 
1233 	/* Check that a write into the GTT readonly map fails */
1234 	if (!(sig = sigsetjmp(sigjmp, 1))) {
1235 		signal(SIGBUS, sigjmp_handler);
1236 		signal(SIGSEGV, sigjmp_handler);
1237 		memset(ptr, 0x5a, sz);
1238 		igt_assert(0);
1239 	}
1240 	igt_assert_eq(sig, SIGSEGV);
1241 
1242 	/* Check that we disallow removing the readonly protection */
1243 	igt_assert(mprotect(ptr, sz, PROT_WRITE));
1244 	if (!(sig = sigsetjmp(sigjmp, 1))) {
1245 		signal(SIGBUS, sigjmp_handler);
1246 		signal(SIGSEGV, sigjmp_handler);
1247 		memset(ptr, 0x5a, sz);
1248 		igt_assert(0);
1249 	}
1250 	igt_assert_eq(sig, SIGSEGV);
1251 
1252 	/* A single read from the GTT pointer to prove that works */
1253 	igt_assert_eq_u32(*(uint8_t *)ptr, 0xa5);
1254 	munmap(ptr, sz);
1255 
1256 	/* Double check that the kernel did indeed not let any writes through */
1257 	igt_clflush_range(pages, sz);
1258 	result = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
1259 	igt_assert(!strcmp(original, result));
1260 
1261 	g_free(original);
1262 	g_free(result);
1263 
1264 	munmap(pages, sz);
1265 }
1266 
1267 static void test_readonly_pwrite(int i915)
1268 {
1269 	char *original, *result;
1270 	uint32_t handle;
1271 	uint32_t sz;
1272 	void *pages;
1273 
1274 	/*
1275 	 * As with the GTT mmap path, we must not be able to circumvent
1276 	 * the read-only protection on a piece of memory via the pwrite
1277 	 * ioctl.
1278 	 */
1279 
1280 	igt_require(igt_setup_clflush());
1281 
1282 	sz = 16 << 12;
1283 	pages = mmap(NULL, sz, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
1284 	igt_assert(pages != MAP_FAILED);
1285 
1286 	igt_require(__gem_userptr(i915, pages, sz, true, userptr_flags, &handle) == 0);
1287 	memset(pages, 0xa5, sz);
1288 	original = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
1289 
1290 	for (int page = 0; page < 16; page++) {
1291 		char data[4096];
1292 
1293 		memset(data, page, sizeof(data));
1294 		igt_assert_eq(__gem_write(i915, handle, page << 12, data, sizeof(data)), -EINVAL);
1295 	}
1296 
1297 	gem_close(i915, handle);
1298 
1299 	result = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
1300 	igt_assert(!strcmp(original, result));
1301 
1302 	g_free(original);
1303 	g_free(result);
1304 
1305 	munmap(pages, sz);
1306 }
1307 
1308 static int test_usage_restrictions(int fd)
1309 {
1310 	void *ptr;
1311 	int ret;
1312 	uint32_t handle;
1313 
1314 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE * 2) == 0);
1315 
1316 	/* Address not aligned. */
1317 	ret = __gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE, 0, userptr_flags, &handle);
1318 	igt_assert_neq(ret, 0);
1319 
1320 	/* Size not rounded to page size. */
1321 	ret = __gem_userptr(fd, ptr, PAGE_SIZE - 1, 0, userptr_flags, &handle);
1322 	igt_assert_neq(ret, 0);
1323 
1324 	/* Both wrong. */
1325 	ret = __gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE - 1, 0, userptr_flags, &handle);
1326 	igt_assert_neq(ret, 0);
1327 
1328 	free(ptr);
1329 
1330 	return 0;
1331 }
1332 
1333 static int test_create_destroy(int fd, int time)
1334 {
1335 	struct timespec start, now;
1336 	uint32_t handle;
1337 	void *ptr;
1338 	int n;
1339 
1340 	igt_fork_signal_helper();
1341 
1342 	clock_gettime(CLOCK_MONOTONIC, &start);
1343 	do {
1344 		for (n = 0; n < 1000; n++) {
1345 			igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
1346 
1347 			gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
1348 
1349 			gem_close(fd, handle);
1350 			free(ptr);
1351 		}
1352 
1353 		clock_gettime(CLOCK_MONOTONIC, &now);
1354 		now.tv_sec -= time;
1355 	} while (now.tv_sec < start.tv_sec ||
1356 		 (now.tv_sec == start.tv_sec && now.tv_nsec < start.tv_nsec));
1357 
1358 	igt_stop_signal_helper();
1359 
1360 	return 0;
1361 }
1362 
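/*
 * Blit back and forth between regular GEM BOs and userptr BOs and
 * verify the contents both through gem_read and directly through the
 * CPU pointers backing the userptr objects.
 */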
1363 static int test_coherency(int fd, int count)
1364 {
1365 	uint32_t *memory;
1366 	uint32_t *cpu, *cpu_val;
1367 	uint32_t *gpu, *gpu_val;
1368 	uint32_t start = 0;
1369 	int i, ret;
1370 
1371 	igt_info("Using 2x%d 1MiB buffers\n", count);
1372 	intel_require_memory(2*count, sizeof(linear), CHECK_RAM);
1373 
1374 	ret = posix_memalign((void **)&memory, PAGE_SIZE, count*sizeof(linear));
1375 	igt_assert(ret == 0 && memory);
1376 
1377 	gpu = malloc(sizeof(uint32_t)*count*4);
1378 	gpu_val = gpu + count;
1379 	cpu = gpu_val + count;
1380 	cpu_val = cpu + count;
1381 
1382 	for (i = 0; i < count; i++) {
1383 		gpu[i] = create_bo(fd, start);
1384 		gpu_val[i] = start;
1385 		start += WIDTH*HEIGHT;
1386 	}
1387 
1388 	for (i = 0; i < count; i++) {
1389 		cpu[i] = create_userptr(fd, start, memory+i*WIDTH*HEIGHT);
1390 		cpu_val[i] = start;
1391 		start += WIDTH*HEIGHT;
1392 	}
1393 
1394 	igt_info("Verifying initialisation...\n");
1395 	for (i = 0; i < count; i++) {
1396 		check_gpu(fd, gpu[i], gpu_val[i]);
1397 		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
1398 	}
1399 
1400 	igt_info("Cyclic blits cpu->gpu, forward...\n");
1401 	for (i = 0; i < count * 4; i++) {
1402 		int src = i % count;
1403 		int dst = (i + 1) % count;
1404 
1405 		igt_assert_eq(copy(fd, gpu[dst], cpu[src]), 0);
1406 		gpu_val[dst] = cpu_val[src];
1407 	}
1408 	for (i = 0; i < count; i++)
1409 		check_gpu(fd, gpu[i], gpu_val[i]);
1410 
1411 	igt_info("Cyclic blits gpu->cpu, backward...\n");
1412 	for (i = 0; i < count * 4; i++) {
1413 		int src = (i + 1) % count;
1414 		int dst = i % count;
1415 
1416 		igt_assert_eq(copy(fd, cpu[dst], gpu[src]), 0);
1417 		cpu_val[dst] = gpu_val[src];
1418 	}
1419 	for (i = 0; i < count; i++) {
1420 		gem_userptr_sync(fd, cpu[i]);
1421 		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
1422 	}
1423 
1424 	igt_info("Random blits...\n");
1425 	for (i = 0; i < count * 4; i++) {
1426 		int src = random() % count;
1427 		int dst = random() % count;
1428 
1429 		if (random() & 1) {
1430 			igt_assert_eq(copy(fd, gpu[dst], cpu[src]), 0);
1431 			gpu_val[dst] = cpu_val[src];
1432 		} else {
1433 			igt_assert_eq(copy(fd, cpu[dst], gpu[src]), 0);
1434 			cpu_val[dst] = gpu_val[src];
1435 		}
1436 	}
1437 	for (i = 0; i < count; i++) {
1438 		check_gpu(fd, gpu[i], gpu_val[i]);
1439 		gem_close(fd, gpu[i]);
1440 
1441 		gem_userptr_sync(fd, cpu[i]);
1442 		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
1443 		gem_close(fd, cpu[i]);
1444 	}
1445 
1446 	free(gpu);
1447 	free(memory);
1448 
1449 	return 0;
1450 }
1451 
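/* Plug the userptr BO helpers into the shared eviction framework. */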
1452 static struct igt_eviction_test_ops fault_ops = {
1453 	.create = create_userptr_bo,
1454 	.flink = flink_userptr_bo,
1455 	.close = free_userptr_bo,
1456 	.copy = blit,
1457 	.clear = clear,
1458 };
1459 
1460 static int can_swap(void)
1461 {
1462 	unsigned long as, ram;
1463 
1464 	/* Cannot swap if not enough address space */
1465 
1466 	/* FIXME: Improve check criteria. */
1467 	if (sizeof(void*) < 8)
1468 		as = 3 * 1024;
1469 	else
1470 		as = 256 * 1024; /* Just a big number */
1471 
1472 	ram = intel_get_total_ram_mb();
1473 
1474 	if ((as - 128) < (ram - 256))
1475 		return 0;
1476 
1477 	return 1;
1478 }
1479 
1480 static void test_forking_evictions(int fd, int size, int count,
1481 			     unsigned flags)
1482 {
1483 	int trash_count;
1484 	int num_threads;
1485 
1486 	trash_count = intel_get_total_ram_mb() * 11 / 10;
1487 	/* Use the fact that the test will spawn a number of child
1488 	 * processes, meaning swapping will be triggered system-wide
1489 	 * even if one process on its own can't trigger it.
1490 	 */
1491 	num_threads = min(sysconf(_SC_NPROCESSORS_ONLN) * 4, 12);
1492 	trash_count /= num_threads;
1493 	if (count > trash_count)
1494 		count = trash_count;
1495 
1496 	forking_evictions(fd, &fault_ops, size, count, trash_count, flags);
1497 	reset_handle_ptr();
1498 }
1499 
1500 static void test_mlocked_evictions(int fd, int size, int count)
1501 {
1502 	count = min(256, count/2);
1503 	mlocked_evictions(fd, &fault_ops, size, count);
1504 	reset_handle_ptr();
1505 }
1506 
1507 static void test_swapping_evictions(int fd, int size, int count)
1508 {
1509 	int trash_count;
1510 
1511 	igt_skip_on_f(!can_swap(),
1512 		"Not enough process address space for swapping tests.\n");
1513 
1514 	trash_count = intel_get_total_ram_mb() * 11 / 10;
1515 
1516 	swapping_evictions(fd, &fault_ops, size, count, trash_count);
1517 	reset_handle_ptr();
1518 }
1519 
1520 static void test_minor_evictions(int fd, int size, int count)
1521 {
1522 	minor_evictions(fd, &fault_ops, size, count);
1523 	reset_handle_ptr();
1524 }
1525 
1526 static void test_major_evictions(int fd, int size, int count)
1527 {
1528 	major_evictions(fd, &fault_ops, size, count);
1529 	reset_handle_ptr();
1530 }
1531 
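/*
 * Create userptr objects whose ranges abut or overlap an existing one;
 * non-overlapping ranges must always succeed, while overlapping ranges
 * must either succeed or fail with the expected errno, depending on
 * the synchronization mode.
 */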
1532 static void test_overlap(int fd, int expected)
1533 {
1534 	char *ptr;
1535 	int ret;
1536 	uint32_t handle, handle2;
1537 
1538 	igt_assert(posix_memalign((void *)&ptr, PAGE_SIZE, PAGE_SIZE * 3) == 0);
1539 
1540 	gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE, 0, userptr_flags, &handle);
1541 
1542 	/* before, no overlap */
1543 	ret = __gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle2);
1544 	if (ret == 0)
1545 		gem_close(fd, handle2);
1546 	igt_assert_eq(ret, 0);
1547 
1548 	/* after, no overlap */
1549 	ret = __gem_userptr(fd, ptr + PAGE_SIZE * 2, PAGE_SIZE, 0, userptr_flags, &handle2);
1550 	if (ret == 0)
1551 		gem_close(fd, handle2);
1552 	igt_assert_eq(ret, 0);
1553 
1554 	/* exactly overlapping */
1555 	ret = __gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE, 0, userptr_flags, &handle2);
1556 	if (ret == 0)
1557 		gem_close(fd, handle2);
1558 	igt_assert(ret == 0 || ret == expected);
1559 
1560 	/* start overlaps */
1561 	ret = __gem_userptr(fd, ptr, PAGE_SIZE * 2, 0, userptr_flags, &handle2);
1562 	if (ret == 0)
1563 		gem_close(fd, handle2);
1564 	igt_assert(ret == 0 || ret == expected);
1565 
1566 	/* end overlaps */
1567 	ret = __gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE * 2, 0, userptr_flags, &handle2);
1568 	if (ret == 0)
1569 		gem_close(fd, handle2);
1570 	igt_assert(ret == 0 || ret == expected);
1571 
1572 	/* subsumes */
1573 	ret = __gem_userptr(fd, ptr, PAGE_SIZE * 3, 0, userptr_flags, &handle2);
1574 	if (ret == 0)
1575 		gem_close(fd, handle2);
1576 	igt_assert(ret == 0 || ret == expected);
1577 
1578 	gem_close(fd, handle);
1579 	free(ptr);
1580 }
1581 
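/*
 * Unmap the backing store while the userptr handles are still open:
 * copies must succeed beforehand and fail with the expected errno
 * afterwards.
 */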
1582 static void test_unmap(int fd, int expected)
1583 {
1584 	char *ptr, *bo_ptr;
1585 	const unsigned int num_obj = 3;
1586 	unsigned int i;
1587 	uint32_t bo[num_obj + 1];
1588 	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
1589 	int ret;
1590 
1591 	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
1592 				MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1593 	igt_assert(ptr != MAP_FAILED);
1594 
1595 	bo_ptr = (char *)ALIGN((unsigned long)ptr, PAGE_SIZE);
1596 
1597 	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
1598 		gem_userptr(fd, bo_ptr, sizeof(linear), 0, userptr_flags, &bo[i]);
1599 	}
1600 
1601 	bo[num_obj] = create_bo(fd, 0);
1602 
1603 	for (i = 0; i < num_obj; i++)
1604 		igt_assert_eq(copy(fd, bo[num_obj], bo[i]), 0);
1605 
1606 	ret = munmap(ptr, map_size);
1607 	igt_assert_eq(ret, 0);
1608 
1609 	for (i = 0; i < num_obj; i++)
1610 		igt_assert_eq(copy(fd, bo[num_obj], bo[i]), -expected);
1611 
1612 	for (i = 0; i < (num_obj + 1); i++)
1613 		gem_close(fd, bo[i]);
1614 }
1615 
1616 static void test_unmap_after_close(int fd)
1617 {
1618 	char *ptr, *bo_ptr;
1619 	const unsigned int num_obj = 3;
1620 	unsigned int i;
1621 	uint32_t bo[num_obj + 1];
1622 	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
1623 	int ret;
1624 
1625 	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
1626 				MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1627 	igt_assert(ptr != MAP_FAILED);
1628 
1629 	bo_ptr = (char *)ALIGN((unsigned long)ptr, PAGE_SIZE);
1630 
1631 	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
1632 		gem_userptr(fd, bo_ptr, sizeof(linear), 0, userptr_flags, &bo[i]);
1633 	}
1634 
1635 	bo[num_obj] = create_bo(fd, 0);
1636 
1637 	for (i = 0; i < num_obj; i++)
1638 		igt_assert_eq(copy(fd, bo[num_obj], bo[i]), 0);
1639 
1640 	for (i = 0; i < (num_obj + 1); i++)
1641 		gem_close(fd, bo[i]);
1642 
1643 	ret = munmap(ptr, map_size);
1644 	igt_assert_eq(ret, 0);
1645 }
1646 
1647 static void test_unmap_cycles(int fd, int expected)
1648 {
1649 	int i;
1650 
1651 	for (i = 0; i < 1000; i++)
1652 		test_unmap(fd, expected);
1653 }
1654 
1655 #define MM_STRESS_LOOPS 100000
1656 
1657 struct stress_thread_data {
1658 	unsigned int stop;
1659 	int exit_code;
1660 };
1661 
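/*
 * Worker that continuously maps, touches and unmaps a 2 MiB anonymous
 * region, generating a stream of mmu-notifier invalidations to race
 * against userptr creation and destruction on the main thread.
 */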
1662 static void *mm_stress_thread(void *data)
1663 {
1664 	struct stress_thread_data *stdata = (struct stress_thread_data *)data;
1665 	const size_t sz = 2 << 20;
1666 	void *ptr;
1667 
1668 	while (!stdata->stop) {
1669 		ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
1670 			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1671 		if (ptr == MAP_FAILED) {
1672 			stdata->exit_code = -EFAULT;
1673 			break;
1674 		}
1675 
1676 		madvise(ptr, sz, MADV_HUGEPAGE);
1677 		for (size_t page = 0; page < sz; page += PAGE_SIZE)
1678 			*(volatile uint32_t *)((unsigned char *)ptr + page) = 0;
1679 
1680 		if (munmap(ptr, sz)) {
1681 			stdata->exit_code = errno;
1682 			break;
1683 		}
1684 	}
1685 
1686 	return NULL;
1687 }
1688 
1689 static void test_stress_mm(int fd)
1690 {
1691 	int ret;
1692 	pthread_t t;
1693 	unsigned int loops = MM_STRESS_LOOPS;
1694 	uint32_t handle;
1695 	void *ptr;
1696 	struct stress_thread_data stdata;
1697 
1698 	memset(&stdata, 0, sizeof(stdata));
1699 
1700 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
1701 
1702 	ret = pthread_create(&t, NULL, mm_stress_thread, &stdata);
1703 	igt_assert_eq(ret, 0);
1704 
1705 	while (loops--) {
1706 		gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
1707 
1708 		gem_close(fd, handle);
1709 	}
1710 
1711 	free(ptr);
1712 
1713 	stdata.stop = 1;
1714 	ret = pthread_join(t, NULL);
1715 	igt_assert_eq(ret, 0);
1716 
1717 	igt_assert_eq(stdata.exit_code, 0);
1718 }
1719 
1720 static void test_stress_purge(int fd)
1721 {
1722 	struct stress_thread_data stdata;
1723 	uint32_t handle;
1724 	pthread_t t;
1725 	void *ptr;
1726 
1727 	memset(&stdata, 0, sizeof(stdata));
1728 
1729 	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
1730 	igt_assert(!pthread_create(&t, NULL, mm_stress_thread, &stdata));
1731 
1732 	igt_until_timeout(150) {
1733 		gem_userptr(fd, ptr, PAGE_SIZE, 0, userptr_flags, &handle);
1734 
1735 		gem_set_domain(fd, handle,
1736 			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
1737 		intel_purge_vm_caches(fd);
1738 
1739 		gem_close(fd, handle);
1740 	}
1741 
1742 	free(ptr);
1743 
1744 	stdata.stop = 1;
1745 	igt_assert(!pthread_join(t, NULL));
1746 	igt_assert_eq(stdata.exit_code, 0);
1747 }
1748 
1749 struct userptr_close_thread_data {
1750 	int fd;
1751 	void *ptr;
1752 	bool overlap;
1753 	bool stop;
1754 	pthread_mutex_t mutex;
1755 };
1756 
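/*
 * Worker that repeatedly creates and closes (optionally overlapping)
 * userptr objects on the same page, racing against the mprotect()
 * invalidations issued by the main thread.
 */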
1757 static void *mm_userptr_close_thread(void *data)
1758 {
1759 	struct userptr_close_thread_data *t = (struct userptr_close_thread_data *)data;
1760 	int num_handles = t->overlap ? 2 : 1;
1761 
1762 	uint32_t handle[num_handles];
1763 
1764 	/* Be pedantic and enforce the required memory barriers */
1765 	pthread_mutex_lock(&t->mutex);
1766 	while (!t->stop) {
1767 		pthread_mutex_unlock(&t->mutex);
1768 		for (int i = 0; i < num_handles; i++)
1769 			gem_userptr(t->fd, t->ptr, PAGE_SIZE, 0, userptr_flags, &handle[i]);
1770 		for (int i = 0; i < num_handles; i++)
1771 			gem_close(t->fd, handle[i]);
1772 		pthread_mutex_lock(&t->mutex);
1773 	}
1774 	pthread_mutex_unlock(&t->mutex);
1775 
1776 	return NULL;
1777 }
1778 
1779 static void test_invalidate_close_race(int fd, bool overlap)
1780 {
1781 	pthread_t t;
1782 	unsigned int loops = MM_STRESS_LOOPS;
1783 	struct userptr_close_thread_data t_data;
1784 
1785 	memset(&t_data, 0, sizeof(t_data));
1786 	t_data.fd = fd;
1787 	t_data.overlap = overlap;
1788 	igt_assert(posix_memalign(&t_data.ptr, PAGE_SIZE, PAGE_SIZE) == 0);
1789 	pthread_mutex_init(&t_data.mutex, NULL);
1790 
1791 	igt_assert(pthread_create(&t, NULL, mm_userptr_close_thread, &t_data) == 0);
1792 
1793 	while (loops--) {
1794 		mprotect(t_data.ptr, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC);
1795 		mprotect(t_data.ptr, PAGE_SIZE, PROT_READ | PROT_WRITE);
1796 	}
1797 
1798 	pthread_mutex_lock(&t_data.mutex);
1799 	t_data.stop = 1;
1800 	pthread_mutex_unlock(&t_data.mutex);
1801 
1802 	pthread_join(t, NULL);
1803 
1804 	pthread_mutex_destroy(&t_data.mutex);
1805 	free(t_data.ptr);
1806 }
1807 
1808 uint64_t total_ram;
1809 uint64_t aperture_size;
1810 int fd, count;
1811 
1812 static int opt_handler(int opt, int opt_index, void *data)
1813 {
1814 	switch (opt) {
1815 	case 'c':
1816 		count = atoi(optarg);
1817 		break;
1818 	default:
1819 		return IGT_OPT_HANDLER_ERROR;
1820 	}
1821 
1822 	return IGT_OPT_HANDLER_SUCCESS;
1823 }
1824 
1825 const char *help_str = "  -c\tBuffer count\n";
1826 
1827 igt_main_args("c:", NULL, help_str, opt_handler, NULL)
1828 {
1829 	int size = sizeof(linear);
1830 
1831 	igt_fixture {
1832 		fd = drm_open_driver(DRIVER_INTEL);
1833 		igt_assert(fd >= 0);
1834 		igt_require_gem(fd);
1835 
1836 		size = sizeof(linear);
1837 
1838 		aperture_size = gem_aperture_size(fd);
1839 		igt_info("Aperture size is %lu MiB\n", (long)(aperture_size / (1024*1024)));
1840 
1841 		if (count == 0)
1842 			count = 2 * aperture_size / (1024*1024) / 3;
1843 
1844 		total_ram = intel_get_total_ram_mb();
1845 		igt_info("Total RAM is %'llu MiB\n", (long long)total_ram);
1846 
1847 		if (count > total_ram * 3 / 4) {
1848 			count = intel_get_total_ram_mb() * 3 / 4;
1849 			igt_info("Not enough RAM to run test, reducing buffer count.\n");
1850 		}
1851 	}
1852 
1853 	igt_subtest_group {
1854 		igt_fixture {
1855 			igt_require(has_userptr(fd));
1856 		}
1857 
1858 		igt_subtest("input-checking")
1859 			test_input_checking(fd);
1860 
1861 		igt_subtest("usage-restrictions")
1862 			test_usage_restrictions(fd);
1863 
1864 		igt_subtest("invalid-null-pointer")
1865 			test_invalid_null_pointer(fd);
1866 
1867 		igt_subtest("invalid-gtt-mapping")
1868 			test_invalid_gtt_mapping(fd);
1869 
1870 		igt_subtest("forked-access")
1871 			test_forked_access(fd);
1872 
1873 		igt_subtest("forbidden-operations")
1874 			test_forbidden_ops(fd);
1875 
1876 		igt_subtest("relocations")
1877 			test_relocations(fd);
1878 	}
1879 
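	/* Subtests run with LOCAL_I915_USERPTR_UNSYNCHRONIZED set. */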
1880 	igt_subtest_group {
1881 		gem_userptr_test_unsynchronized();
1882 
1883 		igt_fixture {
1884 			igt_require(has_userptr(fd));
1885 		}
1886 
1887 		igt_subtest("create-destroy-unsync")
1888 			test_create_destroy(fd, 5);
1889 
1890 		igt_subtest("unsync-overlap")
1891 			test_overlap(fd, 0);
1892 
1893 		igt_subtest("unsync-unmap")
1894 			test_unmap(fd, 0);
1895 
1896 		igt_subtest("unsync-unmap-cycles")
1897 			test_unmap_cycles(fd, 0);
1898 
1899 		igt_subtest("unsync-unmap-after-close")
1900 			test_unmap_after_close(fd);
1901 
1902 		igt_subtest("coherency-unsync")
1903 			test_coherency(fd, count);
1904 
1905 		igt_subtest("dmabuf-unsync")
1906 			test_dmabuf();
1907 
1908 		igt_subtest("readonly-unsync")
1909 			test_readonly(fd);
1910 
1911 		igt_subtest("readonly-mmap-unsync")
1912 			test_readonly_mmap(fd);
1913 
1914 		igt_subtest("readonly-pwrite-unsync")
1915 			test_readonly_pwrite(fd);
1916 
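		/*
		 * Forking eviction stress in every combination of swapping,
		 * duplicated drm fds, memory pressure and interruptibility.
		 */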
1917 		for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
1918 			igt_subtest_f("forked-unsync%s%s%s-%s",
1919 					flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
1920 					flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
1921 					flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
1922 					"-mempressure" : "",
1923 					flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
1924 					"interruptible" : "normal") {
1925 				test_forking_evictions(fd, size, count, flags);
1926 			}
1927 		}
1928 
1929 		igt_subtest("mlocked-unsync-normal")
1930 			test_mlocked_evictions(fd, size, count);
1931 
1932 		igt_subtest("swapping-unsync-normal")
1933 			test_swapping_evictions(fd, size, count);
1934 
1935 		igt_subtest("minor-unsync-normal")
1936 			test_minor_evictions(fd, size, count);
1937 
1938 		igt_subtest("major-unsync-normal") {
1939 			size = 200 * 1024 * 1024;
1940 			count = (gem_aperture_size(fd) / size) + 2;
1941 			test_major_evictions(fd, size, count);
1942 		}
1943 
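		/* Restore size/count clobbered by the major eviction subtest. */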
1944 		igt_fixture {
1945 			size = sizeof(linear);
1946 			count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
1947 			if (count > total_ram * 3 / 4)
1948 				count = intel_get_total_ram_mb() * 3 / 4;
1949 		}
1950 
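		/* Re-run the eviction subtests with the signal helper interrupting them. */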
1951 		igt_fork_signal_helper();
1952 
1953 		igt_subtest("mlocked-unsync-interruptible")
1954 			test_mlocked_evictions(fd, size, count);
1955 
1956 		igt_subtest("swapping-unsync-interruptible")
1957 			test_swapping_evictions(fd, size, count);
1958 
1959 		igt_subtest("minor-unsync-interruptible")
1960 			test_minor_evictions(fd, size, count);
1961 
1962 		igt_subtest("major-unsync-interruptible") {
1963 			size = 200 * 1024 * 1024;
1964 			count = (gem_aperture_size(fd) / size) + 2;
1965 			test_major_evictions(fd, size, count);
1966 		}
1967 
1968 		igt_stop_signal_helper();
1969 	}
1970 
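	/*
	 * Subtests run with synchronized (default) userptr semantics, where
	 * overlapping ranges and unmapping are expected to fail with the
	 * errnos passed below.
	 */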
1971 	igt_subtest_group {
1972 		gem_userptr_test_synchronized();
1973 
1974 		igt_fixture {
1975 			igt_require(has_userptr(fd));
1976 			size = sizeof(linear);
1977 			count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
1978 			if (count > total_ram * 3 / 4)
1979 				count = intel_get_total_ram_mb() * 3 / 4;
1980 		}
1981 
1982 		igt_subtest("process-exit")
1983 			test_process_exit(fd, 0);
1984 
1985 		igt_subtest("process-exit-gtt")
1986 			test_process_exit(fd, PE_GTT_MAP);
1987 
1988 		igt_subtest("process-exit-busy")
1989 			test_process_exit(fd, PE_BUSY);
1990 
1991 		igt_subtest("process-exit-gtt-busy")
1992 			test_process_exit(fd, PE_GTT_MAP | PE_BUSY);
1993 
1994 		igt_subtest("create-destroy-sync")
1995 			test_create_destroy(fd, 5);
1996 
1997 		igt_subtest("sync-overlap")
1998 			test_overlap(fd, EINVAL);
1999 
2000 		igt_subtest("sync-unmap")
2001 			test_unmap(fd, EFAULT);
2002 
2003 		igt_subtest("sync-unmap-cycles")
2004 			test_unmap_cycles(fd, EFAULT);
2005 
2006 		igt_subtest("sync-unmap-after-close")
2007 			test_unmap_after_close(fd);
2008 
2009 		igt_subtest("stress-mm")
2010 			test_stress_mm(fd);
2011 		igt_subtest("stress-purge")
2012 			test_stress_purge(fd);
2013 
2014 		igt_subtest("stress-mm-invalidate-close")
2015 			test_invalidate_close_race(fd, false);
2016 
2017 		igt_subtest("stress-mm-invalidate-close-overlap")
2018 			test_invalidate_close_race(fd, true);
2019 
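		/*
		 * MAP_FIXED invalidation in every combination of overlapping
		 * handles, busy objects and explicit get_pages.
		 */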
2020 		for (unsigned flags = 0; flags < ALL_MAP_FIXED_INVALIDATE + 1; flags++) {
2021 			igt_subtest_f("map-fixed-invalidate%s%s%s",
2022 				      flags & MAP_FIXED_INVALIDATE_OVERLAP ? "-overlap" : "",
2023 				      flags & MAP_FIXED_INVALIDATE_BUSY ? "-busy" : "",
2024 				      flags & MAP_FIXED_INVALIDATE_GET_PAGES ? "-gup" : "") {
2025 				test_map_fixed_invalidate(fd, flags);
2026 			}
2027 		}
2028 
2029 		igt_subtest("coherency-sync")
2030 			test_coherency(fd, count);
2031 
2032 		igt_subtest("dmabuf-sync")
2033 			test_dmabuf();
2034 
2035 		for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
2036 			igt_subtest_f("forked-sync%s%s%s-%s",
2037 					flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
2038 					flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
2039 					flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
2040 					"-mempressure" : "",
2041 					flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
2042 					"interruptible" : "normal") {
2043 				test_forking_evictions(fd, size, count, flags);
2044 			}
2045 		}
2046 
2047 		igt_subtest("mlocked-normal-sync")
2048 			test_mlocked_evictions(fd, size, count);
2049 
2050 		igt_subtest("swapping-normal-sync")
2051 			test_swapping_evictions(fd, size, count);
2052 
2053 		igt_subtest("minor-normal-sync")
2054 			test_minor_evictions(fd, size, count);
2055 
2056 		igt_subtest("major-normal-sync") {
2057 			size = 200 * 1024 * 1024;
2058 			count = (gem_aperture_size(fd) / size) + 2;
2059 			test_major_evictions(fd, size, count);
2060 		}
2061 
2062 		igt_fixture {
2063 			size = 1024 * 1024;
2064 			count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
2065 			if (count > total_ram * 3 / 4)
2066 				count = intel_get_total_ram_mb() * 3 / 4;
2067 		}
2068 
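		/* Interruptible variants of the synchronized eviction subtests. */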
2069 		igt_fork_signal_helper();
2070 
2071 		igt_subtest("mlocked-sync-interruptible")
2072 			test_mlocked_evictions(fd, size, count);
2073 
2074 		igt_subtest("swapping-sync-interruptible")
2075 			test_swapping_evictions(fd, size, count);
2076 
2077 		igt_subtest("minor-sync-interruptible")
2078 			test_minor_evictions(fd, size, count);
2079 
2080 		igt_subtest("major-sync-interruptible") {
2081 			size = 200 * 1024 * 1024;
2082 			count = (gem_aperture_size(fd) / size) + 2;
2083 			test_major_evictions(fd, size, count);
2084 		}
2085 
2086 		igt_stop_signal_helper();
2087 	}
2088 
2089 
2090 	igt_subtest("access-control")
2091 		test_access_control(fd);
2092 }
2093