xref: /aosp_15_r20/external/igt-gpu-tools/tests/i915/gem_mmap_wc.c (revision d83cc019efdc2edc6c4b16e9034a3ceb8d35d77c)
1 /*
2  * Copyright © 2011 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Chris Wilson <[email protected]>
25  *
26  */
27 
28 #include "igt.h"
29 #include <unistd.h>
30 #include <stdlib.h>
31 #include <stdio.h>
32 #include <string.h>
33 #include <fcntl.h>
34 #include <inttypes.h>
35 #include <pthread.h>
36 #include <errno.h>
37 #include <sys/stat.h>
38 #include <sys/ioctl.h>
39 #include "drm.h"
40 
/*
 * Local copy of the v2 (flags-capable) GEM_MMAP ioctl argument layout,
 * carried here so the test builds against older kernel/libdrm headers.
 * Field order and sizes must match the kernel ABI exactly — do not
 * reorder or repack.
 */
struct local_i915_gem_mmap_v2 {
	uint32_t handle;	/* GEM handle of the object to map */
	uint32_t pad;
	uint64_t offset;	/* byte offset into the object */
	uint64_t size;		/* length of the mapping */
	uint64_t addr_ptr;	/* out: userspace address of the mapping */
	uint64_t flags;		/* only I915_MMAP_WC is defined in v1 */
#define I915_MMAP_WC 0x1
};
#define LOCAL_IOCTL_I915_GEM_MMAP_v2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct local_i915_gem_mmap_v2)

/* Default test object size; reduced to 1 MiB under simulation (see igt_main). */
static int OBJECT_SIZE = 16*1024*1024;
53 
/*
 * Local WC mmap wrapper. This is used to make sure we go through
 * the GEM_MMAP IOCTL, asserting that the mapping succeeded.
 *
 * NOTE(review): the original body ignored the caller-supplied offset,
 * size and prot and always mapped [0, OBJECT_SIZE) read/write; it now
 * honours the parameters (all existing callers pass offset 0 and
 * size OBJECT_SIZE, so only the protection flags actually change).
 */
static void *
local_gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
{
	void *ptr;

	ptr = __gem_mmap__wc(fd, handle, offset, size, prot);
	igt_assert(ptr);

	return ptr;
}
68 
set_domain(int fd,uint32_t handle)69 static void set_domain(int fd, uint32_t handle)
70 {
71 	gem_set_domain(fd, handle, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
72 }
73 
74 static void *
mmap_bo(int fd,uint32_t handle)75 mmap_bo(int fd, uint32_t handle)
76 {
77 	void *ptr;
78 
79 	ptr = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
80 
81 	return ptr;
82 }
83 
84 static void *
create_pointer(int fd)85 create_pointer(int fd)
86 {
87 	uint32_t handle;
88 	void *ptr;
89 
90 	handle = gem_create(fd, OBJECT_SIZE);
91 
92 	ptr = mmap_bo(fd, handle);
93 	set_domain(fd, handle);
94 
95 	gem_close(fd, handle);
96 
97 	return ptr;
98 }
99 
/*
 * Walk every single-bit flag above I915_MMAP_WC and check the kernel
 * rejects each one with EINVAL.  Skipped (loop not entered) when the
 * running kernel predates the flags-capable (v1) mmap ioctl.
 */
static void
test_invalid_flags(int fd)
{
	struct drm_i915_getparam gp;
	struct local_i915_gem_mmap_v2 arg;
	uint64_t flag = I915_MMAP_WC;
	int val = -1;

	memset(&arg, 0, sizeof(arg));
	arg.handle = gem_create(fd, 4096);
	arg.offset = 0;
	arg.size = 4096;

	memset(&gp, 0, sizeof(gp));
	gp.param = 30; /* MMAP_VERSION */
	gp.value = &val;

	/* Do we have the new mmap_ioctl? */
	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	if (val >= 1) {
		/*
		 * Only MMAP_WC flag is supported in version 1, so any other
		 * flag should be rejected.
		 */
		flag <<= 1;
		while (flag) {	/* terminates when the bit shifts out of the u64 */
			arg.flags = flag;
			igt_assert(drmIoctl(fd,
				   LOCAL_IOCTL_I915_GEM_MMAP_v2,
				   &arg) == -1);
			igt_assert_eq(errno, EINVAL);
			flag <<= 1;
		}
	}

	gem_close(fd, arg.handle);
}
138 
139 static void
test_copy(int fd)140 test_copy(int fd)
141 {
142 	void *src, *dst;
143 
144 	/* copy from a fresh src to fresh dst to force pagefault on both */
145 	src = create_pointer(fd);
146 	dst = create_pointer(fd);
147 
148 	memcpy(dst, src, OBJECT_SIZE);
149 	memcpy(src, dst, OBJECT_SIZE);
150 
151 	munmap(dst, OBJECT_SIZE);
152 	munmap(src, OBJECT_SIZE);
153 }
154 
/* Ordering of the faulting accesses in test_read_write{,2}(). */
enum test_read_write {
	READ_BEFORE_WRITE,
	READ_AFTER_WRITE,
};
159 
160 static void
test_read_write(int fd,enum test_read_write order)161 test_read_write(int fd, enum test_read_write order)
162 {
163 	uint32_t handle;
164 	void *ptr;
165 	volatile uint32_t val = 0;
166 
167 	handle = gem_create(fd, OBJECT_SIZE);
168 	set_domain(fd, handle);
169 
170 	ptr = mmap_bo(fd, handle);
171 	igt_assert(ptr != MAP_FAILED);
172 
173 	if (order == READ_BEFORE_WRITE) {
174 		val = *(uint32_t *)ptr;
175 		*(uint32_t *)ptr = val;
176 	} else {
177 		*(uint32_t *)ptr = val;
178 		val = *(uint32_t *)ptr;
179 	}
180 
181 	gem_close(fd, handle);
182 	munmap(ptr, OBJECT_SIZE);
183 }
184 
185 static void
test_read_write2(int fd,enum test_read_write order)186 test_read_write2(int fd, enum test_read_write order)
187 {
188 	uint32_t handle;
189 	void *r, *w;
190 	volatile uint32_t val = 0;
191 
192 	handle = gem_create(fd, OBJECT_SIZE);
193 	set_domain(fd, handle);
194 
195 	r = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
196 
197 	w = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
198 
199 	if (order == READ_BEFORE_WRITE) {
200 		val = *(uint32_t *)r;
201 		*(uint32_t *)w = val;
202 	} else {
203 		*(uint32_t *)w = val;
204 		val = *(uint32_t *)r;
205 	}
206 
207 	gem_close(fd, handle);
208 	munmap(r, OBJECT_SIZE);
209 	munmap(w, OBJECT_SIZE);
210 }
211 
212 static void
test_write(int fd)213 test_write(int fd)
214 {
215 	void *src;
216 	uint32_t dst;
217 
218 	/* copy from a fresh src to fresh dst to force pagefault on both */
219 	src = create_pointer(fd);
220 	dst = gem_create(fd, OBJECT_SIZE);
221 
222 	gem_write(fd, dst, 0, src, OBJECT_SIZE);
223 
224 	gem_close(fd, dst);
225 	munmap(src, OBJECT_SIZE);
226 }
227 
/*
 * WC vs CPU mmap coherency: write through the WC mapping and check the
 * value becomes visible through a CPU mapping of the same page after
 * an explicit clflush.  Requires working clflush support.
 */
static void
test_coherency(int fd)
{
	uint32_t handle;
	uint32_t *wc, *cpu;
	int i;

	igt_require(igt_setup_clflush());

	handle = gem_create(fd, OBJECT_SIZE);

	wc = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	cpu = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);

	/* One dword per 64-byte chunk (16 uint32s), sliding the offset
	 * within the chunk so successive iterations hit different slots. */
	for (i = 0; i < OBJECT_SIZE / 64; i++) {
		int x = 16*i + (i%16);
		wc[x] = i;
		igt_clflush_range(&cpu[x], sizeof(cpu[x]));
		igt_assert_eq(cpu[x], i);
	}

	munmap(cpu, OBJECT_SIZE);
	munmap(wc, OBJECT_SIZE);
	gem_close(fd, handle);
}
254 
255 static void
test_write_gtt(int fd)256 test_write_gtt(int fd)
257 {
258 	uint32_t dst;
259 	char *dst_gtt;
260 	void *src;
261 
262 	dst = gem_create(fd, OBJECT_SIZE);
263 	set_domain(fd, dst);
264 
265 	/* prefault object into gtt */
266 	dst_gtt = mmap_bo(fd, dst);
267 	memset(dst_gtt, 0, OBJECT_SIZE);
268 	munmap(dst_gtt, OBJECT_SIZE);
269 
270 	src = create_pointer(fd);
271 
272 	gem_write(fd, dst, 0, src, OBJECT_SIZE);
273 
274 	gem_close(fd, dst);
275 	munmap(src, OBJECT_SIZE);
276 }
277 
278 static void
test_read(int fd)279 test_read(int fd)
280 {
281 	void *dst;
282 	uint32_t src;
283 
284 	/* copy from a fresh src to fresh dst to force pagefault on both */
285 	dst = create_pointer(fd);
286 	src = gem_create(fd, OBJECT_SIZE);
287 
288 	gem_read(fd, src, 0, dst, OBJECT_SIZE);
289 
290 	gem_close(fd, src);
291 	munmap(dst, OBJECT_SIZE);
292 }
293 
294 static void
test_close(int fd)295 test_close(int fd)
296 {
297 	uint32_t handle = gem_create(fd, OBJECT_SIZE);
298 	uint8_t *ptr = mmap_bo(fd, handle);
299 	int i;
300 
301 	memset(ptr, 0xcc, OBJECT_SIZE);
302 	gem_close(fd, handle);
303 	for (i = 0; i < OBJECT_SIZE / 4096; i++)
304 		igt_assert(ptr[i * 4096 + i] == 0xcc);
305 
306 	munmap(ptr, OBJECT_SIZE);
307 }
308 
309 static void
test_write_cpu_read_wc(int fd,int force_domain)310 test_write_cpu_read_wc(int fd, int force_domain)
311 {
312 	uint32_t handle;
313 	uint32_t *src, *dst;
314 
315 	handle = gem_create(fd, OBJECT_SIZE);
316 
317 	dst = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
318 
319 	src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
320 
321 	memset(src, 0xaa, OBJECT_SIZE);
322 	if (force_domain)
323 		set_domain(fd, handle);
324 	igt_assert(memcmp(dst, src, OBJECT_SIZE) == 0);
325 	gem_close(fd, handle);
326 
327 	munmap(src, OBJECT_SIZE);
328 	munmap(dst, OBJECT_SIZE);
329 }
330 
331 static void
test_write_gtt_read_wc(int fd)332 test_write_gtt_read_wc(int fd)
333 {
334 	uint32_t handle;
335 	uint32_t *src, *dst;
336 
337 	handle = gem_create(fd, OBJECT_SIZE);
338 	set_domain(fd, handle);
339 
340 	dst = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
341 
342 	src = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
343 
344 	memset(src, 0xaa, OBJECT_SIZE);
345 	igt_assert(memcmp(dst, src, OBJECT_SIZE) == 0);
346 	gem_close(fd, handle);
347 
348 	munmap(src, OBJECT_SIZE);
349 	munmap(dst, OBJECT_SIZE);
350 }
351 
/*
 * Bind a freshly created, WC-domain object to the cursor on every
 * active CRTC, forcing a set-cache-level(DISPLAY) on an object that
 * may still be unbound.  Skips (via igt_require) if no CRTC has a
 * valid mode.
 */
static void
test_set_cache_level(int fd)
{
	struct drm_mode_cursor arg;
	struct drm_mode_card_res res;
	uint32_t crtc[32];
	int active_crtc = 0;
	int n;

	/* We want to trigger an old WARN in set-cache-level when
	 * it sees an unbound object in the GTT domain, following
	 * the introduction of mmap(wc).
	 */

	memset(&arg, 0, sizeof(arg));
	arg.flags = DRM_MODE_CURSOR_BO;
	arg.width = arg.height = 64;
	arg.handle = gem_create(fd, 64*64*4);	/* 64x64, 4 bytes per pixel */
	set_domain(fd, arg.handle);

	/* Bind the object to the cursor to force set-cache-level(DISPLAY) */
	memset(&res, 0, sizeof(res));
	res.count_crtcs = 32;
	res.crtc_id_ptr = to_user_pointer(crtc);
	do_ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res);
	for (n = 0; n < res.count_crtcs; n++) {
		struct drm_mode_crtc mode;

		memset(&mode, 0, sizeof(mode));
		mode.crtc_id = crtc[n];
		do_ioctl(fd, DRM_IOCTL_MODE_GETCRTC, &mode);

		/* only CRTCs with a configured mode can take a cursor */
		if (!mode.mode_valid)
			continue;

		active_crtc++;

		arg.crtc_id = crtc[n];
		do_ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
	}

	gem_close(fd, arg.handle);
	igt_require(active_crtc);	/* skip, not fail, on a headless setup */
}
396 
/* Per-thread state for test_fault_concurrent(). */
struct thread_fault_concurrent {
	pthread_t thread;
	int id;		/* offsets this thread's access pattern */
	uint32_t **ptr;	/* shared array of 32 WC mappings */
};
402 
403 static void *
thread_fault_concurrent(void * closure)404 thread_fault_concurrent(void *closure)
405 {
406 	struct thread_fault_concurrent *t = closure;
407 	uint32_t val = 0;
408 	int n;
409 
410 	for (n = 0; n < 32; n++) {
411 		if (n & 1)
412 			*t->ptr[(n + t->id) % 32] = val;
413 		else
414 			val = *t->ptr[(n + t->id) % 32];
415 	}
416 
417 	return NULL;
418 }
419 
420 static void
test_fault_concurrent(int fd)421 test_fault_concurrent(int fd)
422 {
423 	uint32_t *ptr[32];
424 	struct thread_fault_concurrent thread[64];
425 	int n;
426 
427 	for (n = 0; n < 32; n++) {
428 		ptr[n] = create_pointer(fd);
429 	}
430 
431 	for (n = 0; n < 64; n++) {
432 		thread[n].ptr = ptr;
433 		thread[n].id = n;
434 		pthread_create(&thread[n].thread, NULL, thread_fault_concurrent, &thread[n]);
435 	}
436 
437 	for (n = 0; n < 64; n++)
438 		pthread_join(thread[n].thread, NULL);
439 
440 	for (n = 0; n < 32; n++) {
441 		munmap(ptr[n], OBJECT_SIZE);
442 	}
443 }
444 
445 static void
test_pf_nonblock(int i915)446 test_pf_nonblock(int i915)
447 {
448 	igt_spin_t *spin;
449 	uint32_t *ptr;
450 
451 	spin = igt_spin_new(i915);
452 
453 	igt_set_timeout(1, "initial pagefaulting did not complete within 1s");
454 
455 	ptr = gem_mmap__wc(i915, spin->handle, 0, 4096, PROT_WRITE);
456 	ptr[256] = 0;
457 	munmap(ptr, 4096);
458 
459 	igt_reset_timeout();
460 
461 	igt_spin_free(i915, spin);
462 }
463 
/* Invoke @func with pagefault prefaulting disabled, restoring it after. */
static void
run_without_prefault(int fd, void (*func)(int fd))
{
	igt_disable_prefault();
	func(fd);
	igt_enable_prefault();
}
472 
mmap_ioctl(int i915,struct drm_i915_gem_mmap * arg)473 static int mmap_ioctl(int i915, struct drm_i915_gem_mmap *arg)
474 {
475 	int err = 0;
476 
477 	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_MMAP, arg))
478 		err = -errno;
479 
480 	errno = 0;
481 	return err;
482 }
483 
/* DRM fd shared by all subtests; opened once in the fixture below. */
int fd;

igt_main
{
	/* Keep runtimes sane on simulated hardware. */
	if (igt_run_in_simulation())
		OBJECT_SIZE = 1 * 1024 * 1024;

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		gem_require_mmap_wc(fd);
	}

	/* Every invalid handle variant must be rejected with ENOENT. */
	igt_subtest("bad-object") {
		uint32_t real_handle = gem_create(fd, 4096);
		uint32_t handles[20];
		size_t i = 0, len;

		handles[i++] = 0xdeadbeef;
		/* flip each high bit of a valid handle in turn */
		for(int bit = 0; bit < 16; bit++)
			handles[i++] = real_handle | (1 << (bit + 16));
		handles[i++] = real_handle + 1;
		len = i;

		for (i = 0; i < len; ++i) {
			struct drm_i915_gem_mmap arg = {
				.handle = handles[i],
				.size = 4096,
				.flags = I915_MMAP_WC,
			};
			igt_assert_eq(mmap_ioctl(fd, &arg), -ENOENT);
		}

		gem_close(fd, real_handle);
	}

	/* Out-of-range or unaligned offsets must be rejected with EINVAL. */
	igt_subtest("bad-offset") {
		struct bad_offset {
			uint64_t size;
			uint64_t offset;
		} bad_offsets[] = {
			{4096, 4096 + 1},
			{4096, -4096},
			{ 2 * 4096, -4096},
			{ 4096, ~0},
			{}	/* sentinel: size 0 is itself invalid */
		};

		for (int i = 0; i < ARRAY_SIZE(bad_offsets); i++) {
			struct drm_i915_gem_mmap arg = {
				.handle = gem_create(fd, 4096),

				.offset = bad_offsets[i].offset,
				.size = bad_offsets[i].size,

				.flags = I915_MMAP_WC,
			};

			igt_assert_eq(mmap_ioctl(fd, &arg), -EINVAL);
			gem_close(fd, arg.handle);
		}
	}

	/* Sizes that don't fit a 4096-byte object must fail with EINVAL. */
	igt_subtest("bad-size") {
		uint64_t bad_size[] = {
			0,
			-4096,
			4096 + 1,
			2 * 4096,
			~0,
		};

		for (int i = 0; i < ARRAY_SIZE(bad_size); i++) {
			struct drm_i915_gem_mmap arg = {
				.handle = gem_create(fd, 4096),
				.offset = 4096,
				.size = bad_size[i],
				.flags = I915_MMAP_WC,
			};

			igt_assert_eq(mmap_ioctl(fd, &arg), -EINVAL);
			gem_close(fd, arg.handle);
		}
	}

	igt_subtest("invalid-flags")
		test_invalid_flags(fd);
	igt_subtest("close")
		test_close(fd);
	igt_subtest("copy")
		test_copy(fd);
	igt_subtest("read")
		test_read(fd);
	igt_subtest("write")
		test_write(fd);
	igt_subtest("coherency")
		test_coherency(fd);
	igt_subtest("write-gtt")
		test_write_gtt(fd);
	igt_subtest("read-write")
		test_read_write(fd, READ_BEFORE_WRITE);
	igt_subtest("write-read")
		test_read_write(fd, READ_AFTER_WRITE);
	igt_subtest("read-write-distinct")
		test_read_write2(fd, READ_BEFORE_WRITE);
	igt_subtest("write-read-distinct")
		test_read_write2(fd, READ_AFTER_WRITE);
	igt_subtest("fault-concurrent")
		test_fault_concurrent(fd);
	/* *-no-prefault variants rerun the tests with prefaulting disabled */
	igt_subtest("read-no-prefault")
		run_without_prefault(fd, test_read);
	igt_subtest("write-no-prefault")
		run_without_prefault(fd, test_write);
	igt_subtest("write-gtt-no-prefault")
		run_without_prefault(fd, test_write_gtt);
	igt_subtest("write-cpu-read-wc")
		test_write_cpu_read_wc(fd, 1);
	igt_subtest("write-cpu-read-wc-unflushed")
		test_write_cpu_read_wc(fd, 0);
	igt_subtest("write-gtt-read-wc")
		test_write_gtt_read_wc(fd);
	igt_subtest("pf-nonblock")
		test_pf_nonblock(fd);
	igt_subtest("set-cache-level")
		test_set_cache_level(fd);

	igt_fixture
		close(fd);
}
612