/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"
#include "igt_vgem.h"

#include <sys/ioctl.h>
#include <sys/poll.h>
#include <time.h>

IGT_TEST_DESCRIPTION("Basic check of polling for prime/vgem fences.");

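/* Fill the buffer through the vgem mmap, then verify that GEM pread on the
 * handle imported into i915 sees the same values.
 */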
static void test_read(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	gem_close(vgem, scratch.handle);

	for (i = 0; i < 1024; i++) {
		uint32_t tmp;
		gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
		igt_assert_eq(tmp, i);
	}
	gem_close(i915, handle);
}

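/* A child reads the imported buffer through i915 while the parent attaches a
 * vgem write fence, fills the buffer and signals the fence; the child's
 * second pass of reads must wait for the fence and observe the written
 * values.
 */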
static void test_fence_read(int i915, int vgem)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	uint32_t fence;
	int dmabuf, i;
	int master[2], slave[2];

	igt_assert(pipe(master) == 0);
	igt_assert(pipe(slave) == 0);

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	igt_fork(child, 1) {
		close(master[0]);
		close(slave[1]);
		for (i = 0; i < 1024; i++) {
			uint32_t tmp;
			gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
			igt_assert_eq(tmp, 0);
		}
		write(master[1], &child, sizeof(child));
		read(slave[0], &child, sizeof(child));
		for (i = 0; i < 1024; i++) {
			uint32_t tmp;
			gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
			igt_assert_eq(tmp, i);
		}
		gem_close(i915, handle);
	}

	close(master[1]);
	close(slave[0]);
	read(master[0], &i, sizeof(i));
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	write(slave[1], &i, sizeof(i));

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	vgem_fence_signal(vgem, fence);
	gem_close(vgem, scratch.handle);

	igt_waitchildren();
	close(master[0]);
	close(slave[1]);
}

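/* As test_fence_read, but the child reads through a GTT mmap of the imported
 * handle, using set-domain to serialise against the vgem fence.
 */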
static void test_fence_mmap(int i915, int vgem)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	uint32_t fence;
	int dmabuf, i;
	int master[2], slave[2];

	igt_assert(pipe(master) == 0);
	igt_assert(pipe(slave) == 0);

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	igt_fork(child, 1) {
		close(master[0]);
		close(slave[1]);
		ptr = gem_mmap__gtt(i915, handle, 4096*1024, PROT_READ);

		gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
		for (i = 0; i < 1024; i++)
			igt_assert_eq(ptr[1024*i], 0);

		write(master[1], &child, sizeof(child));
		read(slave[0], &child, sizeof(child));

		gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
		for (i = 0; i < 1024; i++)
			igt_assert_eq(ptr[1024*i], i);

		gem_close(i915, handle);
	}

	close(master[1]);
	close(slave[0]);
	read(master[0], &i, sizeof(i));
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	write(slave[1], &i, sizeof(i));

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	vgem_fence_signal(vgem, fence);
	gem_close(vgem, scratch.handle);

	igt_waitchildren();
	close(master[0]);
	close(slave[1]);
}

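/* Write through GEM pwrite on the imported i915 handle and verify the values
 * through the vgem mmap of the original buffer.
 */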
static void test_write(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	gem_close(vgem, scratch.handle);

	for (i = 0; i < 1024; i++)
		gem_write(i915, handle, 4096*i, &i, sizeof(i));
	gem_close(i915, handle);

	for (i = 0; i < 1024; i++)
		igt_assert_eq(ptr[1024*i], i);
	munmap(ptr, scratch.size);
}

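/* Bounce data between an i915 GTT mmap of the imported handle and the vgem
 * mmap of the same pages, checking both views remain consistent.
 */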
static void test_gtt(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ | PROT_WRITE);
	for (i = 0; i < 1024; i++) {
		igt_assert_eq(ptr[1024*i], i);
		ptr[1024*i] = ~i;
	}
	munmap(ptr, scratch.size);

	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq(ptr[1024*i], ~i);
	munmap(ptr, scratch.size);

	gem_close(i915, handle);
	gem_close(vgem, scratch.handle);
}

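/* Import a vgem buffer, fault in its pages on the i915 side and then force
 * the shrinker to evict them, exercising the shrinker path for imported
 * dma-buf objects.
 */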
static void test_shrink(int vgem, int i915)
{
	struct vgem_bo scratch = {
		.width = 1024,
		.height = 1024,
		.bpp = 32
	};
	int dmabuf;

	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	gem_close(vgem, scratch.handle);

	scratch.handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	/* Populate the i915_bo->pages. */
	gem_set_domain(i915, scratch.handle, I915_GEM_DOMAIN_GTT, 0);

	/* Now evict them, establishing the link from i915:shrinker to vgem. */
	igt_drop_caches_set(i915, DROP_SHRINK_ALL);

	gem_close(i915, scratch.handle);
}

static bool is_coherent(int i915)
{
	int val = 1; /* by default, we assume GTT is coherent, hence the test */
	struct drm_i915_getparam gp = {
		.param = 52, /* GTT_COHERENT */
		.value = &val,
	};

	ioctl(i915, DRM_IOCTL_I915_GETPARAM, &gp);
	return val;
}

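/* Interleave writes through the i915 GTT mmap and the vgem (WC) mmap,
 * checking that each write is immediately visible through the other mapping
 * (requires the GTT-coherency parameter queried above).
 */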
static void test_gtt_interleaved(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr, *gtt;
	int dmabuf, i;

	igt_require(is_coherent(i915));

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	/* This assumes that the GTT is perfectly coherent. On certain
	 * machines, it is possible for a direct access to bypass the GTT
	 * indirection.
	 *
	 * This test may fail. It tells us how far userspace can trust
	 * concurrent dmabuf/i915 access. In the future, we may have a kernel
	 * param to indicate whether or not this interleaving is possible.
	 * However, the mmaps may be passed around to third parties that do
	 * not know about the shortcomings...
	 */
	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	gtt = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
	for (i = 0; i < 1024; i++) {
		gtt[1024*i] = i;
		/* The read from WC should act as a flush for the GTT wcb */
		igt_assert_eq(ptr[1024*i], i);

		ptr[1024*i] = ~i;
		/* The read from GTT should act as a flush for the WC wcb */
		igt_assert_eq(gtt[1024*i], ~i);
	}
	munmap(gtt, scratch.size);
	munmap(ptr, scratch.size);

	gem_close(i915, handle);
	gem_close(vgem, scratch.handle);
}

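/* Poll the dma-buf with a zero timeout: a return of 0 means the requested
 * access would still block, i.e. the buffer is busy. POLLOUT waits for all
 * fences (exclusive access), POLLIN only for the exclusive write fence.
 */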
static bool prime_busy(int fd, bool excl)
{
	struct pollfd pfd = { .fd = fd, .events = excl ? POLLOUT : POLLIN };
	return poll(&pfd, 1, 0) == 0;
}

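/* Submit a batch that stores the dwords 0..1023 into successive pages of the
 * imported dma-buf and then branches back to its own start; the buffer must
 * report busy until the recursion is broken by overwriting the
 * MI_BATCH_BUFFER_START with MI_BATCH_BUFFER_END below.
 */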
static void work(int i915, int dmabuf, unsigned ring, uint32_t flags)
{
	const int SCRATCH = 0;
	const int BATCH = 1;
	const int gen = intel_gen(intel_get_drm_devid(i915));
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry store[1024+1];
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned size = ALIGN(ARRAY_SIZE(store)*16 + 4, 4096);
	bool read_busy, write_busy;
	uint32_t *batch, *bbe;
	int i, count;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | flags;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[SCRATCH].handle = prime_fd_to_handle(i915, dmabuf);

	obj[BATCH].handle = gem_create(i915, size);
	obj[BATCH].relocs_ptr = (uintptr_t)store;
	obj[BATCH].relocation_count = ARRAY_SIZE(store);
	memset(store, 0, sizeof(store));

	batch = gem_mmap__wc(i915, obj[BATCH].handle, 0, size, PROT_WRITE);
	gem_set_domain(i915, obj[BATCH].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	i = 0;
	for (count = 0; count < 1024; count++) {
		store[count].target_handle = obj[SCRATCH].handle;
		store[count].presumed_offset = -1;
		store[count].offset = sizeof(uint32_t) * (i + 1);
		store[count].delta = sizeof(uint32_t) * count;
		store[count].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		store[count].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			batch[++i] = 0;
			batch[++i] = 0;
		} else if (gen >= 4) {
			batch[++i] = 0;
			batch[++i] = 0;
			store[count].offset += sizeof(uint32_t);
		} else {
			batch[i]--;
			batch[++i] = 0;
		}
		batch[++i] = count;
		i++;
	}

	bbe = &batch[i];
	store[count].target_handle = obj[BATCH].handle; /* recurse */
	store[count].presumed_offset = 0;
	store[count].offset = sizeof(uint32_t) * (i + 1);
	store[count].delta = 0;
	store[count].read_domains = I915_GEM_DOMAIN_COMMAND;
	store[count].write_domain = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			store[count].delta = 1;
		}
	}
	i++;
	igt_assert(i < size/sizeof(*batch));
	igt_require(__gem_execbuf(i915, &execbuf) == 0);
	gem_close(i915, obj[BATCH].handle);
	gem_close(i915, obj[SCRATCH].handle);

	write_busy = prime_busy(dmabuf, false);
	read_busy = prime_busy(dmabuf, true);

	*bbe = MI_BATCH_BUFFER_END;
	__sync_synchronize();
	munmap(batch, size);

	igt_assert(read_busy && write_busy);
}

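/* Submit work against the imported buffer, busy-poll the dma-buf until it
 * goes idle, then verify the GPU writes through the vgem mmap.
 */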
static void test_busy(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	struct timespec tv;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);

	work(i915, dmabuf, ring, flags);

	/* Calling busy in a loop should be enough to flush the rendering */
	memset(&tv, 0, sizeof(tv));
	while (prime_busy(dmabuf, false))
		igt_assert(igt_seconds_elapsed(&tv) < 10);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	munmap(ptr, 4096);

	gem_close(vgem, scratch.handle);
	close(dmabuf);
}

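/* As test_busy, but wait for completion with a blocking poll() on the
 * dma-buf instead of busy-polling.
 */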
static void test_wait(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	struct pollfd pfd;
	uint32_t *ptr;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	pfd.fd = prime_handle_to_fd(vgem, scratch.handle);

	work(i915, pfd.fd, ring, flags);

	pfd.events = POLLIN;
	igt_assert_eq(poll(&pfd, 1, 10000), 1);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	munmap(ptr, 4096);

	gem_close(vgem, scratch.handle);
	close(pfd.fd);
}

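/* Use the dma-buf sync ioctl (prime_sync_start/end) to serialise a CPU read
 * of the mmap against the GPU work before checking the results.
 */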
static void test_sync(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);
	gem_close(vgem, scratch.handle);

	work(i915, dmabuf, ring, flags);

	prime_sync_start(dmabuf, false);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);

	prime_sync_end(dmabuf, false);
	close(dmabuf);

	munmap(ptr, scratch.size);
}

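/* Attach an unsignalled vgem write fence before submitting i915 work from a
 * child: the work must not modify the buffer until the fence is signalled,
 * after which the results must become visible.
 */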
static void test_fence_wait(int i915, int vgem, unsigned ring, unsigned flags)
{
	struct vgem_bo scratch;
	uint32_t fence;
	uint32_t *ptr;
	int dmabuf;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	igt_assert(prime_busy(dmabuf, false));
	gem_close(vgem, scratch.handle);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);

	igt_fork(child, 1)
		work(i915, dmabuf, ring, flags);

	sleep(1);

	/* Check that the work does not complete before the fence is signaled */
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], 0);

	igt_assert(prime_busy(dmabuf, false));
	vgem_fence_signal(vgem, fence);
	igt_waitchildren();

	/* But after signaling and waiting, it should be done */
	prime_sync_start(dmabuf, false);
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	prime_sync_end(dmabuf, false);

	close(dmabuf);

	munmap(ptr, scratch.size);
}

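/* Attach a fence that never signals; work submitted against the buffer is
 * expected to be cancelled by hang recovery, leaving the buffer untouched.
 */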
static void test_fence_hang(int i915, int vgem, unsigned flags)
{
	struct vgem_bo scratch;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	vgem_fence_attach(vgem, &scratch, flags | WIP_VGEM_FENCE_NOTIMEOUT);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);
	gem_close(vgem, scratch.handle);

	work(i915, dmabuf, I915_EXEC_DEFAULT, 0);

	/* The work should have been cancelled */

	prime_sync_start(dmabuf, false);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], 0);
	prime_sync_end(dmabuf, false);
	close(dmabuf);

	munmap(ptr, scratch.size);
}

static bool has_prime_export(int fd)
{
	uint64_t value;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value))
		return false;

	return value & DRM_PRIME_CAP_EXPORT;
}

static bool has_prime_import(int fd)
{
	uint64_t value;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value))
		return false;

	return value & DRM_PRIME_CAP_IMPORT;
}

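/* Find a connector with a mode that fits within the buffer and that can be
 * driven by the requested pipe, then set the framebuffer on that CRTC.
 * Returns the CRTC id on success, or 0 if no suitable output was found.
 */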
static uint32_t set_fb_on_crtc(int fd, int pipe, struct vgem_bo *bo, uint32_t fb_id)
{
	drmModeRes *resources = drmModeGetResources(fd);
	struct drm_mode_modeinfo *modes = malloc(4096*sizeof(*modes));
	uint32_t encoders[32];

	for (int o = 0; o < resources->count_connectors; o++) {
		struct drm_mode_get_connector conn;
		struct drm_mode_crtc set;
		int e, m;

		memset(&conn, 0, sizeof(conn));
		conn.connector_id = resources->connectors[o];
		drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
		if (!conn.count_modes)
			continue;

		igt_assert(conn.count_modes <= 4096);
		igt_assert(conn.count_encoders <= 32);

		conn.modes_ptr = (uintptr_t)modes;
		conn.encoders_ptr = (uintptr_t)encoders;
		conn.count_props = 0;
		do_or_die(drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn));

		for (e = 0; e < conn.count_encoders; e++) {
			struct drm_mode_get_encoder enc;

			memset(&enc, 0, sizeof(enc));
			enc.encoder_id = encoders[e];
			drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc);
			if (enc.possible_crtcs & (1 << pipe))
				break;
		}
		if (e == conn.count_encoders)
			continue;

		for (m = 0; m < conn.count_modes; m++) {
			if (modes[m].hdisplay <= bo->width &&
			    modes[m].vdisplay <= bo->height)
				break;
		}
		if (m == conn.count_modes)
			continue;

		memset(&set, 0, sizeof(set));
		set.crtc_id = resources->crtcs[pipe];
		set.fb_id = fb_id;
		set.set_connectors_ptr = (uintptr_t)&conn.connector_id;
		set.count_connectors = 1;
		set.mode = modes[m];
		set.mode_valid = 1;
		if (drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &set) == 0) {
			drmModeFreeResources(resources);
			return set.crtc_id;
		}
	}

	drmModeFreeResources(resources);
	return 0;
}

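/* Helpers to encode a pipe index into a drm_wait_vblank request and to query
 * the current vblank sequence for that pipe.
 */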
static inline uint32_t pipe_select(int pipe)
{
	if (pipe > 1)
		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
	else if (pipe > 0)
		return DRM_VBLANK_SECONDARY;
	else
		return 0;
}

static unsigned get_vblank(int fd, int pipe, unsigned flags)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe) | flags;
	if (drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
		return 0;

	return vbl.reply.sequence;
}

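/* Queue a page flip to a vgem-backed framebuffer that is guarded by an
 * unsignalled write fence: the flip must not complete while the fence is
 * busy, should complete promptly once it is signalled, and must complete
 * eventually even if the fence hangs.
 */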
static void flip_to_vgem(int i915, int vgem,
			 struct vgem_bo *bo,
			 uint32_t fb_id,
			 uint32_t crtc_id,
			 unsigned hang,
			 const char *name)
{
	struct pollfd pfd = { i915, POLLIN };
	struct drm_event_vblank vbl;
	uint32_t fence;

	fence = vgem_fence_attach(vgem, bo, VGEM_FENCE_WRITE | hang);

	igt_fork(child, 1) { /* Use a child in case we block uninterruptibly */
		/* Check we don't block nor flip before the fence is ready */
		do_or_die(drmModePageFlip(i915, crtc_id, fb_id,
					  DRM_MODE_PAGE_FLIP_EVENT, &fb_id));
		for (int n = 0; n < 5; n++) { /* 5 frames should be <100ms */
			igt_assert_f(poll(&pfd, 1, 0) == 0,
				     "flip to %s completed whilst busy\n",
				     name);
			get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
		}
	}
	igt_waitchildren_timeout(2, "flip blocked by waiting for busy vgem fence");

	/* And then the flip is completed as soon as it is ready */
	if (!hang) {
		unsigned long miss;

		/* Signal fence at the start of the next vblank */
		get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
		vgem_fence_signal(vgem, fence);

		miss = 0;
		igt_until_timeout(5) {
			get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
			if (poll(&pfd, 1, 0))
				break;
			miss++;
		}
		if (miss > 1) {
			igt_warn("Missed %lu vblanks after signaling before flip was completed\n",
				 miss);
		}
		igt_assert_eq(poll(&pfd, 1, 0), 1);
	}

	/* Even if hung, the flip must complete *eventually* */
	igt_set_timeout(20, "flip blocked by hanging vgem fence"); /* XXX lower fail threshold? */
	igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
	igt_reset_timeout();
}

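/* Create two vgem-backed framebuffers, bind them to a CRTC and exercise page
 * flips that depend on vgem fences (optionally hanging ones).
 */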
static void test_flip(int i915, int vgem, unsigned hang)
{
	drmModeModeInfo *mode = NULL;
	uint32_t fb_id[2], handle[2], crtc_id;
	igt_display_t display;
	igt_output_t *output;
	struct vgem_bo bo[2];
	enum pipe pipe;

	igt_display_require(&display, i915);
	igt_display_require_output(&display);

	for_each_pipe_with_valid_output(&display, pipe, output) {
		mode = igt_output_get_mode(output);
		break;
	}

	igt_assert(mode);

	for (int i = 0; i < 2; i++) {
		uint32_t strides[4] = {};
		uint32_t offsets[4] = {};
		int fd;

		bo[i].width = mode->hdisplay;
		bo[i].height = mode->vdisplay;
		bo[i].bpp = 32;
		vgem_create(vgem, &bo[i]);

		fd = prime_handle_to_fd(vgem, bo[i].handle);
		handle[i] = prime_fd_to_handle(i915, fd);
		igt_assert(handle[i]);
		close(fd);

		strides[0] = bo[i].pitch;

		/* May skip if i915 has no displays */
		igt_require(__kms_addfb(i915, handle[i],
					bo[i].width, bo[i].height,
					DRM_FORMAT_XRGB8888, I915_TILING_NONE,
					strides, offsets, 1,
					LOCAL_DRM_MODE_FB_MODIFIERS,
					&fb_id[i]) == 0);
		igt_assert(fb_id[i]);
	}

	igt_require((crtc_id = set_fb_on_crtc(i915, 0, &bo[0], fb_id[0])));

	/* Bind both fbs for use by flipping */
	for (int i = 1; i >= 0; i--) {
		struct drm_event_vblank vbl;

		do_or_die(drmModePageFlip(i915, crtc_id, fb_id[i],
					  DRM_MODE_PAGE_FLIP_EVENT, &fb_id[i]));
		igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
	}

	/* Schedule a flip to wait upon the frontbuffer vgem being written */
	flip_to_vgem(i915, vgem, &bo[0], fb_id[0], crtc_id, hang, "front");

	/* Schedule a flip to wait upon the backbuffer vgem being written */
	flip_to_vgem(i915, vgem, &bo[1], fb_id[1], crtc_id, hang, "back");

	for (int i = 0; i < 2; i++) {
		do_or_die(drmModeRmFB(i915, fb_id[i]));
		gem_close(i915, handle[i]);
		gem_close(vgem, bo[i].handle);
	}
}

igt_main
{
	const struct intel_execution_engine *e;
	int i915 = -1;
	int vgem = -1;

	igt_fixture {
		vgem = drm_open_driver(DRIVER_VGEM);
		igt_require(has_prime_export(vgem));

		i915 = drm_open_driver_master(DRIVER_INTEL);
		igt_require_gem(i915);
		igt_require(has_prime_import(i915));
		gem_require_mmap_wc(i915);
	}

	igt_subtest("basic-read")
		test_read(vgem, i915);

	igt_subtest("basic-write")
		test_write(vgem, i915);

	igt_subtest("basic-gtt")
		test_gtt(vgem, i915);

	igt_subtest("shrink")
		test_shrink(vgem, i915);

	igt_subtest("coherency-gtt")
		test_gtt_interleaved(vgem, i915);

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%ssync-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));

			gem_quiescent_gpu(i915);
			test_sync(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%sbusy-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));

			gem_quiescent_gpu(i915);
			test_busy(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%swait-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));

			gem_quiescent_gpu(i915);
			test_wait(i915, vgem, e->exec_id, e->flags);
		}
	}

	/* Fence testing */
	igt_subtest_group {
		igt_fixture {
			igt_require(vgem_has_fences(vgem));
		}

		igt_subtest("basic-fence-read")
			test_fence_read(i915, vgem);
		igt_subtest("basic-fence-mmap")
			test_fence_mmap(i915, vgem);

		for (e = intel_execution_engines; e->name; e++) {
			igt_subtest_f("%sfence-wait-%s",
				      e->exec_id == 0 ? "basic-" : "",
				      e->name) {
				gem_require_ring(i915, e->exec_id | e->flags);
				igt_require(gem_can_store_dword(i915, e->exec_id | e->flags));

				gem_quiescent_gpu(i915);
				test_fence_wait(i915, vgem, e->exec_id, e->flags);
			}
		}

		igt_subtest("basic-fence-flip")
			test_flip(i915, vgem, 0);

		igt_subtest_group {
			igt_fixture {
				igt_require(vgem_fence_has_flag(vgem, WIP_VGEM_FENCE_NOTIMEOUT));
			}

			igt_subtest("fence-read-hang")
				test_fence_hang(i915, vgem, 0);
			igt_subtest("fence-write-hang")
				test_fence_hang(i915, vgem, VGEM_FENCE_WRITE);

			igt_subtest("fence-flip-hang")
				test_flip(i915, vgem, WIP_VGEM_FENCE_NOTIMEOUT);
		}
	}

	igt_fixture {
		close(i915);
		close(vgem);
	}
}