1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Bradford <rob at linux.intel.com>
25 * Tiago Vignatti <tiago.vignatti at intel.com>
26 *
27 */
28
29 /*
30 * Testcase: Check whether mmap()ing dma-buf works
31 */
32 #include <unistd.h>
33 #include <stdlib.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <fcntl.h>
37 #include <inttypes.h>
38 #include <errno.h>
39 #include <sys/stat.h>
40 #include <sys/ioctl.h>
41 #include <pthread.h>
42
43 #include "drm.h"
44 #include "i915_drm.h"
45 #include "drmtest.h"
46 #include "igt_debugfs.h"
47 #include "ioctl_wrappers.h"
48 #include "i915/gem_mman.h"
49
/* Size of every test object (16 KiB). */
#define BO_SIZE (16*1024)

/* DRM device fd shared by all subtests; opened in the igt_fixture. */
static int fd;

/* 16-byte reference pattern tiled through each bo by fill_bo()/fill_bo_cpu()
 * and compared back through the various mappings.
 * Made static: it is internal to this test and should not have external
 * linkage. */
static char pattern[] = {0xff, 0x00, 0x00, 0x00,
			 0x00, 0xff, 0x00, 0x00,
			 0x00, 0x00, 0xff, 0x00,
			 0x00, 0x00, 0x00, 0xff};
58
59 static void
fill_bo(uint32_t handle,size_t size)60 fill_bo(uint32_t handle, size_t size)
61 {
62 off_t i;
63 for (i = 0; i < size; i+=sizeof(pattern))
64 {
65 gem_write(fd, handle, i, pattern, sizeof(pattern));
66 }
67 }
68
69 static void
fill_bo_cpu(char * ptr)70 fill_bo_cpu(char *ptr)
71 {
72 memcpy(ptr, pattern, sizeof(pattern));
73 }
74
75 static void
test_correct(void)76 test_correct(void)
77 {
78 int dma_buf_fd;
79 char *ptr1, *ptr2;
80 uint32_t handle;
81
82 handle = gem_create(fd, BO_SIZE);
83 fill_bo(handle, BO_SIZE);
84
85 dma_buf_fd = prime_handle_to_fd(fd, handle);
86 igt_assert(errno == 0);
87
88 /* Check correctness vs GEM_MMAP_GTT */
89 ptr1 = gem_mmap__gtt(fd, handle, BO_SIZE, PROT_READ);
90 ptr2 = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
91 igt_assert(ptr1 != MAP_FAILED);
92 igt_assert(ptr2 != MAP_FAILED);
93 igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
94
95 /* Check pattern correctness */
96 igt_assert(memcmp(ptr2, pattern, sizeof(pattern)) == 0);
97
98 munmap(ptr1, BO_SIZE);
99 munmap(ptr2, BO_SIZE);
100 close(dma_buf_fd);
101 gem_close(fd, handle);
102 }
103
104 static void
test_map_unmap(void)105 test_map_unmap(void)
106 {
107 int dma_buf_fd;
108 char *ptr;
109 uint32_t handle;
110
111 handle = gem_create(fd, BO_SIZE);
112 fill_bo(handle, BO_SIZE);
113
114 dma_buf_fd = prime_handle_to_fd(fd, handle);
115 igt_assert(errno == 0);
116
117 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
118 igt_assert(ptr != MAP_FAILED);
119 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
120
121 /* Unmap and remap */
122 munmap(ptr, BO_SIZE);
123 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
124 igt_assert(ptr != MAP_FAILED);
125 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
126
127 munmap(ptr, BO_SIZE);
128 close(dma_buf_fd);
129 gem_close(fd, handle);
130 }
131
132 /* prime and then unprime and then prime again the same handle */
133 static void
test_reprime(void)134 test_reprime(void)
135 {
136 int dma_buf_fd;
137 char *ptr;
138 uint32_t handle;
139
140 handle = gem_create(fd, BO_SIZE);
141 fill_bo(handle, BO_SIZE);
142
143 dma_buf_fd = prime_handle_to_fd(fd, handle);
144 igt_assert(errno == 0);
145
146 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
147 igt_assert(ptr != MAP_FAILED);
148 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
149
150 close (dma_buf_fd);
151 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
152 munmap(ptr, BO_SIZE);
153
154 dma_buf_fd = prime_handle_to_fd(fd, handle);
155 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
156 igt_assert(ptr != MAP_FAILED);
157 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
158
159 munmap(ptr, BO_SIZE);
160 close(dma_buf_fd);
161 gem_close(fd, handle);
162 }
163
164 /* map from another process */
165 static void
test_forked(void)166 test_forked(void)
167 {
168 int dma_buf_fd;
169 char *ptr;
170 uint32_t handle;
171
172 handle = gem_create(fd, BO_SIZE);
173 fill_bo(handle, BO_SIZE);
174
175 dma_buf_fd = prime_handle_to_fd(fd, handle);
176 igt_assert(errno == 0);
177
178 igt_fork(childno, 1) {
179 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
180 igt_assert(ptr != MAP_FAILED);
181 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
182 munmap(ptr, BO_SIZE);
183 close(dma_buf_fd);
184 }
185 close(dma_buf_fd);
186 igt_waitchildren();
187 gem_close(fd, handle);
188 }
189
190 /* test simple CPU write */
191 static void
test_correct_cpu_write(void)192 test_correct_cpu_write(void)
193 {
194 int dma_buf_fd;
195 char *ptr;
196 uint32_t handle;
197
198 handle = gem_create(fd, BO_SIZE);
199
200 dma_buf_fd = prime_handle_to_fd_for_mmap(fd, handle);
201
202 /* Skip if DRM_RDWR is not supported */
203 igt_skip_on(errno == EINVAL);
204
205 /* Check correctness of map using write protection (PROT_WRITE) */
206 ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
207 igt_assert(ptr != MAP_FAILED);
208
209 /* Fill bo using CPU */
210 fill_bo_cpu(ptr);
211
212 /* Check pattern correctness */
213 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
214
215 munmap(ptr, BO_SIZE);
216 close(dma_buf_fd);
217 gem_close(fd, handle);
218 }
219
220 /* map from another process and then write using CPU */
221 static void
test_forked_cpu_write(void)222 test_forked_cpu_write(void)
223 {
224 int dma_buf_fd;
225 char *ptr;
226 uint32_t handle;
227
228 handle = gem_create(fd, BO_SIZE);
229
230 dma_buf_fd = prime_handle_to_fd_for_mmap(fd, handle);
231
232 /* Skip if DRM_RDWR is not supported */
233 igt_skip_on(errno == EINVAL);
234
235 igt_fork(childno, 1) {
236 ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE , MAP_SHARED, dma_buf_fd, 0);
237 igt_assert(ptr != MAP_FAILED);
238 fill_bo_cpu(ptr);
239
240 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
241 munmap(ptr, BO_SIZE);
242 close(dma_buf_fd);
243 }
244 close(dma_buf_fd);
245 igt_waitchildren();
246 gem_close(fd, handle);
247 }
248
249 static void
test_refcounting(void)250 test_refcounting(void)
251 {
252 int dma_buf_fd;
253 char *ptr;
254 uint32_t handle;
255
256 handle = gem_create(fd, BO_SIZE);
257 fill_bo(handle, BO_SIZE);
258
259 dma_buf_fd = prime_handle_to_fd(fd, handle);
260 igt_assert(errno == 0);
261 /* Close gem object before mapping */
262 gem_close(fd, handle);
263
264 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
265 igt_assert(ptr != MAP_FAILED);
266 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
267 munmap(ptr, BO_SIZE);
268 close (dma_buf_fd);
269 }
270
271 /* dup before mmap */
272 static void
test_dup(void)273 test_dup(void)
274 {
275 int dma_buf_fd;
276 char *ptr;
277 uint32_t handle;
278
279 handle = gem_create(fd, BO_SIZE);
280 fill_bo(handle, BO_SIZE);
281
282 dma_buf_fd = dup(prime_handle_to_fd(fd, handle));
283 igt_assert(errno == 0);
284
285 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
286 igt_assert(ptr != MAP_FAILED);
287 igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
288 munmap(ptr, BO_SIZE);
289 gem_close(fd, handle);
290 close (dma_buf_fd);
291 }
292
293 /* Used for error case testing to avoid wrapper */
prime_handle_to_fd_no_assert(uint32_t handle,int flags,int * fd_out)294 static int prime_handle_to_fd_no_assert(uint32_t handle, int flags, int *fd_out)
295 {
296 struct drm_prime_handle args;
297 int ret;
298
299 args.handle = handle;
300 args.flags = flags;
301 args.fd = -1;
302
303 ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
304 if (ret)
305 ret = errno;
306 *fd_out = args.fd;
307
308 return ret;
309 }
310
has_userptr(void)311 static bool has_userptr(void)
312 {
313 uint32_t handle = 0;
314 void *ptr;
315
316 igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
317 if ( __gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0)
318 gem_close(fd, handle);
319 free(ptr);
320
321 return handle;
322 }
323
324 /* test for mmap(dma_buf_export(userptr)) */
325 static void
test_userptr(void)326 test_userptr(void)
327 {
328 int ret, dma_buf_fd;
329 void *ptr;
330 uint32_t handle;
331
332 igt_require(has_userptr());
333
334 /* create userptr bo */
335 ret = posix_memalign(&ptr, 4096, BO_SIZE);
336 igt_assert_eq(ret, 0);
337
338 /* we are not allowed to export unsynchronized userptr. Just create a normal
339 * one */
340 gem_userptr(fd, (uint32_t *)ptr, BO_SIZE, 0, 0, &handle);
341
342 /* export userptr */
343 ret = prime_handle_to_fd_no_assert(handle, DRM_CLOEXEC, &dma_buf_fd);
344 if (ret) {
345 igt_assert(ret == EINVAL || ret == ENODEV);
346 goto free_userptr;
347 } else {
348 igt_assert_eq(ret, 0);
349 igt_assert_lte(0, dma_buf_fd);
350 }
351
352 /* a userptr doesn't have the obj->base.filp, but can be exported via
353 * dma-buf, so make sure it fails here */
354 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
355 igt_assert(ptr == MAP_FAILED && errno == ENODEV);
356 free_userptr:
357 gem_close(fd, handle);
358 close(dma_buf_fd);
359 }
360
361 static void
test_errors(void)362 test_errors(void)
363 {
364 int i, dma_buf_fd;
365 char *ptr;
366 uint32_t handle;
367 int invalid_flags[] = {DRM_CLOEXEC - 1, DRM_CLOEXEC + 1,
368 DRM_RDWR - 1, DRM_RDWR + 1};
369
370 /* Test for invalid flags */
371 handle = gem_create(fd, BO_SIZE);
372 for (i = 0; i < sizeof(invalid_flags) / sizeof(invalid_flags[0]); i++) {
373 prime_handle_to_fd_no_assert(handle, invalid_flags[i], &dma_buf_fd);
374 igt_assert_eq(errno, EINVAL);
375 errno = 0;
376 }
377
378 /* Close gem object before priming */
379 handle = gem_create(fd, BO_SIZE);
380 fill_bo(handle, BO_SIZE);
381 gem_close(fd, handle);
382 prime_handle_to_fd_no_assert(handle, DRM_CLOEXEC, &dma_buf_fd);
383 igt_assert(dma_buf_fd == -1 && errno == ENOENT);
384 errno = 0;
385
386 /* close fd before mapping */
387 handle = gem_create(fd, BO_SIZE);
388 fill_bo(handle, BO_SIZE);
389 dma_buf_fd = prime_handle_to_fd(fd, handle);
390 igt_assert(errno == 0);
391 close(dma_buf_fd);
392 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
393 igt_assert(ptr == MAP_FAILED && errno == EBADF);
394 errno = 0;
395 gem_close(fd, handle);
396
397 /* Map too big */
398 handle = gem_create(fd, BO_SIZE);
399 fill_bo(handle, BO_SIZE);
400 dma_buf_fd = prime_handle_to_fd(fd, handle);
401 igt_assert(errno == 0);
402 ptr = mmap(NULL, BO_SIZE * 2, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
403 igt_assert(ptr == MAP_FAILED && errno == EINVAL);
404 errno = 0;
405 close(dma_buf_fd);
406 gem_close(fd, handle);
407
408 /* Overlapping the end of the buffer */
409 handle = gem_create(fd, BO_SIZE);
410 dma_buf_fd = prime_handle_to_fd(fd, handle);
411 igt_assert(errno == 0);
412 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, BO_SIZE / 2);
413 igt_assert(ptr == MAP_FAILED && errno == EINVAL);
414 errno = 0;
415 close(dma_buf_fd);
416 gem_close(fd, handle);
417 }
418
419 /* Test for invalid flags on sync ioctl */
420 static void
test_invalid_sync_flags(void)421 test_invalid_sync_flags(void)
422 {
423 int i, dma_buf_fd;
424 uint32_t handle;
425 struct local_dma_buf_sync sync;
426 int invalid_flags[] = {-1,
427 0x00,
428 LOCAL_DMA_BUF_SYNC_RW + 1,
429 LOCAL_DMA_BUF_SYNC_VALID_FLAGS_MASK + 1};
430
431 handle = gem_create(fd, BO_SIZE);
432 dma_buf_fd = prime_handle_to_fd(fd, handle);
433 for (i = 0; i < sizeof(invalid_flags) / sizeof(invalid_flags[0]); i++) {
434 memset(&sync, 0, sizeof(sync));
435 sync.flags = invalid_flags[i];
436
437 drmIoctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync);
438 igt_assert_eq(errno, EINVAL);
439 errno = 0;
440 }
441 }
442
443 static void
test_aperture_limit(void)444 test_aperture_limit(void)
445 {
446 int dma_buf_fd1, dma_buf_fd2;
447 char *ptr1, *ptr2;
448 uint32_t handle1, handle2;
449 /* Two buffers the sum of which > mappable aperture */
450 uint64_t size1 = (gem_mappable_aperture_size() * 7) / 8;
451 uint64_t size2 = (gem_mappable_aperture_size() * 3) / 8;
452
453 handle1 = gem_create(fd, size1);
454 fill_bo(handle1, BO_SIZE);
455
456 dma_buf_fd1 = prime_handle_to_fd(fd, handle1);
457 igt_assert(errno == 0);
458 ptr1 = mmap(NULL, size1, PROT_READ, MAP_SHARED, dma_buf_fd1, 0);
459 igt_assert(ptr1 != MAP_FAILED);
460 igt_assert(memcmp(ptr1, pattern, sizeof(pattern)) == 0);
461
462 handle2 = gem_create(fd, size1);
463 fill_bo(handle2, BO_SIZE);
464 dma_buf_fd2 = prime_handle_to_fd(fd, handle2);
465 igt_assert(errno == 0);
466 ptr2 = mmap(NULL, size2, PROT_READ, MAP_SHARED, dma_buf_fd2, 0);
467 igt_assert(ptr2 != MAP_FAILED);
468 igt_assert(memcmp(ptr2, pattern, sizeof(pattern)) == 0);
469
470 igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
471
472 munmap(ptr1, size1);
473 munmap(ptr2, size2);
474 close(dma_buf_fd1);
475 close(dma_buf_fd2);
476 gem_close(fd, handle1);
477 gem_close(fd, handle2);
478 }
479
480 static int
check_for_dma_buf_mmap(void)481 check_for_dma_buf_mmap(void)
482 {
483 int dma_buf_fd;
484 char *ptr;
485 uint32_t handle;
486 int ret = 1;
487
488 handle = gem_create(fd, BO_SIZE);
489 dma_buf_fd = prime_handle_to_fd(fd, handle);
490 ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
491 if (ptr != MAP_FAILED)
492 ret = 0;
493 munmap(ptr, BO_SIZE);
494 gem_close(fd, handle);
495 close(dma_buf_fd);
496 return ret;
497 }
498
igt_main
{
	/* Table of subtests; each entry is registered as an igt_subtest
	 * under its name and runs independently. */
	struct {
		const char *name;
		void (*fn)(void);
	} tests[] = {
		{ "test_correct", test_correct },
		{ "test_map_unmap", test_map_unmap },
		{ "test_reprime", test_reprime },
		{ "test_forked", test_forked },
		{ "test_correct_cpu_write", test_correct_cpu_write },
		{ "test_forked_cpu_write", test_forked_cpu_write },
		{ "test_refcounting", test_refcounting },
		{ "test_dup", test_dup },
		{ "test_userptr", test_userptr },
		{ "test_errors", test_errors },
		{ "test_invalid_sync_flags", test_invalid_sync_flags },
		{ "test_aperture_limit", test_aperture_limit },
	};
	int i;

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		/* Skip the whole binary if dma-buf CPU mmap is unsupported;
		 * reset errno so subtests start from a clean slate. */
		igt_skip_on((check_for_dma_buf_mmap() != 0));
		errno = 0;
	}


	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		igt_subtest(tests[i].name)
			tests[i].fn();
	}

	igt_fixture
		close(fd);
}
535