/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <signal.h>
#include <stdarg.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include "drm-uapi/i915_drm.h"
#include <inttypes.h>

#include "intel_aub.h"
#include "aub_write.h"

#include "c11/threads.h"
#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"
#include "common/intel_debug_identifier.h"
#include "common/intel_gem.h"
#include "util/macros.h"
#include "util/u_math.h"

static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);
static int munmap_init_helper(void *addr, size_t length);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;
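
/* This interposer is meant to be LD_PRELOADed into a process that talks to
 * the i915 kernel driver.  It overrides close() and ioctl() (resolving the
 * real libc entry points lazily via dlsym(RTLD_NEXT, ...)), watches for the
 * DRM file descriptor, and records buffer objects and execbuffers into an
 * AUB trace that can be replayed or inspected offline.
 *
 * A minimal invocation sketch (the library and config file names below are
 * assumptions; Mesa normally drives this through its intel_dump_gpu wrapper
 * script):
 *
 *    echo "file=capture.aub" > /tmp/dump.cfg
 *    INTEL_DUMP_GPU_CONFIG=/tmp/dump.cfg \
 *       LD_PRELOAD=libintel_dump_gpu.so ./my_gl_app
 */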

static int drm_fd = -1;
static char *output_filename = NULL;
static FILE *output_file = NULL;
static int verbose = 0;
static bool device_override = false;
static bool capture_only = false;
static int64_t frame_id = -1;
static bool capture_finished = false;

#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024

struct bo {
   uint32_t size;
   uint64_t offset;
   void *map;
   /* Whether the buffer has been positioned in the GTT already. */
   bool gtt_mapped : 1;
   /* Tracks userspace mmapping of the buffer */
   bool user_mapped : 1;
   /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
    * buffer has been updated.
    */
   bool dirty : 1;
};

static struct bo *bos;

#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )

#define fail_if(cond, ...) _fail_if(cond, "intel_dump_gpu", __VA_ARGS__)

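/* BOs are tracked in a single flat table indexed by (fd, gem handle):
 * bos[handle + fd * MAX_BO_COUNT].  Both indices are bounds-checked against
 * the fixed MAX_BO_COUNT/MAX_FD_COUNT limits.
 */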
static struct bo *
get_bo(unsigned fd, uint32_t handle)
{
   struct bo *bo;

   fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
   fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
   bo = &bos[handle + fd * MAX_BO_COUNT];

   return bo;
}

static struct intel_device_info devinfo = {0};
static int device = 0;
static struct aub_file aub_file;

static void
ensure_device_info(int fd)
{
   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      fail_if(!intel_get_device_info_from_fd(fd, &devinfo, -1, -1),
              "failed to identify chipset.\n");
      device = devinfo.pci_device_id;
   } else if (devinfo.ver == 0) {
      fail_if(!intel_get_device_info_from_pci_id(device, &devinfo),
              "failed to identify chipset.\n");
   }
}

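/* Apply the execbuffer's relocation entries to a malloc'ed copy of the BO so
 * that the data written into the AUB file contains the final GTT addresses.
 * The caller frees the copy after it has been written out.
 */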
static void *
relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "out of memory\n");
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");

      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
                      get_bo(fd, handle)->offset + relocs[i].delta);
   }

   return relocated;
}

static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

static void *
gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
{
   struct drm_i915_gem_mmap mmap = {
      .handle = handle,
      .offset = offset,
      .size = size
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
      return MAP_FAILED;

   return (void *)(uintptr_t) mmap.addr_ptr;
}

static enum intel_engine_class
engine_class_from_ring_flag(uint32_t ring_flag)
{
   switch (ring_flag) {
   case I915_EXEC_DEFAULT:
   case I915_EXEC_RENDER:
      return INTEL_ENGINE_CLASS_RENDER;
   case I915_EXEC_BSD:
      return INTEL_ENGINE_CLASS_VIDEO;
   case I915_EXEC_BLT:
      return INTEL_ENGINE_CLASS_COPY;
   case I915_EXEC_VEBOX:
      return INTEL_ENGINE_CLASS_VIDEO_ENHANCE;
   default:
      return INTEL_ENGINE_CLASS_INVALID;
   }
}

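/* The heart of the tool: called for every DRM_IOCTL_I915_GEM_EXECBUFFER2(_WR).
 * Assigns GTT offsets to buffers that are not softpinned, maps them into the
 * PPGTT when using execlists, writes the (relocated) buffer contents into the
 * AUB file and finally emits the batch execution itself.  When a frame id is
 * requested via the config file, only execbuffers belonging to that frame are
 * captured.
 */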
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   if (capture_finished)
      return;

   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.ver);
   }

   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer.  The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         if (bo->offset != obj->offset)
            bo->gtt_mapped = false;
         bo->offset = obj->offset;
      } else {
         if (obj->alignment != 0)
            offset = align(offset, obj->alignment);
         bo->offset = offset;
         offset = align(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");
   }

   uint64_t current_frame_id = 0;
   if (frame_id >= 0) {
      for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
         obj = &exec_objects[i];
         bo = get_bo(fd, obj->handle);

         /* Check against frame_id requirements. */
         if (memcmp(bo->map, intel_debug_identifier(),
                    intel_debug_identifier_size()) == 0) {
            const struct intel_debug_block_frame *frame_desc =
               intel_debug_get_identifier_block(bo->map, bo->size,
                                                INTEL_DEBUG_BLOCK_TYPE_FRAME);

            current_frame_id = frame_desc ? frame_desc->frame_id : 0;
            break;
         }
      }
   }

   if (verbose)
      printf("Dumping execbuffer2 (frame_id=%"PRIu64", buffers=%u):\n",
             current_frame_id, execbuffer2->buffer_count);

   /* Check whether we can stop right now. */
   if (frame_id >= 0) {
      if (current_frame_id < frame_id)
         return;

      if (current_frame_id > frame_id) {
         aub_file_finish(&aub_file);
         capture_finished = true;
         return;
      }
   }

   /* Map buffers into the PPGTT. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (verbose) {
         printf("BO #%d (%dB) @ 0x%" PRIx64 "\n",
                obj->handle, bo->size, bo->offset);
      }

      if (aub_use_execlists(&aub_file) && !bo->gtt_mapped) {
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
         bo->gtt_mapped = true;
      }
   }

   /* Write the buffer content into the Aub. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         if (!bo->user_mapped)
            bo->dirty = false;
      }

      if (data != bo->map)
         free(data);
   }

   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}

static void
add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
{
   struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];

   fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
   fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
   fail_if(size == 0, "bo size is invalid\n");

   bo->size = size;
   bo->map = map;
   bo->user_mapped = false;
   bo->gtt_mapped = false;
}

static void
remove_bo(int fd, int handle)
{
   struct bo *bo = get_bo(fd, handle);

   if (bo->map && !IS_USERPTR(bo->map))
      munmap(bo->map, bo->size);
   memset(bo, 0, sizeof(*bo));
}

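/* close() is interposed only so we notice when the application closes the DRM
 * file descriptor and stop treating that fd as the device.
 */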
__attribute__ ((visibility ("default"))) int
close(int fd)
{
   if (fd == drm_fd)
      drm_fd = -1;

   return libc_close(fd);
}

static int
get_pci_id(int fd, int *pci_id)
{
   if (device_override) {
      *pci_id = device;
      return 0;
   }

   return intel_gem_get_param(fd, I915_PARAM_CHIPSET_ID, pci_id) ? 0 : -1;
}

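/* Lazily initialized the first time we see an ioctl on the DRM fd.  The
 * configuration is read from the file named by INTEL_DUMP_GPU_CONFIG, one
 * "key=value" pair per line.  A sketch of such a file, using only keys the
 * parser below understands (the values are made up for illustration):
 *
 *    verbose=1
 *    platform=skl
 *    file=capture.aub
 *    capture_only=1
 *    frame=20
 */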
static void
maybe_init(int fd)
{
   static bool initialized = false;
   FILE *config;
   char *key, *value;

   if (initialized)
      return;

   initialized = true;

   const char *config_path = getenv("INTEL_DUMP_GPU_CONFIG");
   fail_if(config_path == NULL, "INTEL_DUMP_GPU_CONFIG is not set\n");

   config = fopen(config_path, "r");
   fail_if(config == NULL, "failed to open file %s\n", config_path);

   while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
      if (!strcmp(key, "verbose")) {
         if (!strcmp(value, "1")) {
            verbose = 1;
         } else if (!strcmp(value, "2")) {
            verbose = 2;
         }
      } else if (!strcmp(key, "device")) {
         fail_if(device != 0, "Device/Platform override specified multiple times.\n");
         fail_if(sscanf(value, "%i", &device) != 1,
                 "failed to parse device id '%s'\n",
                 value);
         device_override = true;
      } else if (!strcmp(key, "platform")) {
         fail_if(device != 0, "Device/Platform override specified multiple times.\n");
         device = intel_device_name_to_pci_device_id(value);
         fail_if(device == -1, "Unknown platform '%s'\n", value);
         device_override = true;
      } else if (!strcmp(key, "file")) {
         free(output_filename);
         if (output_file)
            fclose(output_file);
         output_filename = strdup(value);
         output_file = fopen(output_filename, "w+");
         fail_if(output_file == NULL,
                 "failed to open file '%s'\n",
                 output_filename);
      } else if (!strcmp(key, "capture_only")) {
         capture_only = atoi(value);
      } else if (!strcmp(key, "frame")) {
         frame_id = atol(value);
      } else {
         fprintf(stderr, "unknown option '%s'\n", key);
      }

      free(key);
      free(value);
   }
   fclose(config);

   bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
   fail_if(bos == NULL, "out of memory\n");

   ASSERTED int ret = get_pci_id(fd, &device);
   assert(ret == 0);

   aub_file_init(&aub_file, output_file,
                 verbose == 2 ? stdout : NULL,
                 device, program_invocation_short_name);
   aub_write_default_setup(&aub_file);

   if (verbose)
      printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
             output_filename, device, devinfo.ver);
}

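/* Central ioctl dispatcher.  DRM fds are detected by fstat()ing the fd and
 * checking for a character device with the DRM major.  On the DRM fd we track
 * BO creation, destruction and mmaps, dump every execbuffer2, and, when a
 * device/platform override is configured (i.e. no real hardware is expected),
 * answer the most common queries locally instead of forwarding them to the
 * kernel.
 */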
static int
intercept_ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GET_RESET_STATS: {
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         if (device_override) {
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               *getparam->value = devinfo.ver >= 8 && devinfo.platform != INTEL_PLATFORM_CHV;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            case I915_CONTEXT_PARAM_GTT_SIZE:
               if (devinfo.platform == INTEL_PLATFORM_EHL)
                  getparam->value = 1ull << 36;
               else if (devinfo.ver >= 8 && devinfo.platform != INTEL_PLATFORM_CHV)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE_EXT: {
         struct drm_i915_gem_create_ext *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);
         }

         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP_OFFSET: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap_offset *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}

__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   static thread_local bool entered = false;
   va_list args;
   void *argp;
   int ret;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Some of the functions called by intercept_ioctl call ioctls of their
    * own. These need to go to the libc ioctl instead of being passed back to
    * intercept_ioctl to avoid a stack overflow. */
   if (entered) {
      return libc_ioctl(fd, request, argp);
   } else {
      entered = true;
      ret = intercept_ioctl(fd, request, argp);
      entered = false;
      return ret;
   }
}

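/* The libc_* pointers start out pointing at the *_init_helper stubs below, so
 * the real libc entry points are resolved with dlsym(RTLD_NEXT, ...) the first
 * time any of them is used, without needing a constructor.
 */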
static void
init(void)
{
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
   libc_munmap = dlsym(RTLD_NEXT, "munmap");
   fail_if(libc_close == NULL || libc_ioctl == NULL,
           "failed to get libc ioctl or close\n");
}

static int
close_init_helper(int fd)
{
   init();
   return libc_close(fd);
}

static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   init();
   return libc_ioctl(fd, request, argp);
}

static int
munmap_init_helper(void *addr, size_t length)
{
   init();
   for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
      struct bo *bo = &bos[i];
      if (bo->map == addr) {
         bo->user_mapped = false;
         break;
      }
   }
   return libc_munmap(addr, length);
}

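/* Destructor: if we ever identified the device, finish the AUB file (unless a
 * frame-limited capture already closed it) and release our tracking state.
 */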
static void __attribute__ ((destructor))
fini(void)
{
   if (devinfo.ver != 0) {
      free(output_filename);
      if (!capture_finished)
         aub_file_finish(&aub_file);
      free(bos);
   }
}
819