xref: /aosp_15_r20/external/mesa3d/src/freedreno/drm/msm/msm_priv.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2012-2018 Rob Clark <[email protected]>
3  * SPDX-License-Identifier: MIT
4  *
5  * Authors:
6  *    Rob Clark <[email protected]>
7  */
8 
9 #ifndef MSM_PRIV_H_
10 #define MSM_PRIV_H_
11 
12 #include "freedreno_drmif.h"
13 #include "freedreno_priv.h"
14 #include "freedreno_rd_output.h"
15 
16 #include "util/timespec.h"
17 #include "util/u_process.h"
18 
19 #ifndef __user
20 #define __user
21 #endif
22 
23 #include "drm-uapi/msm_drm.h"
24 
/* Legacy-MSM specialization of fd_device.  Currently adds no extra state
 * beyond the common base; exists so the msm backend has its own type for
 * FD_DEFINE_CAST and future backend-private fields.
 */
struct msm_device {
   struct fd_device base;   /* must be first: to_msm_device() downcasts via it */
};
FD_DEFINE_CAST(fd_device, msm_device);
29 
30 struct fd_device *msm_device_new(int fd, drmVersionPtr version);
31 
/* Legacy-MSM specialization of fd_pipe, caching per-GPU parameters
 * queried from the kernel at pipe creation.
 */
struct msm_pipe {
   struct fd_pipe base;  /* must be first: to_msm_pipe() downcasts via it */
   uint32_t pipe;        /* kernel pipe id (MSM_PIPE_x) -- TODO confirm against msm_pipe_new */
   uint32_t gpu_id;      /* numeric GPU id, e.g. 630 -- presumably MSM_PARAM_GPU_ID */
   uint64_t chip_id;     /* raw chip-id; also written into rd dumps (see msm_dump_rd) */
   uint64_t gmem_base;   /* GMEM base address */
   uint32_t gmem;        /* GMEM size in bytes -- TODO confirm units */
   uint32_t queue_id;    /* submitqueue id used for submits on this pipe */
};
FD_DEFINE_CAST(fd_pipe, msm_pipe);
42 
/* Create a pipe (submit queue / GPU context) with the requested priority. */
struct fd_pipe *msm_pipe_new(struct fd_device *dev, enum fd_pipe_id id,
                             uint32_t prio);

/* Create a ringbuffer backed by its own bo -- presumably for state objects
 * referenced from other cmdstreams (verify against ringbuffer impl).
 */
struct fd_ringbuffer *msm_ringbuffer_new_object(struct fd_pipe *pipe,
                                                uint32_t size);

/* Two submit implementations: the original path and the "softpin" (_sp)
 * path; which is used presumably depends on kernel/GPU capabilities.
 */
struct fd_submit *msm_submit_new(struct fd_pipe *pipe);
struct fd_submit *msm_submit_sp_new(struct fd_pipe *pipe);
51 
/* Legacy-MSM specialization of fd_bo. */
struct msm_bo {
   struct fd_bo base;   /* must be first: to_msm_bo() downcasts via it */
   uint64_t offset;     /* mmap fake-offset for CPU mapping of the bo */
};
FD_DEFINE_CAST(fd_bo, msm_bo);
57 
/* Allocate a new GEM bo of at least `size` bytes with allocation `flags`. */
struct fd_bo *msm_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
/* Wrap an existing GEM handle in an fd_bo; `size` only matters when the
 * handle is not already in the device's handle table.
 */
struct fd_bo *msm_bo_from_handle(struct fd_device *dev, uint32_t size,
                                 uint32_t handle);
61 
62 static inline void
msm_dump_submit(struct drm_msm_gem_submit * req)63 msm_dump_submit(struct drm_msm_gem_submit *req)
64 {
65    for (unsigned i = 0; i < req->nr_bos; i++) {
66       struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
67       struct drm_msm_gem_submit_bo *bo = &bos[i];
68       ERROR_MSG("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
69    }
70    for (unsigned i = 0; i < req->nr_cmds; i++) {
71       struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);
72       struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
73       struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
74       ERROR_MSG("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
75                 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
76       for (unsigned j = 0; j < cmd->nr_relocs; j++) {
77          struct drm_msm_gem_submit_reloc *r = &relocs[j];
78          ERROR_MSG(
79             "    reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
80             ", reloc_offset=%" PRIu64,
81             j, r->submit_offset, r->or, r->shift, r->reloc_idx,
82             (uint64_t)r->reloc_offset);
83       }
84    }
85 }
86 
87 static inline bool
__should_dump(struct fd_bo * bo)88 __should_dump(struct fd_bo *bo)
89 {
90    return (bo->reloc_flags & FD_RELOC_DUMP) || FD_RD_DUMP(FULL);
91 }
92 
93 static inline void
__snapshot_buf(struct fd_rd_output * rd,struct fd_bo * bo,uint64_t iova,uint32_t size,bool full)94 __snapshot_buf(struct fd_rd_output *rd, struct fd_bo *bo, uint64_t iova,
95                uint32_t size, bool full)
96 {
97    uint64_t offset = 0;
98 
99    if (iova) {
100       offset = iova - fd_bo_get_iova(bo);
101    } else {
102       iova = fd_bo_get_iova(bo);
103       size = bo->size;
104    }
105 
106    fd_rd_output_write_section(rd, RD_GPUADDR, (uint32_t[]){
107       iova, size, iova >> 32
108    }, 12);
109 
110    if (!full)
111       return;
112 
113    const char *buf = __fd_bo_map(bo);
114    buf += offset;
115    fd_rd_output_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
116 }
117 
/**
 * Emit an .rd (cmdstream trace) dump for a submit, if rd dumping is
 * enabled for this fence.  Sections are written in the order the rd
 * tooling expects: chip-id, process name, gpuaddr/buffer snapshots,
 * then the cmdstream addresses.
 */
static inline void
msm_dump_rd(struct fd_pipe *pipe, struct drm_msm_gem_submit *req)
{
   struct fd_rd_output *rd = &pipe->dev->rd;

   /* Bail if dumping is disabled, there is nothing to dump, or the
    * output stream declines this fence:
    */
   if (!fd_rd_dump_env.flags || !req->nr_cmds ||
       !fd_rd_output_begin(rd, req->fence))
      return;

   /* In full-dump mode, wait for the submit to retire so buffer contents
    * reflect the GPU's view before snapshotting them.
    */
   if (FD_RD_DUMP(FULL)) {
      fd_pipe_wait(pipe, &(struct fd_fence) {
         /* this is cheating a bit, but msm_pipe_wait only needs kfence */
         .kfence = req->fence,
      });
   }

   const char *procname = util_get_process_name();
   fd_rd_output_write_section(rd, RD_CHIP_ID, &to_msm_pipe(pipe)->chip_id, 8);
   fd_rd_output_write_section(rd, RD_CMD, procname, strlen(procname));

   /* The submit carries these tables as __u64 user pointers: */
   struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
   struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);

   for (unsigned i = 0; i < req->nr_bos; i++) {
      /* This size param to fd_bo_from_handle() only matters if the bo isn't already in
       * the handle table.  Which it should be.
       */
      struct fd_bo *bo = fd_bo_from_handle(pipe->dev, bos[i].handle, 0);

      /* iova=0 snapshots the whole bo; contents only if it is marked
       * for dumping (or full-dump mode):
       */
      __snapshot_buf(rd, bo, 0, 0, __should_dump(bo));

      fd_bo_del(bo);   /* drop the ref taken by fd_bo_from_handle() */
   }

   for (unsigned i = 0; i < req->nr_cmds; i++) {
      struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
      struct fd_bo *bo = fd_bo_from_handle(pipe->dev, bos[cmd->submit_idx].handle, 0);
      uint64_t iova = fd_bo_get_iova(bo) + cmd->submit_offset;

      /* snapshot cmdstream bo's (if we haven't already): */
      if (!__should_dump(bo))
         __snapshot_buf(rd, bo, iova, cmd->size, true);

      /* RD_CMDSTREAM_ADDR payload: iova lo32, size in dwords, iova hi32 */
      fd_rd_output_write_section(rd, RD_CMDSTREAM_ADDR, (uint32_t[]){
         iova, cmd->size >> 2, iova >> 32
      }, 12);

      fd_bo_del(bo);
   }

   fd_rd_output_end(rd);
}
170 
171 static inline void
get_abs_timeout(struct drm_msm_timespec * tv,uint64_t ns)172 get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
173 {
174    struct timespec t;
175 
176    if (ns == OS_TIMEOUT_INFINITE)
177       ns = 3600ULL * NSEC_PER_SEC; /* 1 hour timeout is almost infinite */
178 
179    clock_gettime(CLOCK_MONOTONIC, &t);
180    tv->tv_sec = t.tv_sec + ns / NSEC_PER_SEC;
181    tv->tv_nsec = t.tv_nsec + ns % NSEC_PER_SEC;
182    if (tv->tv_nsec >= NSEC_PER_SEC) { /* handle nsec overflow */
183       tv->tv_nsec -= NSEC_PER_SEC;
184       tv->tv_sec++;
185    }
186 }
187 
188 #endif /* MSM_PRIV_H_ */
189