xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/freedreno/freedreno_util.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2012 Rob Clark <[email protected]>
3  * SPDX-License-Identifier: MIT
4  *
5  * Authors:
6  *    Rob Clark <[email protected]>
7  */
8 
9 #ifndef FREEDRENO_UTIL_H_
10 #define FREEDRENO_UTIL_H_
11 
12 #include "common/freedreno_common.h"
13 
14 #include "drm/freedreno_drmif.h"
15 #include "drm/freedreno_ringbuffer.h"
16 
17 #include "util/format/u_formats.h"
18 #include "pipe/p_state.h"
19 #include "util/compiler.h"
20 #include "util/half_float.h"
21 #include "util/log.h"
22 #ifndef __cplusplus  // TODO fix cpu_trace.h to be c++ friendly
23 #include "util/perf/cpu_trace.h"
24 #endif
25 #include "util/u_debug.h"
26 #include "util/u_dynarray.h"
27 #include "util/u_math.h"
28 #include "util/u_pack_color.h"
29 
30 #include "adreno_common.xml.h"
31 #include "adreno_pm4.xml.h"
32 #include "disasm.h"
33 
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37 
/* Helpers translating gallium/pipe enums into adreno hw enums: */
enum adreno_rb_depth_format fd_pipe2depth(enum pipe_format format);
enum pc_di_index_size fd_pipe2index(enum pipe_format format);
enum pipe_format fd_gmem_restore_format(enum pipe_format format);
enum adreno_rb_blend_factor fd_blend_factor(unsigned factor);
enum adreno_pa_su_sc_draw fd_polygon_mode(unsigned mode);
enum adreno_stencil_op fd_stencil_op(unsigned op);
44 
#define A3XX_MAX_MIP_LEVELS 14

/* Max number of color render targets (MRTs), per generation: */
#define A2XX_MAX_RENDER_TARGETS 1
#define A3XX_MAX_RENDER_TARGETS 4
#define A4XX_MAX_RENDER_TARGETS 8
#define A5XX_MAX_RENDER_TARGETS 8
#define A6XX_MAX_RENDER_TARGETS 8

/* Overall maximum across all supported generations: */
#define MAX_RENDER_TARGETS A6XX_MAX_RENDER_TARGETS
54 
/* clang-format off */
/**
 * Debug flags carried in the fd_mesa_debug bitmask and tested at
 * runtime via the FD_DBG() macro below.
 *
 * NOTE(review): these presumably map 1:1 to FD_MESA_DEBUG environment
 * variable options — confirm against the option table where
 * fd_mesa_debug is initialized.
 */
enum fd_debug_flag {
   FD_DBG_MSGS         = BITFIELD_BIT(0),
   FD_DBG_DISASM       = BITFIELD_BIT(1),
   FD_DBG_DCLEAR       = BITFIELD_BIT(2),
   FD_DBG_DDRAW        = BITFIELD_BIT(3),
   FD_DBG_NOSCIS       = BITFIELD_BIT(4),
   FD_DBG_DIRECT       = BITFIELD_BIT(5),
   FD_DBG_GMEM         = BITFIELD_BIT(6),
   FD_DBG_PERF         = BITFIELD_BIT(7),
   FD_DBG_NOBIN        = BITFIELD_BIT(8),
   FD_DBG_SYSMEM       = BITFIELD_BIT(9),
   FD_DBG_SERIALC      = BITFIELD_BIT(10),
   FD_DBG_SHADERDB     = BITFIELD_BIT(11),
   FD_DBG_FLUSH        = BITFIELD_BIT(12),
   /* BIT(13) is currently unused */
   FD_DBG_INORDER      = BITFIELD_BIT(14),
   FD_DBG_BSTAT        = BITFIELD_BIT(15),
   FD_DBG_NOGROW       = BITFIELD_BIT(16),
   FD_DBG_LRZ          = BITFIELD_BIT(17),
   FD_DBG_NOINDR       = BITFIELD_BIT(18),
   FD_DBG_NOBLIT       = BITFIELD_BIT(19),
   FD_DBG_HIPRIO       = BITFIELD_BIT(20),
   FD_DBG_TTILE        = BITFIELD_BIT(21),
   FD_DBG_PERFC        = BITFIELD_BIT(22),
   FD_DBG_NOUBWC       = BITFIELD_BIT(23),
   FD_DBG_NOLRZ        = BITFIELD_BIT(24),
   FD_DBG_NOTILE       = BITFIELD_BIT(25),
   FD_DBG_LAYOUT       = BITFIELD_BIT(26),
   FD_DBG_NOFP16       = BITFIELD_BIT(27),
   FD_DBG_NOHW         = BITFIELD_BIT(28),
   FD_DBG_NOSBIN       = BITFIELD_BIT(29),
   FD_DBG_STOMP        = BITFIELD_BIT(30),
};
/* clang-format on */
90 
/* Bitmask of currently-enabled fd_debug_flag values: */
extern int fd_mesa_debug;
/* Whether hw binning may be used (see also FD_DBG_NOBIN): */
extern bool fd_binning_enabled;

/* Test whether a given debug category is enabled, eg. FD_DBG(PERF): */
#define FD_DBG(category) unlikely(fd_mesa_debug &FD_DBG_##category)

/* needed for the gettid syscall used by DBG() below: */
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
99 
/* Driver debug logging, enabled via the MSGS debug flag.  Each message
 * is prefixed with the calling thread-id, function, and line number.
 */
#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      if (FD_DBG(MSGS))                                                        \
         mesa_logi("%5d: %s:%d: " fmt, ((pid_t)syscall(SYS_gettid)),           \
                                        __func__, __LINE__,                    \
                                        ##__VA_ARGS__);                        \
   } while (0)
107 
/* Emit a performance warning: logged via mesa_logw() when the PERF
 * debug flag is set, and also forwarded to the state-tracker provided
 * debug callback (if any):
 */
#define perf_debug_message(debug, type, ...)                                   \
   do {                                                                        \
      if (FD_DBG(PERF))                                                        \
         mesa_logw(__VA_ARGS__);                                               \
      struct util_debug_callback *__d = (debug);                               \
      if (__d)                                                                 \
         util_debug_message(__d, type, __VA_ARGS__);                           \
   } while (0)

/* perf_debug_message() convenience wrapper taking an fd_context, which
 * may be NULL:
 */
#define perf_debug_ctx(ctx, ...)                                               \
   do {                                                                        \
      struct fd_context *__c = (ctx);                                          \
      perf_debug_message(__c ? &__c->debug : NULL, PERF_INFO, __VA_ARGS__);    \
   } while (0)

/* Perf warning when no context is available: */
#define perf_debug(...) perf_debug_ctx(NULL, __VA_ARGS__)
124 
/* Time the statement/block that follows, and emit a perf warning if it
 * exceeds limit_ns.  Usage:
 *
 *    perf_time_ctx(ctx, 1000, "slow thing") {
 *       ... code to measure ...
 *    }
 *
 * The body executes exactly once; timestamps are only actually taken
 * when perf logging is active (see __perf_get_time()).
 */
#define perf_time_ctx(ctx, limit_ns, fmt, ...)                                 \
   for (struct __perf_time_state __s =                                         \
           {                                                                   \
              .t = -__perf_get_time(ctx),                                      \
           };                                                                  \
        !__s.done; ({                                                          \
           __s.t += __perf_get_time(ctx);                                      \
           __s.done = true;                                                    \
           if (__s.t > (limit_ns)) {                                           \
              perf_debug_ctx(ctx, fmt " (%.03f ms)", ##__VA_ARGS__,            \
                             (double)__s.t / 1000000.0);                       \
           }                                                                   \
        }))

/* perf_time_ctx() when no context is available: */
#define perf_time(limit_ns, fmt, ...)                                          \
   perf_time_ctx(NULL, limit_ns, fmt, ##__VA_ARGS__)

/* Loop state for perf_time_ctx(): */
struct __perf_time_state {
   int64_t t;   /* accumulated elapsed time (ns) */
   bool done;   /* terminates the single-iteration for-loop */
};
146 
/* static inline would be nice here, except 'struct fd_context' is not
 * defined yet:
 *
 * Returns the current time in ns when perf logging is active (either
 * the PERF debug flag, or a debug-message callback installed on the
 * context), otherwise 0 so the timer query overhead is avoided.
 */
#define __perf_get_time(ctx)                                                   \
   ((FD_DBG(PERF) || ({                                                        \
        struct fd_context *__c = (ctx);                                        \
        unlikely(__c && __c->debug.debug_message);                             \
     }))                                                                       \
       ? os_time_get_nano()                                                    \
       : 0)
157 
/* Define a static-inline downcast helper named after the child type,
 * ie. FD_DEFINE_CAST(parent, child) defines:
 *
 *    struct child *child(struct parent *x)
 */
#define FD_DEFINE_CAST(parent, child)                                          \
   static inline struct child *child(struct parent *x)                         \
   {                                                                           \
      return (struct child *)x;                                                \
   }
163 
164 struct fd_context;
165 
166 /**
 * A pseudo-variable for defining where various parts of the fd_context
168  * can be safely accessed.
169  *
170  * With threaded_context, certain pctx funcs are called from gallium
171  * front-end/state-tracker (eg. CSO creation), while others are called
172  * from the driver thread.  Things called from driver thread can safely
173  * access anything in the ctx, while things called from the fe/st thread
174  * must limit themselves to "safe" things (ie. ctx->screen is safe as it
175  * is immutable, but the blitter_context is not).
176  */
177 extern lock_cap_t fd_context_access_cap;
178 
179 /**
180  * Make the annotation a bit less verbose.. mark fields which should only
181  * be accessed by driver-thread with 'dt'
182  */
183 #define dt guarded_by(fd_context_access_cap)
184 
185 /**
186  * Annotation for entry-point functions only called in driver thread.
187  *
188  * For static functions, apply the annotation to the function declaration.
189  * Otherwise apply to the function prototype.
190  */
191 #define in_dt assert_cap(fd_context_access_cap)
192 
193 /**
194  * Annotation for internal functions which are only called from entry-
195  * point functions (with 'in_dt' annotation) or other internal functions
196  * with the 'assert_dt' annotation.
197  *
198  * For static functions, apply the annotation to the function declaration.
199  * Otherwise apply to the function prototype.
200  */
201 #define assert_dt requires_cap(fd_context_access_cap)
202 
203 /**
204  * Special helpers for context access outside of driver thread.  For ex,
205  * pctx->get_query_result() is not called on driver thread, but the
206  * query is guaranteed to be flushed, or the driver thread queue is
207  * guaranteed to be flushed.
208  *
209  * Use with caution!
210  */
static inline void
fd_context_access_begin(struct fd_context *ctx)
   acquire_cap(fd_context_access_cap)
{
   /* no-op at runtime; exists only to inform the thread-safety analysis */
}
216 
/* Counterpart to fd_context_access_begin(): */
static inline void
fd_context_access_end(struct fd_context *ctx) release_cap(fd_context_access_cap)
{
   /* no-op at runtime; exists only to inform the thread-safety analysis */
}
221 
222 #define CP_REG(reg) ((0x4 << 16) | ((unsigned int)((reg) - (0x2000))))
223 
224 static inline uint32_t
DRAW(enum pc_di_primtype prim_type,enum pc_di_src_sel source_select,enum pc_di_index_size index_size,enum pc_di_vis_cull_mode vis_cull_mode,uint8_t instances)225 DRAW(enum pc_di_primtype prim_type, enum pc_di_src_sel source_select,
226      enum pc_di_index_size index_size, enum pc_di_vis_cull_mode vis_cull_mode,
227      uint8_t instances)
228 {
229    return (prim_type << 0) | (source_select << 6) | ((index_size & 1) << 11) |
230           ((index_size >> 1) << 13) | (vis_cull_mode << 9) | (1 << 14) |
231           (instances << 24);
232 }
233 
234 static inline uint32_t
DRAW_A20X(enum pc_di_primtype prim_type,enum pc_di_face_cull_sel faceness_cull_select,enum pc_di_src_sel source_select,enum pc_di_index_size index_size,bool pre_fetch_cull_enable,bool grp_cull_enable,uint16_t count)235 DRAW_A20X(enum pc_di_primtype prim_type,
236           enum pc_di_face_cull_sel faceness_cull_select,
237           enum pc_di_src_sel source_select, enum pc_di_index_size index_size,
238           bool pre_fetch_cull_enable, bool grp_cull_enable, uint16_t count)
239 {
240    return (prim_type << 0) | (source_select << 6) |
241           (faceness_cull_select << 8) | ((index_size & 1) << 11) |
242           ((index_size >> 1) << 13) | (pre_fetch_cull_enable << 14) |
243           (grp_cull_enable << 15) | (count << 16);
244 }
245 
/* for tracking cmdstream positions that need to be patched once the
 * final value is known (see OUT_RINGP()):
 */
struct fd_cs_patch {
   uint32_t *cs;   /* position in the cmdstream to patch */
   uint32_t val;   /* value recorded at emit time */
};
/* Treat a util_dynarray as an array of fd_cs_patch entries: */
#define fd_patch_num_elements(buf) ((buf)->size / sizeof(struct fd_cs_patch))
#define fd_patch_element(buf, i)                                               \
   util_dynarray_element(buf, struct fd_cs_patch, i)
254 
255 static inline enum pipe_format
pipe_surface_format(struct pipe_surface * psurf)256 pipe_surface_format(struct pipe_surface *psurf)
257 {
258    if (!psurf)
259       return PIPE_FORMAT_NONE;
260    return psurf->format;
261 }
262 
263 static inline bool
fd_surface_half_precision(const struct pipe_surface * psurf)264 fd_surface_half_precision(const struct pipe_surface *psurf)
265 {
266    enum pipe_format format;
267 
268    if (!psurf)
269       return true;
270 
271    format = psurf->format;
272 
273    /* colors are provided in consts, which go through cov.f32f16, which will
274     * break these values
275     */
276    if (util_format_is_pure_integer(format))
277       return false;
278 
279    /* avoid losing precision on 32-bit float formats */
280    if (util_format_is_float(format) &&
281        util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0) ==
282           32)
283       return false;
284 
285    return true;
286 }
287 
288 static inline unsigned
fd_sampler_first_level(const struct pipe_sampler_view * view)289 fd_sampler_first_level(const struct pipe_sampler_view *view)
290 {
291    if (view->target == PIPE_BUFFER)
292       return 0;
293    return view->u.tex.first_level;
294 }
295 
296 static inline unsigned
fd_sampler_last_level(const struct pipe_sampler_view * view)297 fd_sampler_last_level(const struct pipe_sampler_view *view)
298 {
299    if (view->target == PIPE_BUFFER)
300       return 0;
301    return view->u.tex.last_level;
302 }
303 
304 static inline bool
fd_half_precision(struct pipe_framebuffer_state * pfb)305 fd_half_precision(struct pipe_framebuffer_state *pfb)
306 {
307    unsigned i;
308 
309    for (i = 0; i < pfb->nr_cbufs; i++)
310       if (!fd_surface_half_precision(pfb->cbufs[i]))
311          return false;
312 
313    return true;
314 }
315 
316 static inline void emit_marker(struct fd_ringbuffer *ring, int scratch_idx);
317 
/* like OUT_RING() but appends a cmdstream patch point to 'buf',
 * recording where the dword lives so it can be fixed up later:
 */
static inline void
OUT_RINGP(struct fd_ringbuffer *ring, uint32_t data, struct util_dynarray *buf)
{
   if (LOG_DWORDS) {
      DBG("ring[%p]: OUT_RINGP  %04x:  %08x", ring,
          (uint32_t)(ring->cur - ring->start), data);
   }
   /* note: the ring position is advanced, but 'data' is only recorded
    * in the patch entry — the actual dword is written when the patch
    * is applied:
    */
   util_dynarray_append(buf, struct fd_cs_patch,
                        ((struct fd_cs_patch){
                           .cs = ring->cur++,
                           .val = data,
                        }));
}
332 
/* Emit CP_INDIRECT_BUFFER_PFE/PFD (PKT3) packet(s) into 'ring', one for
 * each cmd in the target rb.  No-op for an empty target.  Pre-a5xx;
 * see __OUT_IB5() for the PKT7 variant.
 */
static inline void
__OUT_IB(struct fd_ringbuffer *ring, bool prefetch,
         struct fd_ringbuffer *target)
{
   if (target->cur == target->start)
      return;

   unsigned count = fd_ringbuffer_cmd_count(target);

   /* for debug after a lock up, write a unique counter value
    * to scratch6 for each IB, to make it easier to match up
    * register dumps to cmdstream.  The combination of IB and
    * DRAW (scratch7) is enough to "triangulate" the particular
    * draw that caused lockup.
    */
   emit_marker(ring, 6);

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT3(ring, prefetch ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD,
               2);
      /* emits the target address reloc; returns cmd size in bytes: */
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
      OUT_PKT2(ring);
   }

   emit_marker(ring, 6);
}
362 
/* a5xx+ version of __OUT_IB(), using the CP_INDIRECT_BUFFER (PKT7)
 * packet.  No-op for an empty target rb.
 */
static inline void
__OUT_IB5(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
   if (target->cur == target->start)
      return;

   unsigned count = fd_ringbuffer_cmd_count(target);

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
      /* emits the target address reloc; returns cmd size in bytes: */
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
   }
}
379 
/* CP_SCRATCH_REG4 is used to hold base address for query results:
 * Note the scratch register move on a5xx+ but this is only used
 * for pre-a5xx hw queries where we cannot allocate the query buf
 * until the # of tiles is known.
 */
#define HW_QUERY_BASE_REG REG_AXXX_CP_SCRATCH_REG4

/* Only emit scratch-reg breadcrumbs (see emit_marker()) on debug builds: */
#if MESA_DEBUG
#define __EMIT_MARKER 1
#else
#define __EMIT_MARKER 0
#endif
392 
/* Write an incrementing counter to the given CP scratch register, as a
 * breadcrumb to correlate register dumps with cmdstream after a gpu
 * lockup.  Compiled out (__EMIT_MARKER == 0) on non-debug builds.
 */
static inline void
emit_marker(struct fd_ringbuffer *ring, int scratch_idx)
{
   extern int32_t marker_cnt;
   unsigned reg = REG_AXXX_CP_SCRATCH_REG0 + scratch_idx;
   /* never clobber the scratch reg reserved for hw query results
    * (assert on debug builds, silently skip on release):
    */
   assert(reg != HW_QUERY_BASE_REG);
   if (reg == HW_QUERY_BASE_REG)
      return;
   if (__EMIT_MARKER) {
      OUT_WFI(ring);
      OUT_PKT0(ring, reg, 1);
      OUT_RING(ring, p_atomic_inc_return(&marker_cnt));
   }
}
407 
408 
409 /*
410  * a3xx+ helpers:
411  */
412 
/* Translate a gallium sample count to the hw enum.  0 and 1 both mean
 * no MSAA; any other unsupported count asserts on debug builds (and
 * falls through to MSAA_ONE on release builds).
 */
static inline enum a3xx_msaa_samples
fd_msaa_samples(unsigned samples)
{
   switch (samples) {
   default:
      unreachable("Unsupported samples");
   case 0:
   case 1:
      return MSAA_ONE;
   case 2:
      return MSAA_TWO;
   case 4:
      return MSAA_FOUR;
   case 8:
      return MSAA_EIGHT;
   }
}
430 
/* Max reportable texel-buffer elements, per generation: */
#define A3XX_MAX_TEXEL_BUFFER_ELEMENTS_UINT (1 << 13)

/* Note that the Vulkan blob on a540 and 640 report a
 * maxTexelBufferElements of just 65536 (the GLES3.2 and Vulkan
 * minimum), ie. far less than this limit.
 */
#define A4XX_MAX_TEXEL_BUFFER_ELEMENTS_UINT (1 << 27)
438 
439 static inline uint32_t
fd_clamp_buffer_size(enum pipe_format format,uint32_t size,unsigned max_texel_buffer_elements)440 fd_clamp_buffer_size(enum pipe_format format, uint32_t size,
441                      unsigned max_texel_buffer_elements)
442 {
443    /* The spec says:
444     *    The number of texels in the texel array is then clamped to the value of
445     *    the implementation-dependent limit GL_MAX_TEXTURE_BUFFER_SIZE.
446     *
447     * So compute the number of texels, compare to GL_MAX_TEXTURE_BUFFER_SIZE and update it.
448     */
449    unsigned blocksize = util_format_get_blocksize(format);
450    unsigned elements = MIN2(max_texel_buffer_elements, size / blocksize);
451 
452    return elements * blocksize;
453 }
454 
455 
456 /*
457  * a4xx+ helpers:
458  */
459 
/* Map a shader stage to the a4xx state-block id used when uploading
 * its program state:
 */
static inline enum a4xx_state_block
fd4_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB4_VS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB4_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB4_CS_SHADER;
   default:
      unreachable("bad shader type");
      /* not reached; quiets compilers that can't see unreachable(): */
      return (enum a4xx_state_block) ~0;
   }
}
476 
/* Map an index-buffer element size in bytes to the a4xx+ hw enum.
 * Unsupported sizes assert on debug builds, and fall back to 32-bit
 * indices on release builds.
 */
static inline enum a4xx_index_size
fd4_size2indextype(unsigned index_size)
{
   switch (index_size) {
   case 1:
      return INDEX4_SIZE_8_BIT;
   case 2:
      return INDEX4_SIZE_16_BIT;
   case 4:
      return INDEX4_SIZE_32_BIT;
   }
   DBG("unsupported index size: %d", index_size);
   assert(0);
   return INDEX4_SIZE_32_BIT;
}
492 
/* Convert 19.2MHz RBBM always-on timer ticks to ns */
static inline uint64_t
ticks_to_ns(uint64_t ts)
{
   /* 1000000000 / 19200000 reduces exactly to 625/12.  Multiply before
    * dividing: the old form computed the ratio first with integer
    * division, truncating 52.0833... to 52 (a ~0.16% systematic error).
    * ts * 625 cannot overflow for any realistic timestamp (up to ~2^54
    * ticks, ie. decades of uptime at 19.2MHz).
    */
   return ts * 625 / 12;
}
499 
500 #ifdef __cplusplus
501 }
502 #endif
503 
504 #endif /* FREEDRENO_UTIL_H_ */
505