/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_RESOURCE_H_
#define FREEDRENO_RESOURCE_H_

#include "util/list.h"
#include "util/simple_mtx.h"
#include "util/u_dump.h"
#include "util/u_range.h"
#include "util/u_transfer_helper.h"

#include "freedreno/fdl/freedreno_layout.h"
#include "freedreno_batch.h"
#include "freedreno_util.h"

BEGINC;

#define PRSC_FMT                                                               \
   "p: target=%s, format=%s, %ux%ux%u, "                                       \
   "array_size=%u, last_level=%u, "                                            \
   "nr_samples=%u, usage=%u, bind=%x, flags=%x"
#define PRSC_ARGS(p)                                                           \
   (p), util_str_tex_target((p)->target, true),                                \
      util_format_short_name((p)->format), (p)->width0, (p)->height0,          \
      (p)->depth0, (p)->array_size, (p)->last_level, (p)->nr_samples,          \
      (p)->usage, (p)->bind, (p)->flags

enum fd_lrz_direction {
   FD_LRZ_UNKNOWN,
   /* Depth func less/less-than: */
   FD_LRZ_LESS,
   /* Depth func greater/greater-than: */
   FD_LRZ_GREATER,
};

/**
 * State related to batch/resource tracking.
 *
 * With threaded_context we need to support replace_buffer_storage, in
 * which case we can end up in transfer_map with tres->latest while other
 * pipe_context APIs are still using the original prsc pointer.  This
 * allows TC to avoid synchronizing the front-end thread with the buffer-
 * storage replacement performed on the driver thread.  But it complicates
 * the batch/resource tracking.
 *
 * To handle this, we split the tracking out into its own ref-counted
 * structure, so that both "versions" of the resource can point to the
 * same tracking as needed.
 *
 * We could *almost* just push this down to fd_bo, except for a3xx/a4xx
 * hw queries, where we don't know up-front the size to allocate for
 * per-tile query results.
 */
struct fd_resource_tracking {
   struct pipe_reference reference;

   /* Bitmask of in-flight batches which reference this resource.  Note
    * that a batch doesn't hold references to its resources (instead the
    * fd_ringbuffer holds refs to the underlying fd_bo), but if the
    * resource is destroyed we need to clean up the batches' weak
    * references to us.
    */
   uint32_t batch_mask;

   /* reference to batch that writes this resource: */
   struct fd_batch *write_batch;

   /* Set of batches whose batch-cache key references this resource.
    * We need to track this to know which batch-cache entries to
    * invalidate if, for example, the resource is invalidated or
    * shadowed.
    */
   uint32_t bc_batch_mask;
};

void __fd_resource_tracking_destroy(struct fd_resource_tracking *track);

static inline void
fd_resource_tracking_reference(struct fd_resource_tracking **ptr,
                               struct fd_resource_tracking *track)
{
   struct fd_resource_tracking *old_track = *ptr;

   if (pipe_reference(&(*ptr)->reference, &track->reference)) {
      assert(!old_track->write_batch);
      free(old_track);
   }

   *ptr = track;
}

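/* A minimal usage sketch: a destroy path can drop its tracking reference
 * by passing NULL as the new track (assumed here; the actual call site
 * lives in freedreno_resource.c):
 *
 *    fd_resource_tracking_reference(&rsc->track, NULL);
 *
 * When the refcount drops to zero the tracking struct is freed, which is
 * only legal once no batch still writes the resource (hence the assert).
 */
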
/**
 * A resource (any buffer/texture/image/etc)
 */
struct fd_resource {
   struct threaded_resource b;
   struct fd_bo *bo; /* use fd_resource_set_bo() to write */
   enum pipe_format internal_format;
   uint32_t hash; /* _mesa_hash_pointer() on this resource's address. */
   struct fdl_layout layout;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
   bool valid;
   struct renderonly_scanout *scanout;

   /* reference to the resource holding stencil data for a z32_s8 texture */
   /* TODO rename to secondary or auxiliary? */
   struct fd_resource *stencil;

   struct fd_resource_tracking *track;

   simple_mtx_t lock;

   /* bitmask of state this resource could potentially dirty when rebound,
    * see rebind_resource()
    */
   BITMASK_ENUM(fd_dirty_3d_state) dirty;

   /* Sequence # incremented each time bo changes: */
   uint16_t seqno;

   /* Is this buffer a replacement created by threaded_context to avoid
    * a stall in the PIPE_MAP_DISCARD_WHOLE_RESOURCE|PIPE_MAP_WRITE case?
    * If so, it no longer "owns" its rsc->track, and so should not
    * invalidate it when the rsc is destroyed.
    */
   bool is_replacement : 1;

   /* Uninitialized resources with UBWC format need their UBWC flag data
    * cleared before writes, as the UBWC state is read and used during
    * writes, so undefined UBWC flag data results in undefined results.
    */
   bool needs_ubwc_clear : 1;

   /*
    * LRZ
    *
    * TODO lrz width/height/pitch should probably also move to
    * fdl_layout
    */
   bool lrz_valid : 1;
   enum fd_lrz_direction lrz_direction : 2;
   uint16_t lrz_width; // for lrz clear, does this differ from lrz_pitch?
   uint16_t lrz_height;
   uint16_t lrz_pitch;
   uint32_t lrz_fc_offset;
   struct fd_bo *lrz;
};

struct fd_memory_object {
   struct pipe_memory_object b;
   struct fd_bo *bo;
};

static inline struct fd_resource *
fd_resource(struct pipe_resource *ptex)
{
   return (struct fd_resource *)ptex;
}

static inline struct fd_memory_object *
fd_memory_object(struct pipe_memory_object *pmemobj)
{
   return (struct fd_memory_object *)pmemobj;
}

static inline bool
pending(struct fd_resource *rsc, bool write)
{
   /* if we have a pending GPU write, we are busy in any case: */
   if (rsc->track->write_batch)
      return true;

   /* if CPU wants to write, but we are pending a GPU read, we are busy: */
   if (write && rsc->track->batch_mask)
      return true;

   if (rsc->stencil && pending(rsc->stencil, write))
      return true;

   return false;
}

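/* A sketch of the intended use (hypothetical caller, not the actual
 * transfer_map code): before mapping for CPU access, flush any batch
 * still referencing the resource, e.g.:
 *
 *    if (pending(rsc, !!(usage & PIPE_MAP_WRITE)))
 *       fd_batch_flush(rsc->track->write_batch);
 *
 * The recursion into rsc->stencil covers z32_s8 textures, whose stencil
 * data lives in a separate fd_resource.
 */
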
static inline bool
resource_busy(struct fd_resource *rsc, unsigned op)
{
   return fd_bo_cpu_prep(rsc->bo, NULL, op | FD_BO_PREP_NOSYNC) != 0;
}

int __fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc,
                       unsigned op, const char *func);
#define fd_resource_wait(ctx, rsc, op) ({                                      \
   MESA_TRACE_FUNC();                                                          \
   __fd_resource_wait(ctx, rsc, op, __func__);                                 \
})

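/* The macro exists so that __fd_resource_wait() can report the calling
 * function's name (via __func__) in trace output.  A typical call, e.g.
 * before a synchronous CPU readback, might look like (sketch):
 *
 *    fd_resource_wait(ctx, rsc, FD_BO_PREP_READ);
 */
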
static inline void
fd_resource_lock(struct fd_resource *rsc)
{
   simple_mtx_lock(&rsc->lock);
}

static inline void
fd_resource_unlock(struct fd_resource *rsc)
{
   simple_mtx_unlock(&rsc->lock);
}

static inline void
fd_resource_set_usage(struct pipe_resource *prsc, enum fd_dirty_3d_state usage)
{
   if (!prsc)
      return;
   struct fd_resource *rsc = fd_resource(prsc);
   /* Bits are only ever ORed in, and we expect many set_usage() per
    * resource, so do the quick check outside of the lock.
    */
   if (likely(rsc->dirty & usage))
      return;
   fd_resource_lock(rsc);
   rsc->dirty |= usage;
   fd_resource_unlock(rsc);
}

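/* Because bits are only ever set (never cleared) here, a stale read in the
 * unlocked fast-path check is harmless: at worst we take the lock and OR in
 * a bit that is already set.  A hypothetical binding path might record that
 * a bound constant buffer can dirty constant state when rebound (sketch):
 *
 *    fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
 */
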
static inline bool
has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}

static inline bool
is_z32(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
   case PIPE_FORMAT_Z32_UNORM:
   case PIPE_FORMAT_Z32_FLOAT:
      return true;
   default:
      return false;
   }
}

struct fd_transfer {
   struct threaded_transfer b;
   struct pipe_resource *staging_prsc;
   struct pipe_box staging_box;
   void *upload_ptr;
};

static inline struct fd_transfer *
fd_transfer(struct pipe_transfer *ptrans)
{
   return (struct fd_transfer *)ptrans;
}

static inline struct fdl_slice *
fd_resource_slice(struct fd_resource *rsc, unsigned level)
{
   assert(level <= rsc->b.b.last_level);
   return &rsc->layout.slices[level];
}

static inline uint32_t
fd_resource_layer_stride(struct fd_resource *rsc, unsigned level)
{
   return fdl_layer_stride(&rsc->layout, level);
}

/* get pitch (in bytes) for specified mipmap level */
static inline uint32_t
fd_resource_pitch(struct fd_resource *rsc, unsigned level)
{
   if (is_a2xx(fd_screen(rsc->b.b.screen)))
      return fdl2_pitch(&rsc->layout, level);

   return fdl_pitch(&rsc->layout, level);
}

/* get offset for specified mipmap level and texture/array layer */
static inline uint32_t
fd_resource_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_surface_offset(&rsc->layout, level, layer);
   assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

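/* Worked sketch (hypothetical helper, not driver API): the CPU address of
 * texel (x, y) in a mapped, linearly-laid-out level combines the helpers
 * above with the per-pixel size from the layout:
 *
 *    uint8_t *base = fd_bo_map(rsc->bo);
 *    void *texel = base + fd_resource_offset(rsc, level, layer) +
 *                  y * fd_resource_pitch(rsc, level) +
 *                  x * rsc->layout.cpp;
 */
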
static inline uint32_t
fd_resource_ubwc_offset(struct fd_resource *rsc, unsigned level, unsigned layer)
{
   uint32_t offset = fdl_ubwc_offset(&rsc->layout, level, layer);
   assert(offset < fd_bo_size(rsc->bo));
   return offset;
}

static inline uint32_t
fd_resource_tile_mode(struct pipe_resource *prsc, int level)
{
   return fdl_tile_mode(&fd_resource(prsc)->layout, level);
}

static inline const char *
fd_resource_tile_mode_desc(const struct fd_resource *rsc, int level)
{
   return fdl_tile_mode_desc(&rsc->layout, level);
}

static inline bool
fd_resource_ubwc_enabled(struct fd_resource *rsc, int level)
{
   return fdl_ubwc_enabled(&rsc->layout, level);
}

/* access # of samples, with 0 normalized to 1 (which is what we care about
 * most of the time)
 */
static inline unsigned
fd_resource_nr_samples(const struct pipe_resource *prsc)
{
   return MAX2(1, prsc->nr_samples);
}

void fd_resource_screen_init(struct pipe_screen *pscreen);
void fd_resource_context_init(struct pipe_context *pctx);

uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
void fd_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src,
                               unsigned num_rebinds,
                               uint32_t rebind_mask,
                               uint32_t delete_buffer_id) in_dt;
bool fd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
                      unsigned usage);

void fd_resource_uncompress(struct fd_context *ctx,
                            struct fd_resource *rsc,
                            bool linear) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);

bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;

static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   return rsc->track->batch_mask & (1 << batch->idx);
}

static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc) assert_dt
{
   if (unlikely(rsc->needs_ubwc_clear)) {
      batch->ctx->clear_ubwc(batch, rsc);
      rsc->needs_ubwc_clear = false;
   }
}

static inline void
fd_batch_resource_read(struct fd_batch *batch,
                       struct fd_resource *rsc) assert_dt
{
   /* Fast path: if we hit this then we know we don't have anyone else
    * writing to it (since both _write and _read flush other writers), and
    * that we've already recursed for stencil.
    */
   if (unlikely(!fd_batch_references_resource(batch, rsc)))
      fd_batch_resource_read_slowpath(batch, rsc);
}

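/* E.g. a draw-time loop over bound textures might call this per view
 * (sketch, with a hypothetical `tex` state object):
 *
 *    for (unsigned i = 0; i < tex->num_textures; i++)
 *       fd_batch_resource_read(batch, fd_resource(tex->textures[i]->texture));
 *
 * The common case is already-tracked resources, which take only the
 * bitmask test above.
 */
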
static inline bool
needs_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc, bool write)
   assert_dt
{
   if (!prsc)
      return false;

   struct fd_resource *rsc = fd_resource(prsc);

   /* Switching between draw and non_draw will dirty all state, so if
    * we pick the wrong one, all the bits in the dirty_resource state
    * will be set anyway.. so no harm, no foul.
    */
   struct fd_batch *batch = ctx->batch_nondraw ? ctx->batch_nondraw : ctx->batch;

   if (!batch)
      return false;

   if (write)
      return rsc->track->write_batch != batch;

   return !fd_batch_references_resource(batch, rsc);
}

static inline void
fd_dirty_resource(struct fd_context *ctx, struct pipe_resource *prsc,
                  BITMASK_ENUM(fd_dirty_3d_state) dirty, bool write)
   assert_dt
{
   fd_context_dirty(ctx, dirty);

   if (ctx->dirty_resource & dirty)
      return;

   if (!needs_dirty_resource(ctx, prsc, write))
      return;

   ctx->dirty_resource |= dirty;
}

static inline void
fd_dirty_shader_resource(struct fd_context *ctx, struct pipe_resource *prsc,
                         enum pipe_shader_type shader,
                         BITMASK_ENUM(fd_dirty_shader_state) dirty,
                         bool write)
   assert_dt
{
   fd_context_dirty_shader(ctx, shader, dirty);

   if (ctx->dirty_shader_resource[shader] & dirty)
      return;

   if (!needs_dirty_resource(ctx, prsc, write))
      return;

   ctx->dirty_shader_resource[shader] |= dirty;
   ctx->dirty_resource |= dirty_shader_to_dirty_state(dirty);
}

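/* For example, binding a new sampler view might be recorded like this
 * (sketch of a hypothetical call site):
 *
 *    fd_dirty_shader_resource(ctx, view->texture, PIPE_SHADER_FRAGMENT,
 *                             FD_DIRTY_SHADER_TEX, false);
 *
 * which marks the per-shader dirty state and, if the resource is not
 * already tracked by the current batch, the cross-batch dirty_resource
 * bits as well.
 */
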
static inline enum fdl_view_type
fdl_type_from_pipe_target(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return FDL_VIEW_TYPE_1D;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_2D_ARRAY:
      return FDL_VIEW_TYPE_2D;
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return FDL_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_3D:
      return FDL_VIEW_TYPE_3D;
   case PIPE_MAX_TEXTURE_TYPES:
   default:
      unreachable("bad texture type");
   }
}

ENDC;

#endif /* FREEDRENO_RESOURCE_H_ */