1 /*
2 * Copyright © 2012 Rob Clark <[email protected]>
3 * SPDX-License-Identifier: MIT
4 *
5 * Authors:
6 * Rob Clark <[email protected]>
7 */
8
9 #ifndef FREEDRENO_SCREEN_H_
10 #define FREEDRENO_SCREEN_H_
11
12 #include "common/freedreno_dev_info.h"
13 #include "drm/freedreno_drmif.h"
14 #include "drm/freedreno_ringbuffer.h"
15 #include "perfcntrs/freedreno_perfcntr.h"
16
17 #include "pipe/p_screen.h"
18 #include "renderonly/renderonly.h"
19 #include "util/u_debug.h"
20 #include "util/simple_mtx.h"
21 #include "util/slab.h"
22 #include "util/u_idalloc.h"
23 #include "util/u_memory.h"
24 #include "util/u_queue.h"
25
26 #include "freedreno_batch_cache.h"
27 #include "freedreno_gmem.h"
28 #include "freedreno_util.h"
29
30 struct fd_bo;
31
/* Potential reasons for needing to skip bypass path and use GMEM, the
 * generation backend can override this with screen->gmem_reason_mask
 */
enum fd_gmem_reason {
   /* Framebuffer has depth/stencil clears: */
   FD_GMEM_CLEARS_DEPTH_STENCIL = BIT(0),
   /* Depth test/write enabled: */
   FD_GMEM_DEPTH_ENABLED = BIT(1),
   /* Stencil test/write enabled: */
   FD_GMEM_STENCIL_ENABLED = BIT(2),
   /* Blending enabled: */
   FD_GMEM_BLEND_ENABLED = BIT(3),
   /* Logic-op enabled: */
   FD_GMEM_LOGICOP_ENABLED = BIT(4),
   /* Shader reads the framebuffer: */
   FD_GMEM_FB_READ = BIT(5),
};
43
/* Offset within GMEM of various "non-GMEM" things that GMEM is used to
 * cache.  These offsets differ for gmem vs sysmem rendering (in sysmem
 * mode, the entire GMEM can be used)
 */
struct fd6_gmem_config {
   /* Color/depth CCU cache offsets within GMEM: */
   uint32_t color_ccu_offset;
   uint32_t depth_ccu_offset;

   /* Vertex attrib cache (a750+): size and offset within GMEM: */
   uint32_t vpc_attr_buf_size;
   uint32_t vpc_attr_buf_offset;
};
57
/* Device-level driver state, shared by all contexts created from this
 * screen.
 */
struct fd_screen {
   struct pipe_screen base;

   /* list of contexts created against this screen: */
   struct list_head context_list;

   /* screen-wide lock, see fd_screen_lock()/fd_screen_unlock(): */
   simple_mtx_t lock;

   /* parent pool for per-context transfer slab allocators: */
   struct slab_parent_pool transfer_pool;

   /* GMEM (on-chip tile buffer) base address and size: */
   uint64_t gmem_base;
   uint32_t gmemsize_bytes;

   /* Device identification: */
   const struct fd_dev_id *dev_id;
   uint8_t gen;      /* GPU (major) generation */
   uint32_t gpu_id;  /* 220, 305, etc */
   uint64_t chip_id; /* coreid:8 majorrev:8 minorrev:8 patch:8 */
   uint32_t max_freq;
   uint32_t ram_size;
   uint32_t max_rts; /* max # of render targets */

   /* Submit-queue priorities; prio_* remap low/norm/high priority to
    * kernel priority.  priority_mask is presumably the set of priorities
    * the kernel supports — confirm against screen_create:
    */
   uint32_t priority_mask;
   unsigned prio_low, prio_norm, prio_high;

   /* Optional device/kernel capabilities: */
   bool has_timestamp;
   bool has_robustness;
   bool has_syncobj;

   /* Options controlled via driconf: */
   struct {
      /* Conservative LRZ (default true) invalidates LRZ on draws with
       * blend and depth-write enabled, because this can lead to incorrect
       * rendering.  Driconf can be used to disable conservative LRZ for
       * games which do not have the problematic sequence of draws *and*
       * suffer a performance loss with conservative LRZ.
       */
      bool conservative_lrz;

      /* Enable EGL throttling (default true).
       */
      bool enable_throttling;

      /* If "dual_color_blend_by_location" workaround is enabled
       */
      bool dual_color_blend_by_location;
   } driconf;

   /* Device info/limits; `info` is the pointer used by the rest of the
    * driver (presumably pointing at `dev_info` once filled in — confirm
    * in screen creation):
    */
   struct fd_dev_info dev_info;
   const struct fd_dev_info *info;

   /* GMEM layout for gmem vs sysmem rendering modes: */
   struct fd6_gmem_config config_gmem, config_sysmem;

   /* Bitmask of gmem_reasons that do not force GMEM path over bypass
    * for current generation.
    */
   enum fd_gmem_reason gmem_reason_mask;

   /* Performance-counter groups exposed by the backend: */
   unsigned num_perfcntr_groups;
   const struct fd_perfcntr_group *perfcntr_groups;

   /* generated at startup from the perfcntr groups: */
   unsigned num_perfcntr_queries;
   struct pipe_driver_query_info *perfcntr_queries;

   void *compiler;                  /* currently unused for a2xx */
   struct util_queue compile_queue; /* currently unused for a2xx */

   /* drm device handle: */
   struct fd_device *dev;

   /* NOTE: we still need a pipe associated with the screen in a few
    * places, like screen->get_timestamp().  For anything context
    * related, use ctx->pipe instead.
    */
   struct fd_pipe *pipe;

   /* Per-generation hooks: */

   /* compute slice layout for a resource: */
   uint32_t (*setup_slices)(struct fd_resource *rsc);
   /* pick tile mode for a resource: */
   unsigned (*tile_mode)(const struct pipe_resource *prsc);
   /* lay out a resource according to a DRM format modifier: */
   int (*layout_resource_for_modifier)(struct fd_resource *rsc,
                                       uint64_t modifier);
   /* is the format+modifier combination supported: */
   bool (*is_format_supported)(struct pipe_screen *pscreen,
                               enum pipe_format fmt, uint64_t modifier);

   /* indirect-branch emit: */
   void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);

   /* simple gpu "memcpy": */
   void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
                      unsigned dst_off, struct pipe_resource *src,
                      unsigned src_off, unsigned sizedwords);

   /* delta between CPU and GPU clocks (presumably used for timestamp
    * conversion — derivation not visible in this header):
    */
   int64_t cpu_gpu_time_delta;

   struct fd_batch_cache batch_cache;
   struct fd_gmem_cache gmem_cache;

   /* whether batch reordering is enabled: */
   bool reorder;

   /* seqno counters for resources and contexts: */
   seqno_t rsc_seqno;
   seqno_t ctx_seqno;

   /* thread-safe id allocator for buffer ids: */
   struct util_idalloc_mt buffer_ids;

   /* format modifiers advertised by this screen: */
   unsigned num_supported_modifiers;
   const uint64_t *supported_modifiers;

   /* renderonly glue, for SoCs with split display/render devices: */
   struct renderonly *ro;

   /* the blob seems to always use 8K factor and 128K param sizes, copy them */
#define FD6_TESS_FACTOR_SIZE (8 * 1024)
#define FD6_TESS_PARAM_SIZE (128 * 1024)
#define FD6_TESS_BO_SIZE (FD6_TESS_FACTOR_SIZE + FD6_TESS_PARAM_SIZE)
   /* shared bo holding tess factor/param buffers: */
   struct fd_bo *tess_bo;

   /* table with MESA_PRIM_COUNT+1 entries mapping MESA_PRIM_x to
    * DI_PT_x value to use for draw initiator.  There are some
    * slight differences between generation.
    *
    * Note that primtypes[PRIM_TYPE_MAX] is used to map to the
    * internal RECTLIST primtype, if available, used for blits/
    * clears.
    */
   const enum pc_di_primtype *primtypes;
   uint32_t primtypes_mask;

   /* screen-private aux context (see fd_screen_aux_context_get()),
    * guarded by aux_ctx_lock:
    */
   simple_mtx_t aux_ctx_lock;
   struct pipe_context *aux_ctx;
};
179
/* Cast helper: convert a gallium pipe_screen to the freedreno screen: */
static inline struct fd_screen *
fd_screen(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = (struct fd_screen *)pscreen;
   return screen;
}
185
struct fd_context;
/* Get/put the screen's shared aux context (see fd_screen::aux_ctx): */
struct fd_context * fd_screen_aux_context_get(struct pipe_screen *pscreen);
void fd_screen_aux_context_put(struct pipe_screen *pscreen);
189
190 static inline void
fd_screen_lock(struct fd_screen * screen)191 fd_screen_lock(struct fd_screen *screen)
192 {
193 simple_mtx_lock(&screen->lock);
194 }
195
196 static inline void
fd_screen_unlock(struct fd_screen * screen)197 fd_screen_unlock(struct fd_screen *screen)
198 {
199 simple_mtx_unlock(&screen->lock);
200 }
201
202 static inline void
fd_screen_assert_locked(struct fd_screen * screen)203 fd_screen_assert_locked(struct fd_screen *screen)
204 {
205 simple_mtx_assert_locked(&screen->lock);
206 }
207
/* Export a bo as a winsys handle, with the given stride/scanout: */
bool fd_screen_bo_get_handle(struct pipe_screen *pscreen, struct fd_bo *bo,
                             struct renderonly_scanout *scanout,
                             unsigned stride, struct winsys_handle *whandle);
/* Import a bo from a winsys handle: */
struct fd_bo *fd_screen_bo_from_handle(struct pipe_screen *pscreen,
                                       struct winsys_handle *whandle);

/* Create the freedreno screen for the given drm fd: */
struct pipe_screen *fd_screen_create(int fd,
                                     const struct pipe_screen_config *config,
                                     struct renderonly *ro);
217
218 static inline bool
is_a20x(struct fd_screen * screen)219 is_a20x(struct fd_screen *screen)
220 {
221 return (screen->gpu_id >= 200) && (screen->gpu_id < 210);
222 }
223
224 static inline bool
is_a2xx(struct fd_screen * screen)225 is_a2xx(struct fd_screen *screen)
226 {
227 return screen->gen == 2;
228 }
229
230 /* is a3xx patch revision 0? */
231 /* TODO a306.0 probably doesn't need this.. be more clever?? */
232 static inline bool
is_a3xx_p0(struct fd_screen * screen)233 is_a3xx_p0(struct fd_screen *screen)
234 {
235 return (screen->chip_id & 0xff0000ff) == 0x03000000;
236 }
237
238 static inline bool
is_a3xx(struct fd_screen * screen)239 is_a3xx(struct fd_screen *screen)
240 {
241 return screen->gen == 3;
242 }
243
244 static inline bool
is_a4xx(struct fd_screen * screen)245 is_a4xx(struct fd_screen *screen)
246 {
247 return screen->gen == 4;
248 }
249
250 static inline bool
is_a5xx(struct fd_screen * screen)251 is_a5xx(struct fd_screen *screen)
252 {
253 return screen->gen == 5;
254 }
255
256 static inline bool
is_a6xx(struct fd_screen * screen)257 is_a6xx(struct fd_screen *screen)
258 {
259 return screen->gen >= 6;
260 }
261
/* is it using the ir3 compiler (shader isa introduced with a3xx)? */
static inline bool
is_ir3(struct fd_screen *screen)
{
   /* every generation from a3xx onwards uses the ir3 isa: */
   return is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen) ||
          is_a3xx(screen);
}
269
/* Compute shaders are supported from a4xx onwards: */
static inline bool
has_compute(struct fd_screen *screen)
{
   return is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen);
}
275
276 #endif /* FREEDRENO_SCREEN_H_ */
277