/*
 * Copyright © 2012 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_upload_mgr.h"

#include "common/freedreno_guardband.h"

#include "freedreno_context.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"

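/* Read a field from a CSO that may not be bound yet: yields 0 when the
 * pointer is NULL, e.g. get_safe(ctx->rasterizer, rasterizer_discard) is
 * false until a rasterizer state object has been bound.
 */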
#define get_safe(ptr, field) ((ptr) ? (ptr)->field : 0)

/* All the generic state handling.  For CSOs that are specific to the GPU
 * generation, the bind and delete callbacks can still live here when they
 * are common across generations.
 */

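/* Rough per-draw cost estimate: one unit per bound color buffer, plus an
 * extra unit per MRT with blending enabled, plus one each for depth test
 * and depth write.  This appears to feed the per-batch cost accounting
 * used by the gmem vs. sysmem rendering heuristics.
 */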
static void
update_draw_cost(struct fd_context *ctx) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

   ctx->draw_cost = pfb->nr_cbufs;
   for (unsigned i = 0; i < pfb->nr_cbufs; i++)
      if (fd_blend_enabled(ctx, i))
         ctx->draw_cost++;
   if (fd_depth_enabled(ctx))
      ctx->draw_cost++;
   if (fd_depth_write_enabled(ctx))
      ctx->draw_cost++;
}

static void
fd_set_blend_color(struct pipe_context *pctx,
                   const struct pipe_blend_color *blend_color) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->blend_color = *blend_color;
   fd_context_dirty(ctx, FD_DIRTY_BLEND_COLOR);
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
                   const struct pipe_stencil_ref stencil_ref) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stencil_ref = stencil_ref;
   fd_context_dirty(ctx, FD_DIRTY_STENCIL_REF);
}

static void
fd_set_clip_state(struct pipe_context *pctx,
                  const struct pipe_clip_state *clip) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->ucp = *clip;
   fd_context_dirty(ctx, FD_DIRTY_UCP);
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->sample_mask = (uint16_t)sample_mask;
   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_MASK);
}

static void
fd_set_sample_locations(struct pipe_context *pctx, size_t size,
                        const uint8_t *locations)
  in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   if (!locations) {
      ctx->sample_locations_enabled = false;
      return;
   }

   size = MIN2(size, sizeof(ctx->sample_locations));
   memcpy(ctx->sample_locations, locations, size);
   ctx->sample_locations_enabled = true;

   fd_context_dirty(ctx, FD_DIRTY_SAMPLE_LOCATIONS);
}

static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   fd_context_dirty(ctx, FD_DIRTY_MIN_SAMPLES);
}

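/* Turn a user-pointer constant buffer into a real buffer by copying the
 * data through the stream uploader.  Only used on gen6+ (see
 * fd_set_constant_buffer()), presumably because newer gens source const
 * state from a buffer object rather than copying it into the cmdstream at
 * emit time.
 */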
static void
upload_user_buffer(struct pipe_context *pctx, struct pipe_constant_buffer *cb)
{
   u_upload_data(pctx->stream_uploader, 0, cb->buffer_size, 64,
                 cb->user_buffer, &cb->buffer_offset, &cb->buffer);
   cb->user_buffer = NULL;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, enum pipe_shader_type shader,
                       uint index, bool take_ownership,
                       const struct pipe_constant_buffer *cb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);

   /* Note that gallium frontends can unbind constant buffers by
    * passing a NULL cb, or a cb with no buffer:
    */
   if (!cb || !(cb->user_buffer || cb->buffer)) {
      so->enabled_mask &= ~(1 << index);
      return;
   }

   if (cb->user_buffer && ctx->screen->gen >= 6) {
      upload_user_buffer(pctx, &so->cb[index]);
      cb = &so->cb[index];
   }

   so->enabled_mask |= 1 << index;

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_CONST);
   fd_resource_set_usage(cb->buffer, FD_DIRTY_CONST);
   fd_dirty_shader_resource(ctx, cb->buffer, shader, FD_DIRTY_SHADER_CONST, false);
}

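/* Bind SSBOs for a shader stage.  writable_bitmask tracks which of the
 * newly bound slots the shader may write; for those, the bound range is
 * added to the resource's valid_buffer_range so the driver knows that
 * region may now contain GPU-written data.
 */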
void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_shader_buffer *buffers,
                      unsigned writable_bitmask) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
   const unsigned modified_bits = u_bit_consecutive(start, count);

   so->writable_mask &= ~modified_bits;
   so->writable_mask |= writable_bitmask << start;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);

         bool write = writable_bitmask & BIT(i);

         fd_resource_set_usage(buffers[i].buffer, FD_DIRTY_SSBO);
         fd_dirty_shader_resource(ctx, buffers[i].buffer, shader,
                                  FD_DIRTY_SHADER_SSBO, write);

         so->enabled_mask |= BIT(n);

         if (write) {
            struct fd_resource *rsc = fd_resource(buf->buffer);
            util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                           buf->buffer_offset,
                           buf->buffer_offset + buf->buffer_size);
         }
      } else {
         pipe_resource_reference(&buf->buffer, NULL);

         so->enabled_mask &= ~BIT(n);
      }
   }

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_SSBO);
}

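/* Bind shader images.  Slots whose view is unchanged are skipped,
 * images==NULL unbinds [start, start+count), and unbind_num_trailing_slots
 * additionally releases the slots immediately following the bound range.
 */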
void
fd_set_shader_images(struct pipe_context *pctx, enum pipe_shader_type shader,
                     unsigned start, unsigned count,
                     unsigned unbind_num_trailing_slots,
                     const struct pipe_image_view *images) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];

   unsigned mask = 0;

   if (images) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *buf = &so->si[n];

         if ((buf->resource == images[i].resource) &&
             (buf->format == images[i].format) &&
             (buf->access == images[i].access) &&
             !memcmp(&buf->u, &images[i].u, sizeof(buf->u)))
            continue;

         mask |= BIT(n);
         util_copy_image_view(buf, &images[i]);

         if (buf->resource) {
            bool write = buf->access & PIPE_IMAGE_ACCESS_WRITE;

            fd_resource_set_usage(buf->resource, FD_DIRTY_IMAGE);
            fd_dirty_shader_resource(ctx, buf->resource, shader,
                                     FD_DIRTY_SHADER_IMAGE, write);
            so->enabled_mask |= BIT(n);

            if (write && (buf->resource->target == PIPE_BUFFER)) {
               struct fd_resource *rsc = fd_resource(buf->resource);
               util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
                              buf->u.buf.offset,
                              buf->u.buf.offset + buf->u.buf.size);
            }
         } else {
            so->enabled_mask &= ~BIT(n);
         }
      }
   } else {
      mask = (BIT(count) - 1) << start;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + start;
         struct pipe_image_view *img = &so->si[n];

         pipe_resource_reference(&img->resource, NULL);
      }

      so->enabled_mask &= ~mask;
   }

   for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
      pipe_resource_reference(&so->si[i + start + count].resource, NULL);

   so->enabled_mask &=
      ~(BITFIELD_MASK(unbind_num_trailing_slots) << (start + count));

   fd_context_dirty_shader(ctx, shader, FD_DIRTY_SHADER_IMAGE);
}

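/* Framebuffer state change.  With batch reordering enabled, the current
 * batch is ended (queries finished, reference dropped) and all state is
 * marked dirty, so the next draw starts a fresh batch against the new
 * render targets; without reordering the current batch is flushed
 * immediately instead.
 */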
void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples", framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   /* Do this *after* checking that the framebuffer state is actually
    * changing.  In the fd_blitter_clear() path, we get a pfb update
    * to restore the current pfb state, which should not trigger us
    * to flush (as that can cause the batch to be freed at a point
    * before fd_clear() returns, but after the point where it expects
    * flushes to potentially happen).
    */
   fd_context_switch_from(ctx);

   util_copy_framebuffer_state(cso, framebuffer);

   STATIC_ASSERT((4 * PIPE_MAX_COLOR_BUFS) == (8 * sizeof(ctx->all_mrt_channel_mask)));
   ctx->all_mrt_channel_mask = 0;

   /* Generate a bitmask of all valid channels for all MRTs.  Blend
    * state with unwritten channels essentially acts as blend enabled,
    * which disables LRZ write.  But only if the cbuf *has* the masked
    * channels, which is not known at the time the blend state is
    * created.
    */
   for (unsigned i = 0; i < framebuffer->nr_cbufs; i++) {
      if (!framebuffer->cbufs[i])
         continue;

      enum pipe_format format = framebuffer->cbufs[i]->format;
      unsigned nr = util_format_get_nr_components(format);

      ctx->all_mrt_channel_mask |= BITFIELD_MASK(nr) << (4 * i);
   }

   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_finish_queries(old_batch);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);

      fd_batch_reference(&old_batch, NULL);
   } else if (ctx->batch) {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch);
   }

   fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);

   for (unsigned i = 0; i < PIPE_MAX_VIEWPORTS; i++) {
      ctx->disabled_scissor[i].minx = 0;
      ctx->disabled_scissor[i].miny = 0;
      ctx->disabled_scissor[i].maxx = cso->width - 1;
      ctx->disabled_scissor[i].maxy = cso->height - 1;
   }

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
   update_draw_cost(ctx);
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
                       const struct pipe_poly_stipple *stipple) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->stipple = *stipple;
   fd_context_dirty(ctx, FD_DIRTY_STIPPLE);
}

static void
fd_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                      unsigned num_scissors,
                      const struct pipe_scissor_state *scissor) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      unsigned idx = start_slot + i;

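      /* The driver stores scissors with inclusive max bounds, which cannot
       * express a zero-area scissor directly, so encode an empty scissor as
       * min > max ((1,1)..(0,0)), which rejects everything:
       */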
      if ((scissor[i].minx == scissor[i].maxx) ||
          (scissor[i].miny == scissor[i].maxy)) {
         ctx->scissor[idx].minx = ctx->scissor[idx].miny = 1;
         ctx->scissor[idx].maxx = ctx->scissor[idx].maxy = 0;
      } else {
         ctx->scissor[idx].minx = scissor[i].minx;
         ctx->scissor[idx].miny = scissor[i].miny;
         ctx->scissor[idx].maxx = MAX2(scissor[i].maxx, 1) - 1;
         ctx->scissor[idx].maxy = MAX2(scissor[i].maxy, 1) - 1;
      }
   }

   fd_context_dirty(ctx, FD_DIRTY_SCISSOR);
}

static void
init_scissor_states(struct pipe_context *pctx)
   in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned idx = 0; idx < ARRAY_SIZE(ctx->scissor); idx++) {
      ctx->scissor[idx].minx = ctx->scissor[idx].miny = 1;
      ctx->scissor[idx].maxx = ctx->scissor[idx].maxy = 0;
   }
}

static void
fd_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   for (unsigned i = 0; i < num_viewports; i++) {
      unsigned idx = start_slot + i;
      struct pipe_scissor_state *scissor = &ctx->viewport_scissor[idx];
      const struct pipe_viewport_state *viewport = &viewports[i];

      ctx->viewport[idx] = *viewport;

      /* see si_get_scissor_from_viewport(): */

      /* Convert (-1, -1) and (1, 1) from clip space into window space. */
      float minx = -viewport->scale[0] + viewport->translate[0];
      float miny = -viewport->scale[1] + viewport->translate[1];
      float maxx = viewport->scale[0] + viewport->translate[0];
      float maxy = viewport->scale[1] + viewport->translate[1];

      /* Handle inverted viewports. */
      if (minx > maxx) {
         SWAP(minx, maxx);
      }
      if (miny > maxy) {
         SWAP(miny, maxy);
      }

      const float max_dims = ctx->screen->gen >= 4 ? 16384.f : 4096.f;

      /* Clamp, convert to integer and round up the max bounds. */
      scissor->minx = CLAMP(minx, 0.f, max_dims);
      scissor->miny = CLAMP(miny, 0.f, max_dims);
      scissor->maxx = MAX2(CLAMP(ceilf(maxx), 0.f, max_dims), 1) - 1;
      scissor->maxy = MAX2(CLAMP(ceilf(maxy), 0.f, max_dims), 1) - 1;
   }

   fd_context_dirty(ctx, FD_DIRTY_VIEWPORT);

   /* Guardband is only used on a6xx so far: */
   if (!is_a6xx(ctx->screen))
      return;

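   /* A single guardband setting applies to all viewports, so compute the
    * largest guardband that is valid for every one of them by taking the
    * per-viewport minimum:
    */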
   ctx->guardband.x = ~0;
   ctx->guardband.y = ~0;

   bool is3x = is_a3xx(ctx->screen);

   for (unsigned i = 0; i < PIPE_MAX_VIEWPORTS; i++) {
      const struct pipe_viewport_state *vp = &ctx->viewport[i];

      unsigned gx = fd_calc_guardband(vp->translate[0], vp->scale[0], is3x);
      unsigned gy = fd_calc_guardband(vp->translate[1], vp->scale[1], is3x);

      ctx->guardband.x = MIN2(ctx->guardband.x, gx);
      ctx->guardband.y = MIN2(ctx->guardband.y, gy);
   }
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
                      const struct pipe_vertex_buffer *vb) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
   int i;

   /* on a2xx, pitch is encoded in the vtx fetch instruction, so
    * we need to mark VTXSTATE as dirty as well to trigger patching
    * and re-emitting the vtx shader:
    */
   if (ctx->screen->gen < 3) {
      for (i = 0; i < count; i++) {
         bool new_enabled = vb && vb[i].buffer.resource;
         bool old_enabled = so->vb[i].buffer.resource != NULL;
         if (new_enabled != old_enabled) {
            fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
            break;
         }
      }
   }

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, count,
                                true);
   so->count = util_last_bit(so->enabled_mask);

   if (!vb)
      return;

   fd_context_dirty(ctx, FD_DIRTY_VTXBUF);

   for (unsigned i = 0; i < count; i++) {
      assert(!vb[i].is_user_buffer);
      fd_resource_set_usage(vb[i].buffer.resource, FD_DIRTY_VTXBUF);
      fd_dirty_resource(ctx, vb[i].buffer.resource, FD_DIRTY_VTXBUF, false);

      /* Robust buffer access: Return undefined data (the start of the buffer)
       * instead of process termination or a GPU hang in case of overflow.
       */
      if (vb[i].buffer.resource &&
          unlikely(vb[i].buffer_offset >= vb[i].buffer.resource->width0)) {
         so->vb[i].buffer_offset = 0;
      }
   }
}

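/* Blend CSO bind.  In addition to the normal FD_DIRTY_BLEND, separate
 * dirty bits are flagged when the dual-source-blend or the coherent-blend
 * enable changes, so generation-specific code can react to those
 * transitions without having to diff the blend state itself.
 */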
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_blend_state *cso = hwcso;
   bool old_is_dual = ctx->blend ? ctx->blend->rt[0].blend_enable &&
                                      util_blend_state_is_dual(ctx->blend, 0)
                                 : false;
   bool new_is_dual =
      cso ? cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) : false;
   fd_context_dirty(ctx, FD_DIRTY_BLEND);
   if (old_is_dual != new_is_dual)
      fd_context_dirty(ctx, FD_DIRTY_BLEND_DUAL);

   bool old_coherent = get_safe(ctx->blend, blend_coherent);
   bool new_coherent = get_safe(cso, blend_coherent);
   if (new_coherent != old_coherent) {
      fd_context_dirty(ctx, FD_DIRTY_BLEND_COHERENT);
   }
   ctx->blend = hwcso;
   update_draw_cost(ctx);
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
   bool discard = get_safe(ctx->rasterizer, rasterizer_discard);
   unsigned clip_plane_enable = get_safe(ctx->rasterizer, clip_plane_enable);

   ctx->rasterizer = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);

   if (ctx->rasterizer && ctx->rasterizer->scissor) {
      ctx->current_scissor = ctx->scissor;
   } else {
      ctx->current_scissor = ctx->disabled_scissor;
   }

   /* If the scissor enable bit changed, we need to mark the scissor
    * state as dirty as well.
    * NOTE: we can do a shallow (pointer) compare, since we only care
    * whether it changed to/from &ctx->disabled_scissor
    */
   if (old_scissor != fd_context_get_scissor(ctx))
      fd_context_dirty(ctx, FD_DIRTY_SCISSOR);

   if (discard != get_safe(ctx->rasterizer, rasterizer_discard))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_DISCARD);

   if (clip_plane_enable != get_safe(ctx->rasterizer, clip_plane_enable))
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER_CLIP_PLANE_ENABLE);
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->zsa = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_ZSA);
   update_draw_cost(ctx);
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

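/* Generic vertex-elements CSO, used when the generation-specific backend
 * does not install its own create hook (see fd_state_init()).  Per-element
 * src_stride is folded into a per-vertex-buffer stride.
 */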
static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                       const struct pipe_vertex_element *elements)
{
   struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
   for (unsigned i = 0; i < num_elements; i++)
      so->strides[elements[i].vertex_buffer_index] = elements[i].src_stride;

   return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso) in_dt
{
   FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->vtx.vtx = hwcso;
   fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
}

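/* Create a stream-output target.  In addition to referencing the
 * destination buffer, allocate a small single-uint32_t offset_buf, which
 * the hw backends use to save/restore the current write offset on the GPU
 * so that streamout can be paused/resumed across batches.
 */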
static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
                               struct pipe_resource *prsc,
                               unsigned buffer_offset, unsigned buffer_size)
{
   struct fd_stream_output_target *target;
   struct fd_resource *rsc = fd_resource(prsc);

   target = CALLOC_STRUCT(fd_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->base.reference, 1);
   pipe_resource_reference(&target->base.buffer, prsc);

   target->base.context = pctx;
   target->base.buffer_offset = buffer_offset;
   target->base.buffer_size = buffer_size;

   target->offset_buf = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(uint32_t));

   assert(rsc->b.b.target == PIPE_BUFFER);
   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   return &target->base;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
                                struct pipe_stream_output_target *target)
{
   struct fd_stream_output_target *cso = fd_stream_output_target(target);

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset_buf, NULL);

   FREE(target);
}

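/* Bind stream-output targets.  Per the gallium contract, offsets[i] == -1
 * means "append at the current offset"; any other value resets the
 * internal offset for that target (all targets are reset together at
 * BeginTransformFeedback()).
 */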
static void
fd_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets,
                             struct pipe_stream_output_target **targets,
                             const unsigned *offsets) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_streamout_stateobj *so = &ctx->streamout;
   unsigned i;

   assert(num_targets <= ARRAY_SIZE(so->targets));

   /* Older targets need sw stats enabled for streamout emulation in VS: */
   if (ctx->screen->gen < 5) {
      if (num_targets && !so->num_targets) {
         ctx->stats_users++;
      } else if (so->num_targets && !num_targets) {
         ctx->stats_users--;
      }
   }

   for (i = 0; i < num_targets; i++) {
      bool changed = targets[i] != so->targets[i];
      bool reset = (offsets[i] != (unsigned)-1);

      so->reset |= (reset << i);

      if (targets[i]) {
         fd_resource_set_usage(targets[i]->buffer, FD_DIRTY_STREAMOUT);
         fd_dirty_resource(ctx, targets[i]->buffer, FD_DIRTY_STREAMOUT, true);

         struct fd_stream_output_target *target = fd_stream_output_target(targets[i]);
         fd_resource_set_usage(target->offset_buf, FD_DIRTY_STREAMOUT);
         fd_dirty_resource(ctx, target->offset_buf, FD_DIRTY_STREAMOUT, true);
      }

      if (!changed && !reset)
         continue;

      /* Note that all SO targets will be reset at once at a
       * BeginTransformFeedback().
       */
      if (reset) {
         so->offsets[i] = offsets[i];
         ctx->streamout.verts_written = 0;
      }

      pipe_so_target_reference(&so->targets[i], targets[i]);
   }

   for (; i < so->num_targets; i++) {
      pipe_so_target_reference(&so->targets[i], NULL);
   }

   so->num_targets = num_targets;

   fd_context_dirty(ctx, FD_DIRTY_STREAMOUT);
}

static void
fd_bind_compute_state(struct pipe_context *pctx, void *state) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->compute = state;
   fd_context_dirty_shader(ctx, PIPE_SHADER_COMPUTE, FD_DIRTY_SHADER_PROG);
}

/* TODO pipe_context::set_compute_resources() should DIAF and clover
 * should be updated to use pipe_context::set_constant_buffer() and
 * pipe_context::set_shader_images().  Until then just directly frob
 * the UBO/image state to avoid the rest of the driver needing to
 * know about this bastard api..
 */
static void
fd_set_compute_resources(struct pipe_context *pctx, unsigned start,
                         unsigned count, struct pipe_surface **prscs) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_constbuf_stateobj *so = &ctx->constbuf[PIPE_SHADER_COMPUTE];

   for (unsigned i = 0; i < count; i++) {
      const uint32_t index = i + start + 1;   /* UBOs start at index 1 */

      if (!prscs) {
         util_copy_constant_buffer(&so->cb[index], NULL, false);
         so->enabled_mask &= ~(1 << index);
      } else if (prscs[i]->format == PIPE_FORMAT_NONE) {
         struct pipe_constant_buffer cb = {
               .buffer = prscs[i]->texture,
         };
         util_copy_constant_buffer(&so->cb[index], &cb, false);
         so->enabled_mask |= (1 << index);
      } else {
         // TODO images
         unreachable("finishme");
      }
   }
}

/* used by clover to bind global objects, returning the bo address
 * via handles[n]
 */
static void
fd_set_global_binding(struct pipe_context *pctx, unsigned first, unsigned count,
                      struct pipe_resource **prscs, uint32_t **handles) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
   unsigned mask = 0;

   if (prscs) {
      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;

         mask |= BIT(n);

         pipe_resource_reference(&so->buf[n], prscs[i]);

         if (so->buf[n]) {
            struct fd_resource *rsc = fd_resource(so->buf[n]);
            uint32_t offset = *handles[i];
            uint64_t iova = fd_bo_get_iova(rsc->bo) + offset;

            /* Yes, really, despite what the type implies: */
            memcpy(handles[i], &iova, sizeof(iova));
         }

         if (prscs[i])
            so->enabled_mask |= BIT(n);
         else
            so->enabled_mask &= ~BIT(n);
      }
   } else {
      mask = (BIT(count) - 1) << first;

      for (unsigned i = 0; i < count; i++) {
         unsigned n = i + first;
         pipe_resource_reference(&so->buf[n], NULL);
      }

      so->enabled_mask &= ~mask;
   }
}

void
fd_state_init(struct pipe_context *pctx)
{
   pctx->set_blend_color = fd_set_blend_color;
   pctx->set_stencil_ref = fd_set_stencil_ref;
   pctx->set_clip_state = fd_set_clip_state;
   pctx->set_sample_mask = fd_set_sample_mask;
   pctx->set_min_samples = fd_set_min_samples;
   pctx->set_constant_buffer = fd_set_constant_buffer;
   pctx->set_shader_buffers = fd_set_shader_buffers;
   pctx->set_shader_images = fd_set_shader_images;
   pctx->set_framebuffer_state = fd_set_framebuffer_state;
   pctx->set_sample_locations = fd_set_sample_locations;
   pctx->set_polygon_stipple = fd_set_polygon_stipple;
   pctx->set_scissor_states = fd_set_scissor_states;
   pctx->set_viewport_states = fd_set_viewport_states;

   pctx->set_vertex_buffers = fd_set_vertex_buffers;

   pctx->bind_blend_state = fd_blend_state_bind;
   pctx->delete_blend_state = fd_blend_state_delete;

   pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
   pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

   pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
   pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

   if (!pctx->create_vertex_elements_state)
      pctx->create_vertex_elements_state = fd_vertex_state_create;
   pctx->delete_vertex_elements_state = fd_vertex_state_delete;
   pctx->bind_vertex_elements_state = fd_vertex_state_bind;

   pctx->create_stream_output_target = fd_create_stream_output_target;
   pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
   pctx->set_stream_output_targets = fd_set_stream_output_targets;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->bind_compute_state = fd_bind_compute_state;
      pctx->set_compute_resources = fd_set_compute_resources;
      pctx->set_global_binding = fd_set_global_binding;
   }

   init_scissor_states(pctx);
}