/*
 * Copyright 2021 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */
#include <stdio.h>
#include "asahi/genxml/agx_pack.h"
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/half_float.h"
#include "util/macros.h"
#include "agx_state.h"
#include "pool.h"

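/*
 * Return the GPU virtual address of a pipe constant buffer, or 0 if no
 * backing buffer is bound. The backing resource is marked as read by the
 * batch so it stays resident for the draw.
 */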
static uint64_t
agx_const_buffer_ptr(struct agx_batch *batch, struct pipe_constant_buffer *cb)
{
   if (cb->buffer) {
      struct agx_resource *rsrc = agx_resource(cb->buffer);
      agx_batch_reads(batch, rsrc);

      return rsrc->bo->va->addr + cb->buffer_offset;
   } else {
      return 0;
   }
}

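/*
 * Gather the bound vertex buffers and write per-attribute base addresses and
 * clamps into the batch uniforms, so vertex fetch can be bounds checked.
 */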
void
agx_upload_vbos(struct agx_batch *batch)
{
   struct agx_context *ctx = batch->ctx;
   struct agx_vertex_elements *attribs = ctx->attributes;
   uint64_t buffers[PIPE_MAX_ATTRIBS] = {0};
   size_t buf_sizes[PIPE_MAX_ATTRIBS] = {0};

   /* TODO: To handle null vertex buffers, we use robustness always. Once we
    * support soft fault in the kernel, we can optimize this.
    */
   u_foreach_bit(vbo, ctx->vb_mask) {
      struct pipe_vertex_buffer vb = ctx->vertex_buffers[vbo];
      assert(!vb.is_user_buffer);

      if (vb.buffer.resource) {
         struct agx_resource *rsrc = agx_resource(vb.buffer.resource);
         agx_batch_reads(batch, rsrc);

         buffers[vbo] = rsrc->bo->va->addr + vb.buffer_offset;
         buf_sizes[vbo] = rsrc->layout.size_B - vb.buffer_offset;
      }
   }

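   /* Upload a small zeroed allocation used as a sink address, so attribute
    * fetches from unbound vertex buffers have somewhere harmless to land.
    */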
   uint32_t zeroes[4] = {0};
   uint64_t sink = agx_pool_upload_aligned(&batch->pool, &zeroes, 16, 16);

   for (unsigned i = 0; i < PIPE_MAX_ATTRIBS; ++i) {
      unsigned buf = attribs->buffers[i];
      uint64_t addr;

      batch->uniforms.attrib_clamp[i] = agx_calculate_vbo_clamp(
         buffers[buf], sink, attribs->key[i].format, buf_sizes[buf],
         attribs->key[i].stride, attribs->src_offsets[i], &addr);

      batch->uniforms.attrib_base[i] = addr;
   }
}

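/*
 * Allocate the root uniform table for the batch, fill in draw-level system
 * values (sample mask, clip Z coefficient, point sprite mask), and copy the
 * CPU-side uniforms into the GPU allocation.
 */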
void
agx_upload_uniforms(struct agx_batch *batch)
{
   struct agx_context *ctx = batch->ctx;

   struct agx_ptr root_ptr = agx_pool_alloc_aligned(
      &batch->pool, sizeof(struct agx_draw_uniforms), 16);

   batch->uniforms.tables[AGX_SYSVAL_TABLE_ROOT] = root_ptr.gpu;
   batch->uniforms.sample_mask = ctx->sample_mask;

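   /* 0x3800 is 0.5 in fp16. The coefficient is nonzero only with GL-style
    * [-1, 1] clip space (clip_halfz disabled), where Z must be remapped to
    * the [0, 1] range.
    */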
   assert(_mesa_float_to_half(0.5) == 0x3800);
   batch->uniforms.clip_z_coeff =
      (ctx->rast && !ctx->rast->base.clip_halfz) ? 0x3800 : 0x0;

   batch->uniforms.sprite_mask =
      (batch->reduced_prim == MESA_PRIM_POINTS && ctx->rast)
         ? ctx->rast->base.sprite_coord_enable
         : 0;

   memcpy(root_ptr.cpu, &batch->uniforms, sizeof(batch->uniforms));
}

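/*
 * Upload per-sampler uniforms for a shader stage: fp16 LOD biases and, when
 * the stage uses bindless samplers, handles into the batch's sampler heap.
 */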
void
agx_set_sampler_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_context *ctx = batch->ctx;
   struct agx_stage *st = &ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];
   struct agx_device *dev = agx_device(ctx->base.screen);

   u_foreach_bit(s, st->valid_samplers) {
      unif->lod_bias[s] = st->samplers[s]->lod_bias_as_fp16;
   }

   /* If we use bindless samplers, insert the samplers into the heap */
   if (st->shader && st->shader->uses_bindless_samplers) {
      u_foreach_bit(s, st->valid_samplers) {
         unif->sampler_handle[s] =
            28 +
            agx_sampler_heap_add(dev, &batch->sampler_heap,
                                 &st->samplers[s]->desc_without_custom_border);
      }
   }
}

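/*
 * Upload uniform buffer (UBO) bindings for a shader stage: the GPU base
 * address and size of each bound constant buffer.
 */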
void
agx_set_cbuf_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_stage *st = &batch->ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];

   u_foreach_bit(cb, st->cb_mask) {
      unif->ubo_base[cb] = agx_const_buffer_ptr(batch, &st->cb[cb]);
      unif->ubo_size[cb] = st->cb[cb].buffer_size;
   }
}

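/*
 * Upload shader storage buffer (SSBO) bindings for a shader stage. Writable
 * buffers are tracked as batch writes and flag the batch as having
 * incoherent writes; unbound slots point at a zeroed sink with size 0.
 */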
void
agx_set_ssbo_uniforms(struct agx_batch *batch, enum pipe_shader_type stage)
{
   struct agx_stage *st = &batch->ctx->stage[stage];
   struct agx_stage_uniforms *unif = &batch->stage_uniforms[stage];

   /* Single element sink. TODO: Optimize with soft fault. */
   uint32_t zeroes[4] = {0};
   uint64_t sink = agx_pool_upload_aligned(&batch->pool, &zeroes, 16, 16);

   /* Consider all shader buffers, needed to avoid faults with
    * e.g. arb_shader_storage_buffer_object-array-ssbo-binding.
    */
   for (unsigned cb = 0; cb < PIPE_MAX_SHADER_BUFFERS; ++cb) {
      struct pipe_shader_buffer *sb = &st->ssbo[cb];

      if (sb->buffer && st->ssbo[cb].buffer_size) {
         struct agx_resource *rsrc = agx_resource(sb->buffer);

         if (st->ssbo_writable_mask & BITFIELD_BIT(cb)) {
            agx_batch_writes_range(batch, rsrc, sb->buffer_offset,
                                   sb->buffer_size);
            batch->incoherent_writes = true;
         } else {
            agx_batch_reads(batch, rsrc);
         }

         unif->ssbo_base[cb] = rsrc->bo->va->addr + sb->buffer_offset;
         unif->ssbo_size[cb] = st->ssbo[cb].buffer_size;
      } else {
         /* Invalid, so use the sink */
         unif->ssbo_base[cb] = sink;
         unif->ssbo_size[cb] = 0;
      }
   }
}