xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/nouveau/nv50/nv50_shader_state.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2008 Ben Skeggs
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_query_hw.h"

#include "nv50/nv50_compute.xml.h"

void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < NV50_MAX_3D_SHADER_STAGES; ++s) {
      unsigned p;

      if (s == NV50_SHADER_STAGE_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == NV50_SHADER_STAGE_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
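            /* Upload the user buffer contents inline through the pushbuf:
             * CB_ADDR selects constbuf b and a word offset, and subsequent
             * CB_DATA writes store consecutive words. Split into chunks of
             * at most NV04_PFIFO_MAX_PACKET_LEN data words per packet.
             */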
            while (words) {
               unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

               PUSH_SPACE(push, nr + 3);
               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
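            /* GPU-resident buffer: point c[b] at it via CB_DEF and enable
             * the binding for this stage's program type.
             */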
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, 3D_CB(s, i), res, RD);

               nv50->cb_dirty = 1; /* Force cache flush for UBO. */
               res->cb_bindings[s] |= 1 << i;
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }

   /* Invalidate all COMPUTE constbufs because they are aliased with 3D. */
   nv50->dirty_cp |= NV50_NEW_CP_CONSTBUF;
   nv50->constbuf_dirty[NV50_SHADER_STAGE_COMPUTE] |= nv50->constbuf_valid[NV50_SHADER_STAGE_COMPUTE];
   nv50->state.uniform_buffer_bound[NV50_SHADER_STAGE_COMPUTE] = false;
}

static bool
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
{
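   /* Translate on first use; afterwards the program only needs a (re)upload
    * of its code if it is not currently resident (prog->mem == NULL).
    */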
   if (!prog->translated) {
      prog->translated = nv50_program_translate(
         prog, nv50->screen->base.device->chipset, &nv50->base.debug);
      if (!prog->translated)
         return false;
   } else
   if (prog->mem)
      return true;

   simple_mtx_assert_locked(&nv50->screen->state_lock);
   return nv50_program_upload_code(nv50, prog);
}

static inline void
nv50_program_update_context_state(struct nv50_context *nv50,
                                  struct nv50_program *prog, int stage)
{
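   /* Track per-stage use of thread-local (scratch) storage so the TLS BO
    * stays referenced while at least one bound program needs it, and gets
    * re-referenced after the TLS area is reallocated.
    */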
   const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (prog && prog->tls_space) {
      if (nv50->state.new_tls_space)
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      if (!nv50->state.tls_required || nv50->state.new_tls_space)
         BCTX_REFN_bo(nv50->bufctx_3d, 3D_TLS, flags, nv50->screen->tls_bo);
      nv50->state.new_tls_space = false;
      nv50->state.tls_required |= 1 << stage;
   } else {
      if (nv50->state.tls_required == (1 << stage))
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TLS);
      nv50->state.tls_required &= ~(1 << stage);
   }
}

void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}

void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;
   struct pipe_rasterizer_state *rast = &nv50->rast->pipe;

   if (!fp || !rast)
      return;

   if (nv50->zsa && nv50->zsa->pipe.alpha_enabled) {
      struct pipe_framebuffer_state *fb = &nv50->framebuffer;
      bool blendable = fb->nr_cbufs == 0 || !fb->cbufs[0] ||
         nv50->screen->base.base.is_format_supported(
               &nv50->screen->base.base,
               fb->cbufs[0]->format,
               fb->cbufs[0]->texture->target,
               fb->cbufs[0]->texture->nr_samples,
               fb->cbufs[0]->texture->nr_storage_samples,
               PIPE_BIND_BLENDABLE);
      /* If we already have alphatest code, we have to keep updating
       * it. However, we only need different code if the current RT0 is
       * non-blendable; otherwise we just set the function to always pass
       * and use the hardware alpha test.
       */
      if (fp->fp.alphatest || !blendable) {
         uint8_t alphatest = PIPE_FUNC_ALWAYS + 1;
         if (!blendable)
            alphatest = nv50->zsa->pipe.alpha_func + 1;
         if (!fp->fp.alphatest)
            nv50_program_destroy(nv50, fp);
         else if (fp->mem && fp->fp.alphatest != alphatest)
            nouveau_heap_free(&fp->mem);

         fp->fp.alphatest = alphatest;
      }
   } else if (fp->fp.alphatest && fp->fp.alphatest != PIPE_FUNC_ALWAYS + 1) {
      /* Alpha test is disabled but we have a shader where it's filled
       * in. Make sure to reset the function to 'always', otherwise it'll end
       * up discarding fragments incorrectly.
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.alphatest = PIPE_FUNC_ALWAYS + 1;
   }

   if (fp->fp.force_persample_interp != rast->force_persample_interp) {
      /* Force the program to be reuploaded, which causes the
       * interpolation fixups to be reapplied.
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.force_persample_interp = rast->force_persample_interp;
   }

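   /* Program is resident and no state affecting its hw setup has changed,
    * nothing left to do.
    */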
   if (fp->mem && !(nv50->dirty_3d & (NV50_NEW_3D_FRAGPROG | NV50_NEW_3D_MIN_SAMPLES)))
      return;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}

void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}

void
nv50_compprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *cp = nv50->compprog;

   if (cp && !nv50_program_validate(nv50, cp))
      return;

   BEGIN_NV04(push, NV50_CP(CODE_CB_FLUSH), 1);
   PUSH_DATA (push, 0);
}

static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
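   /* Slot where the 'normal' FP inputs start in the result map, as recorded
    * in bits 8..15 of FP_INTERPOLANT_CTRL by nv50_fp_linkage_validate().
    */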
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

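   /* Each map slot is a 4-bit nibble of pntc[]; a value of c + 1 replaces
    * that FP input component with point coordinate component c.
    */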
   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}

/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   if (nv50->dirty_3d & NV50_NEW_3D_FRAGPROG)
      return;
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}

static int
nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
              struct nv50_varying *in, struct nv50_varying *out)
{
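   /* For every component enabled in the FP input mask (mf), record the hw
    * register of the corresponding VP output (mv). Components the VP does
    * not write keep the caller's fill value; for w the low bit is set so
    * the default becomes the constant 1.0 instead of 0.0.
    */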
   int c;
   uint8_t mv = out->mask, mf = in->mask, oid = out->hw;

   for (c = 0; c < 4; ++c) {
      if (mf & 1) {
         if (in->linear)
            lin[mid / 32] |= 1 << (mid % 32);
         if (mv & 1)
            map[mid] = oid;
         else
         if (c == 3)
            map[mid] |= 1;
         ++mid;
      }

      oid += mv & 1;
      mf >>= 1;
      mv >>= 1;
   }

   return mid;
}

void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t clpd_nr = util_last_bit(vp->vp.clip_enable | vp->vp.cull_enable);
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

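   /* If no shader changed, linkage only needs to be redone when light_twoside
    * was toggled; whether it was on is visible in the current SEMANTIC_COLOR
    * state, where FFC0_ID != BFC0_ID iff two-sided colors were mapped.
    */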
   if (!(nv50->dirty_3d & (NV50_NEW_3D_VERTPROG |
                           NV50_NEW_3D_FRAGPROG |
                           NV50_NEW_3D_GMTYPROG))) {
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    *  or is it the first byte ?
    */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   for (c = 0; c < clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m) {
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

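   /* map[] entries are bytes, emitted packed four to a 32-bit word. */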
   n = (m + 3) / 4;
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}

static int
nv50_vp_gp_mapping(uint8_t *map, int m,
                   struct nv50_program *vp, struct nv50_program *gp)
{
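   /* Match each GP input to a VP output by semantic name and index; input
    * components the VP does not write are mapped to constants (0x40 = 0.0,
    * 0x41 = 1.0 for w).
    */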
   int i, j, c;

   for (i = 0; i < gp->in_nr; ++i) {
      uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;

      for (j = 0; j < vp->out_nr; ++j) {
         if (vp->out[j].sn == gp->in[i].sn &&
             vp->out[j].si == gp->in[i].si) {
            mv = vp->out[j].mask;
            oid = vp->out[j].hw;
            break;
         }
      }

      for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
         if (mg & mv & 1)
            map[m++] = oid;
         else
         if (mg & 1)
            map[m++] = (c == 3) ? 0x41 : 0x40;
         oid += mv & 1;
      }
   }
   if (!m)
      map[m++] = 0;
   return m;
}

void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   n = (m + 3) / 4;

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}

void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

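   /* Per-target setup: on NVA0+ (n == 4) the buffer size is programmed and
    * the write offset of a dirty target is restored from its query; on NV50
    * (n == 3) the offset is tracked on the CPU (so_used) and a primitive
    * limit is derived from the target with the least space remaining.
    */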
   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      uint32_t so_used = 0;

      if (!targ->clean) {
         if (n == 4)
            nv84_hw_query_fifo_wait(push, nv50_query(targ->pq));
         else
            so_used = nv50->so_used[i];
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset + so_used);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset + so_used);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);
         if (!targ->clean) {
            assert(targ->pq);
            nv50_hw_query_pushbuf_submit(nv50, NVA0_3D_STRMOUT_OFFSET(i),
                                         nv50_query(targ->pq), 0x4);
         } else {
            BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
         const unsigned limit = (targ->pipe.buffer_size - so_used) /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
         targ->clean = false;
      }
      targ->stride = so->stride[i];
      BCTX_REFN(nv50->bufctx_3d, 3D_SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}
746