
#include "util/format/u_format.h"

#include "nv50/nv50_context.h"

#include "nv50/g80_defs.xml.h"

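/* A miptree transfer uses two m2mf rects: rect[0] describes the region inside
 * the miptree being accessed, rect[1] describes the linear staging buffer in
 * GART that the CPU actually maps.
 */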
struct nv50_transfer {
   struct pipe_transfer base;
   struct nv50_m2mf_rect rect[2];
   uint32_t nblocksx;
   uint32_t nblocksy;
};

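/* Describe the (x, y, z) origin of mip level l of a resource as an M2MF rect,
 * converting pixel coordinates into block (and sample) coordinates and folding
 * the layer offset into the base address for non-3D layouts.
 */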
void
nv50_m2mf_rect_setup(struct nv50_m2mf_rect *rect,
                     struct pipe_resource *restrict res, unsigned l,
                     unsigned x, unsigned y, unsigned z)
{
   struct nv50_miptree *mt = nv50_miptree(res);
   const unsigned w = u_minify(res->width0, l);
   const unsigned h = u_minify(res->height0, l);

   rect->bo = mt->base.bo;
   rect->domain = mt->base.domain;
   rect->base = mt->level[l].offset;
   if (mt->base.bo->offset != mt->base.address)
      rect->base += mt->base.address - mt->base.bo->offset;
   rect->pitch = mt->level[l].pitch;
   if (util_format_is_plain(res->format)) {
      rect->width = w << mt->ms_x;
      rect->height = h << mt->ms_y;
      rect->x = x << mt->ms_x;
      rect->y = y << mt->ms_y;
   } else {
      rect->width = util_format_get_nblocksx(res->format, w);
      rect->height = util_format_get_nblocksy(res->format, h);
      rect->x = util_format_get_nblocksx(res->format, x);
      rect->y = util_format_get_nblocksy(res->format, y);
   }
   rect->tile_mode = mt->level[l].tile_mode;
   rect->cpp = util_format_get_blocksize(res->format);

   if (mt->layout_3d) {
      rect->z = z;
      rect->depth = u_minify(res->depth0, l);
   } else {
      rect->base += z * mt->layer_stride;
      rect->z = 0;
      rect->depth = 1;
   }
}

/* This is very similar to nv50_2d_texture_do_copy, but doesn't require
 * miptree objects. Maybe refactor? Although it's not straightforward.
 */
static void
nv50_2d_transfer_rect(struct nv50_context *nv50,
                      const struct nv50_m2mf_rect *dst,
                      const struct nv50_m2mf_rect *src,
                      uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nouveau_bufctx *bctx = nv50->bufctx;
   const int cpp = dst->cpp;

   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   PUSH_VAL(push);

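   /* The 2D engine is used for a raw block copy here, so the surface format
    * only has to match the block size; pick any format with the right number
    * of bytes per block.
    */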
   uint32_t format;
   switch (cpp) {
   case 1:
      format = G80_SURFACE_FORMAT_R8_UNORM;
      break;
   case 2:
      format = G80_SURFACE_FORMAT_R16_UNORM;
      break;
   case 4:
      format = G80_SURFACE_FORMAT_BGRA8_UNORM;
      break;
   case 8:
      format = G80_SURFACE_FORMAT_RGBA16_FLOAT;
      break;
   case 16:
      format = G80_SURFACE_FORMAT_RGBA32_FLOAT;
      break;
   default:
      assert(!"Unexpected cpp");
      format = G80_SURFACE_FORMAT_R8_UNORM;
   }

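   /* Tiled (memtype) surfaces are described by their tile mode, depth and
    * layer; linear ones by their pitch.
    */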
   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NV04(push, NV50_2D(SRC_FORMAT), 5);
      PUSH_DATA (push, format);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
      BEGIN_NV04(push, NV50_2D(SRC_WIDTH), 4);
      PUSH_DATA (push, src->width);
      PUSH_DATA (push, src->height);
      PUSH_DATAh(push, src->bo->offset + src->base);
      PUSH_DATA (push, src->bo->offset + src->base);
   } else {
      BEGIN_NV04(push, NV50_2D(SRC_FORMAT), 2);
      PUSH_DATA (push, format);
      PUSH_DATA (push, 1);
      BEGIN_NV04(push, NV50_2D(SRC_PITCH), 5);
      PUSH_DATA (push, src->pitch);
      PUSH_DATA (push, src->width);
      PUSH_DATA (push, src->height);
      PUSH_DATAh(push, src->bo->offset + src->base);
      PUSH_DATA (push, src->bo->offset + src->base);
   }

   if (nouveau_bo_memtype(dst->bo)) {
      BEGIN_NV04(push, NV50_2D(DST_FORMAT), 5);
      PUSH_DATA (push, format);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
      BEGIN_NV04(push, NV50_2D(DST_WIDTH), 4);
      PUSH_DATA (push, dst->width);
      PUSH_DATA (push, dst->height);
      PUSH_DATAh(push, dst->bo->offset + dst->base);
      PUSH_DATA (push, dst->bo->offset + dst->base);
   } else {
      BEGIN_NV04(push, NV50_2D(DST_FORMAT), 2);
      PUSH_DATA (push, format);
      PUSH_DATA (push, 1);
      BEGIN_NV04(push, NV50_2D(DST_PITCH), 5);
      PUSH_DATA (push, dst->pitch);
      PUSH_DATA (push, dst->width);
      PUSH_DATA (push, dst->height);
      PUSH_DATAh(push, dst->bo->offset + dst->base);
      PUSH_DATA (push, dst->bo->offset + dst->base);
   }

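   /* Do a 1:1, point-sampled blit of an nblocksx x nblocksy region from
    * (src->x, src->y) to (dst->x, dst->y).
    */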
   BEGIN_NV04(push, NV50_2D(BLIT_CONTROL), 1);
   PUSH_DATA (push, NV50_2D_BLIT_CONTROL_FILTER_POINT_SAMPLE);
   BEGIN_NV04(push, NV50_2D(BLIT_DST_X), 4);
   PUSH_DATA (push, dst->x);
   PUSH_DATA (push, dst->y);
   PUSH_DATA (push, nblocksx);
   PUSH_DATA (push, nblocksy);
   BEGIN_NV04(push, NV50_2D(BLIT_DU_DX_FRACT), 4);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_2D(BLIT_SRC_X_FRACT), 4);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, src->x);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, src->y);

   nouveau_bufctx_reset(bctx, 0);
}

void
nv50_m2mf_transfer_rect(struct nv50_context *nv50,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nouveau_bufctx *bctx = nv50->bufctx;
   const int cpp = dst->cpp;
   uint32_t src_ofst = src->base;
   uint32_t dst_ofst = dst->base;
   uint32_t height = nblocksy;
   uint32_t sy = src->y;
   uint32_t dy = dst->y;

   assert(dst->cpp == src->cpp);

   /* Workaround: M2MF appears to break at the 64k boundary for tiled
    * textures, which can really only happen with RGBA32 formats.
    */
   bool eng2d = false;
   if (nouveau_bo_memtype(src->bo)) {
      if (src->width * cpp > 65536)
         eng2d = true;
   }
   if (nouveau_bo_memtype(dst->bo)) {
      if (dst->width * cpp > 65536)
         eng2d = true;
   }
   if (eng2d) {
      nv50_2d_transfer_rect(nv50, dst, src, nblocksx, nblocksy);
      return;
   }

   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   PUSH_VAL(push);

   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NV04(push, NV50_M2MF(LINEAR_IN), 6);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->width * cpp);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
   } else {
      src_ofst += src->y * src->pitch + src->x * cpp;

      BEGIN_NV04(push, NV50_M2MF(LINEAR_IN), 1);
      PUSH_DATA (push, 1);
      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_PITCH_IN), 1);
      PUSH_DATA (push, src->pitch);
   }

   if (nouveau_bo_memtype(dst->bo)) {
      BEGIN_NV04(push, NV50_M2MF(LINEAR_OUT), 6);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->width * cpp);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
   } else {
      dst_ofst += dst->y * dst->pitch + dst->x * cpp;

      BEGIN_NV04(push, NV50_M2MF(LINEAR_OUT), 1);
      PUSH_DATA (push, 1);
      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_PITCH_OUT), 1);
      PUSH_DATA (push, dst->pitch);
   }

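   /* Copy at most 2047 lines per M2MF operation, adjusting the tiling
    * position (tiled) or the linear offset (pitch-linear) between chunks.
    */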
   while (height) {
      int line_count = height > 2047 ? 2047 : height;

      BEGIN_NV04(push, NV50_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->bo->offset + src_ofst);
      PUSH_DATAh(push, dst->bo->offset + dst_ofst);

      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_OFFSET_IN), 2);
      PUSH_DATA (push, src->bo->offset + src_ofst);
      PUSH_DATA (push, dst->bo->offset + dst_ofst);

      if (nouveau_bo_memtype(src->bo)) {
         BEGIN_NV04(push, NV50_M2MF(TILING_POSITION_IN), 1);
         PUSH_DATA (push, (sy << 16) | (src->x * cpp));
      } else {
         src_ofst += line_count * src->pitch;
      }
      if (nouveau_bo_memtype(dst->bo)) {
         BEGIN_NV04(push, NV50_M2MF(TILING_POSITION_OUT), 1);
         PUSH_DATA (push, (dy << 16) | (dst->x * cpp));
      } else {
         dst_ofst += line_count * dst->pitch;
      }

      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_LINE_LENGTH_IN), 4);
      PUSH_DATA (push, nblocksx * cpp);
      PUSH_DATA (push, line_count);
      PUSH_DATA (push, (1 << 8) | (1 << 0));
      PUSH_DATA (push, 0);

      height -= line_count;
      sy += line_count;
      dy += line_count;
   }

   nouveau_bufctx_reset(bctx, 0);
}

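/* Upload size bytes from data to dst + offset through the 2D engine's SIFC
 * (CPU-to-surface) path, describing the destination as a linear R8 surface
 * and pushing the data inline in chunks of at most 32 KiB.
 */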
void
nv50_sifc_linear_u8(struct nouveau_context *nv,
                    struct nouveau_bo *dst, unsigned offset, unsigned domain,
                    unsigned size, const void *data)
{
   struct nv50_context *nv50 = nv50_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = DIV_ROUND_UP(size, 4);
   unsigned max_size = 0x8000;

   nouveau_bufctx_refn(nv50->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nv50->bufctx);

   PUSH_VAL(push);

   while (count) {
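      /* Keep the surface base 256-byte aligned; the low bits of the offset
       * are applied as the destination x coordinate instead (one byte per
       * R8 texel).
       */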
      unsigned xcoord = offset & 0xff;
      offset &= ~0xff;

      BEGIN_NV04(push, NV50_2D(DST_FORMAT), 2);
      PUSH_DATA (push, G80_SURFACE_FORMAT_R8_UNORM);
      PUSH_DATA (push, 1);
      BEGIN_NV04(push, NV50_2D(DST_PITCH), 5);
      PUSH_DATA (push, 262144);
      PUSH_DATA (push, 65536);
      PUSH_DATA (push, 1);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NV04(push, NV50_2D(SIFC_BITMAP_ENABLE), 2);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, G80_SURFACE_FORMAT_R8_UNORM);
      BEGIN_NV04(push, NV50_2D(SIFC_WIDTH), 10);
      PUSH_DATA (push, MIN2(size, max_size));
      PUSH_DATA (push, 1);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, 1);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, 1);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, xcoord);
      PUSH_DATA (push, 0);
      PUSH_DATA (push, 0);

      unsigned iter_count = MIN2(count, max_size / 4);
      count -= iter_count;
      offset += max_size;
      size -= max_size;

      while (iter_count) {
         unsigned nr = MIN2(iter_count, NV04_PFIFO_MAX_PACKET_LEN);

         BEGIN_NI04(push, NV50_2D(SIFC_DATA), nr);
         PUSH_DATAp(push, src, nr);

         src += nr;
         iter_count -= nr;
      }
   }

   nouveau_bufctx_reset(nv50->bufctx, 0);
}

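/* Copy size bytes between two linear buffer objects with M2MF, in chunks of
 * at most 128 KiB per operation.
 */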
void
nv50_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nv50_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   PUSH_VAL(push);

   BEGIN_NV04(push, NV50_M2MF(LINEAR_IN), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_M2MF(LINEAR_OUT), 1);
   PUSH_DATA (push, 1);

   while (size) {
      unsigned bytes = MIN2(size, 1 << 17);

      BEGIN_NV04(push, NV50_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->offset + srcoff);
      PUSH_DATAh(push, dst->offset + dstoff);
      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_OFFSET_IN), 2);
      PUSH_DATA (push, src->offset + srcoff);
      PUSH_DATA (push, dst->offset + dstoff);
      BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_LINE_LENGTH_IN), 4);
      PUSH_DATA (push, bytes);
      PUSH_DATA (push, 1);
      PUSH_DATA (push, (1 << 8) | (1 << 0));
      PUSH_DATA (push, 0);

      srcoff += bytes;
      dstoff += bytes;
      size -= bytes;
   }

   nouveau_bufctx_reset(bctx, 0);
}

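/* Map a miptree transfer by allocating a linear staging buffer in GART,
 * copying the requested box into it with M2MF when the transfer is readable,
 * and returning the CPU mapping of that staging buffer to the caller.
 */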
void *
nv50_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nv50_context *nv50 = nv50_context(pctx);
   struct nouveau_device *dev = nv50->screen->base.device;
   const struct nv50_miptree *mt = nv50_miptree(res);
   struct nv50_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (usage & PIPE_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nv50_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   if (util_format_is_plain(res->format)) {
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }

   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->base.box.depth, NULL, &tx->rect[1].bo);
   if (ret) {
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

   if (usage & PIPE_MAP_READ) {
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < box->depth; ++i) {
         nv50_m2mf_transfer_rect(nv50, &tx->rect[1], &tx->rect[0],
                                 tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

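   /* If the staging buffer already has a CPU mapping, reuse it directly. */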
   if (tx->rect[1].bo->map) {
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_MAP_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_MAP_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = BO_MAP(nv50->base.screen, tx->rect[1].bo, flags, nv50->base.client);
   if (ret) {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}

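/* Unmap a miptree transfer: for writable transfers, copy each layer of the
 * staging buffer back into the miptree before releasing it.
 */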
void
nv50_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nv50_context *nv50 = nv50_context(pctx);
   struct nv50_transfer *tx = (struct nv50_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_MAP_WRITE) {
      for (i = 0; i < tx->base.box.depth; ++i) {
         nv50_m2mf_transfer_rect(nv50, &tx->rect[0], &tx->rect[1],
                                 tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nv50->base.fence,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}

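/* Write constant buffer data through the 3D engine's CB_ADDR / CB_DATA
 * methods, splitting the upload into packets of at most
 * NV04_PFIFO_MAX_PACKET_LEN words and re-pointing CB_ADDR for each one.
 */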
static void
nv50_cb_bo_push(struct nouveau_context *nv,
                struct nouveau_bo *bo, unsigned domain,
                unsigned bufid,
                unsigned offset, unsigned words,
                const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   assert(!(offset & 3));

   while (words) {
      unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

      PUSH_SPACE(push, nr + 3);
      PUSH_REF1 (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
      PUSH_DATA (push, (offset << 6) | bufid);
      BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}

void
nv50_cb_push(struct nouveau_context *nv,
             struct nv04_resource *res,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nv50_context *nv50 = nv50_context(&nv->pipe);
   struct nv50_constbuf *cb = NULL;
   int s, bufid;
   /* Go through all the constbuf binding points of this buffer and try to
    * find one which contains the region to be updated.
    */
   for (s = 0; s < NV50_MAX_SHADER_STAGES && !cb; s++) {
      uint16_t bindings = res->cb_bindings[s];
      while (bindings) {
         int i = ffs(bindings) - 1;
         uint32_t cb_offset = nv50->constbuf[s][i].offset;

         bindings &= ~(1 << i);
         if (cb_offset <= offset &&
             cb_offset + nv50->constbuf[s][i].size >= offset + words * 4) {
            cb = &nv50->constbuf[s][i];
            bufid = s * 16 + i;
            break;
         }
      }
   }

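   /* If a bound constant buffer covers the updated range, push the data
    * through the 3D engine; otherwise fall back to a plain buffer upload.
    */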
   if (cb) {
      nv50_cb_bo_push(nv, res->bo, res->domain,
                      bufid, offset - cb->offset, words, data);
   } else {
      nv->push_data(nv, res->bo, res->offset + offset, res->domain,
                    words * 4, data);
   }
}