/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_upload_mgr.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_screen.h"
#include "nvc0/nvc0_resource.h"

#include "xf86drm.h"
#include "drm-uapi/nouveau_drm.h"

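/* Implements pipe->svm_migrate: best-effort migration of shared virtual
 * memory ranges between system memory and GPU VRAM, using one
 * DRM_NOUVEAU_SVM_BIND ioctl per pointer.
 */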
static void
nvc0_svm_migrate(struct pipe_context *pipe, unsigned num_ptrs,
                 const void* const* ptrs, const size_t *sizes,
                 bool to_device, bool mem_undefined)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_screen *screen = &nvc0->screen->base;
   int fd = screen->drm->fd;
   unsigned i;

   for (i = 0; i < num_ptrs; i++) {
      struct drm_nouveau_svm_bind args;
      uint64_t cmd, prio, target;

      args.va_start = (uint64_t)(uintptr_t)ptrs[i];
      if (sizes && sizes[i]) {
         args.va_end = (uint64_t)(uintptr_t)ptrs[i] + sizes[i];
         args.npages = DIV_ROUND_UP(args.va_end - args.va_start, 0x1000);
      } else {
         args.va_end = 0;
         args.npages = 0;
      }
      args.stride = 0;

      args.reserved0 = 0;
      args.reserved1 = 0;

      prio = 0;
      cmd = NOUVEAU_SVM_BIND_COMMAND__MIGRATE;
      target = to_device ? NOUVEAU_SVM_BIND_TARGET__GPU_VRAM : 0;

      args.header = cmd << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
      args.header |= prio << NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
      args.header |= target << NOUVEAU_SVM_BIND_TARGET_SHIFT;
      /* This is best effort, so no guarantee whatsoever */
      drmCommandWrite(fd, DRM_NOUVEAU_SVM_BIND,
                      &args, sizeof(args));
   }
}

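/* Flush the command stream. If the caller supplies a fence pointer, hand
 * back a reference to the fence that signals once the kicked push buffer
 * has completed.
 */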
static void
nvc0_flush(struct pipe_context *pipe,
           struct pipe_fence_handle **fence,
           unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   if (fence)
      nouveau_fence_ref(nvc0->base.fence, (struct nouveau_fence **)fence);

   PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */

   nouveau_context_update_frame_stats(&nvc0->base);
}

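/* Serialize pending work and invalidate the texture cache so that
 * subsequent texturing sees prior framebuffer writes.
 */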
static void
nvc0_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;

   IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
}

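/* Implements pipe->memory_barrier. For persistently mapped vertex and
 * constant buffers only the relevant dirty flags are raised; everything
 * else gets a full serialize, plus cache flushes as requested by the
 * barrier flags.
 */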
static void
nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i, s;

   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)
            continue;
         if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
            nvc0->base.vbo_dirty = true;
      }

      for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
         uint32_t valid = nvc0->constbuf_valid[s];

         while (valid && !nvc0->cb_dirty) {
            const unsigned i = ffs(valid) - 1;
            struct pipe_resource *res;

            valid &= ~(1 << i);
            if (nvc0->constbuf[s][i].user)
               continue;

            res = nvc0->constbuf[s][i].u.buf;
            if (!res)
               continue;

            if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
               nvc0->cb_dirty = true;
         }
      }
   } else {
      /* Pretty much any writing by shaders needs a serialize after
       * it. Especially when moving between 3d and compute pipelines, but even
       * without that.
       */
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   }

   /* If we're going to texture from a buffer/image written by a shader, we
    * must flush the texture cache.
    */
   if (flags & PIPE_BARRIER_TEXTURE)
      IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
      nvc0->cb_dirty = true;
   if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
      nvc0->base.vbo_dirty = true;
}

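/* Encode a debug string into the command stream as NOP method data, so it
 * shows up in captured push buffers; the tail bytes are padded into a final
 * data word.
 */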
static void
nvc0_emit_string_marker(struct pipe_context *pipe, const char *str, int len)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;
   int string_words = len / 4;
   int data_words;

   if (len <= 0)
      return;
   string_words = MIN2(string_words, NV04_PFIFO_MAX_PACKET_LEN);
   if (string_words == NV04_PFIFO_MAX_PACKET_LEN)
      data_words = string_words;
   else
      data_words = string_words + !!(len & 3);
   BEGIN_NIC0(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
   if (string_words)
      PUSH_DATAp(push, str, string_words);
   if (string_words != data_words) {
      int data = 0;
      memcpy(&data, &str[string_words * 4], len & 3);
      PUSH_DATA (push, data);
   }
}

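/* Device reset tracking is not implemented here, so always report that no
 * reset has occurred.
 */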
static enum pipe_reset_status
nvc0_get_device_reset_status(struct pipe_context *pipe)
{
   return PIPE_NO_RESET;
}

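/* Drop every resource reference held in the context state so the resources
 * can be freed; called from nvc0_destroy().
 */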
static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
   unsigned s, i;

   nouveau_bufctx_del(&nvc0->bufctx_3d);
   nouveau_bufctx_del(&nvc0->bufctx);
   nouveau_bufctx_del(&nvc0->bufctx_cp);

   util_unreference_framebuffer_state(&nvc0->framebuffer);

   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);

   for (s = 0; s < 6; ++s) {
      for (i = 0; i < nvc0->num_textures[s]; ++i)
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);

      for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i)
         if (!nvc0->constbuf[s][i].user)
            pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);

      for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
         pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);

      for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
         pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
            pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
      }
   }

   for (s = 0; s < 2; ++s) {
      for (i = 0; i < NVC0_MAX_SURFACE_SLOTS; ++i)
         pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
   }

   for (i = 0; i < nvc0->num_tfbbufs; ++i)
      pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);

   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
        ++i) {
      struct pipe_resource **res = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, i);
      pipe_resource_reference(res, NULL);
   }
   util_dynarray_fini(&nvc0->global_residents);

   if (nvc0->tcp_empty)
      nvc0->base.pipe.delete_tcs_state(&nvc0->base.pipe, nvc0->tcp_empty);
}

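/* Tear down the context: save the current hardware state on the screen if
 * this context owns it, flush outstanding work, then release all references.
 */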
static void
nvc0_destroy(struct pipe_context *pipe)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   simple_mtx_lock(&nvc0->screen->state_lock);
   if (nvc0->screen->cur_ctx == nvc0) {
      nvc0->screen->cur_ctx = NULL;
      nvc0->screen->save_state = nvc0->state;
      nvc0->screen->save_state.tfb = NULL;
   }
   simple_mtx_unlock(&nvc0->screen->state_lock);

   if (nvc0->base.pipe.stream_uploader)
      u_upload_destroy(nvc0->base.pipe.stream_uploader);

   /* Unset bufctx, we don't want to revalidate any resources after the flush.
    * Other contexts will always set their bufctx again on action calls.
    */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, NULL);
   PUSH_KICK(nvc0->base.pushbuf);

   nvc0_context_unreference_resources(nvc0);
   nvc0_blitctx_destroy(nvc0);

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->tex_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->img_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   nouveau_fence_cleanup(&nvc0->base);
   nouveau_context_destroy(&nvc0->base);
}

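/* Invoked whenever the push buffer is kicked to the kernel: move on to a
 * fresh fence and update the state of the outstanding ones.
 */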
void
nvc0_default_kick_notify(struct nouveau_context *context)
{
   struct nvc0_context *nvc0 = nvc0_context(&context->pipe);

   _nouveau_fence_next(context);
   _nouveau_fence_update(context->screen, true);

   nvc0->state.flushed = true;
}

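/* Scan all context bindings for the given resource and mark the matching
 * state dirty. "ref" is the number of known references; it is decremented
 * on every hit so the walk can stop early once all of them have been found.
 */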
static int
nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nvc0_context *nvc0 = nvc0_context(&ctx->pipe);
   unsigned s, i;

   if (res->bind & PIPE_BIND_RENDER_TARGET) {
      for (i = 0; i < nvc0->framebuffer.nr_cbufs; ++i) {
         if (nvc0->framebuffer.cbufs[i] &&
             nvc0->framebuffer.cbufs[i]->texture == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (res->bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nvc0->framebuffer.zsbuf &&
          nvc0->framebuffer.zsbuf->texture == res) {
         nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (res->target == PIPE_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (nvc0->vtxbuf[i].buffer.resource == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
            if (!--ref)
               return ref;
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < nvc0->num_textures[s]; ++i) {
            if (nvc0->textures[s][i] &&
                nvc0->textures[s][i]->texture == res) {
               nvc0->textures_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_TEX(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TEX(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i) {
            if (!(nvc0->constbuf_valid[s] & (1 << i)))
               continue;
            if (!nvc0->constbuf[s][i].user &&
                nvc0->constbuf[s][i].u.buf == res) {
               nvc0->constbuf_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_CB(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_CB(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_BUFFERS; ++i) {
            if (nvc0->buffers[s][i].buffer == res) {
               nvc0->buffers_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_BUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_BUF);
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
            if (nvc0->images[s][i].resource == res) {
               nvc0->images_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
               }
               /* decrement only on a match, like the loops above */
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}

static void
nvc0_context_get_sample_position(struct pipe_context *, unsigned, unsigned,
                                 float *);

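/* Create and initialize an nvc0 pipe context: set up the buffer contexts,
 * install the state/draw/compute entry points, and pin the screen's
 * permanently resident buffers.
 */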
struct pipe_context *
nvc0_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_context *nvc0;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nvc0 = CALLOC_STRUCT(nvc0_context);
   if (!nvc0)
      return NULL;
   pipe = &nvc0->base.pipe;

   if (!nvc0_blitctx_create(nvc0))
      goto out_err;

   if (nouveau_context_init(&nvc0->base, &screen->base))
      goto out_err;
   nvc0->base.kick_notify = nvc0_default_kick_notify;
   nvc0->base.pushbuf->rsvd_kick = 5;

   ret = nouveau_bufctx_new(nvc0->base.client, 2, &nvc0->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(nvc0->base.client, NVC0_BIND_3D_COUNT,
                               &nvc0->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(nvc0->base.client, NVC0_BIND_CP_COUNT,
                               &nvc0->bufctx_cp);
   if (ret)
      goto out_err;

   nvc0->screen = screen;
   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nvc0_destroy;

   pipe->draw_vbo = nvc0_draw_vbo;
   pipe->clear = nvc0_clear;
   pipe->launch_grid = (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) ?
      nve4_launch_grid : nvc0_launch_grid;

   pipe->svm_migrate = nvc0_svm_migrate;

   pipe->flush = nvc0_flush;
   pipe->texture_barrier = nvc0_texture_barrier;
   pipe->memory_barrier = nvc0_memory_barrier;
   pipe->get_sample_position = nvc0_context_get_sample_position;
   pipe->emit_string_marker = nvc0_emit_string_marker;
   pipe->get_device_reset_status = nvc0_get_device_reset_status;

   nvc0_init_query_functions(nvc0);
   nvc0_init_surface_functions(nvc0);
   nvc0_init_state_functions(nvc0);
   nvc0_init_transfer_functions(nvc0);
   nvc0_init_resource_functions(pipe);
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
      nvc0_init_bindless_functions(pipe);

   list_inithead(&nvc0->tex_head);
   list_inithead(&nvc0->img_head);

   nvc0->base.invalidate_resource_storage = nvc0_invalidate_resource_storage;

   pipe->create_video_codec = nvc0_create_decoder;
   pipe->create_video_buffer = nvc0_video_buffer_create;

   /* shader builtin library is per-screen, but we need a context for m2mf */
   nvc0_program_library_upload(nvc0);
   nvc0_program_init_tcp_empty(nvc0);
   if (!nvc0->tcp_empty)
      goto out_err;
   /* set the empty tctl prog on next draw in case one is never set */
   nvc0->dirty_3d |= NVC0_NEW_3D_TCTLPROG;

   /* Do not bind the COMPUTE driver constbuf at screen initialization because
    * CBs are aliased between 3D and COMPUTE, but make sure it will be bound if
    * a grid is launched later. */
   nvc0->dirty_cp |= NVC0_NEW_CP_DRIVERCONST;

   /* now that there are no more opportunities for errors, set the current
    * context if there isn't already one.
    */
   simple_mtx_lock(&screen->state_lock);
   if (!screen->cur_ctx) {
      nvc0->state = screen->save_state;
      screen->cur_ctx = nvc0;
   }
   simple_mtx_unlock(&screen->state_lock);

   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
   PUSH_SPACE(nvc0->base.pushbuf, 8);

   /* add permanently resident buffers to the bufctxs */

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->uniform_bo);
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   if (screen->compute) {
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->uniform_bo);
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->txc);
   }

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR;

   if (screen->poly_cache)
      BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->poly_cache);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->tls);

   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

   nvc0->base.scratch.bo_size = 2 << 20;

   memset(nvc0->tex_handles, ~0, sizeof(nvc0->tex_handles));

   util_dynarray_init(&nvc0->global_residents, NULL);

   // Make sure that the first TSC entry has the SRGB conversion bit set,
   // since we use it as a fallback on Fermi for TXF, and on Kepler+
   // generations for FBFETCH handling (which also uses TXF).
   //
   // NOTE: Preliminary testing suggests that this isn't necessary at all at
   // least on GM20x (untested on Kepler). However this is ~free, so no reason
   // not to do it.
   if (!screen->tsc.entries[0])
      nvc0_upload_tsc0(nvc0);

   // On Fermi, mark samplers dirty so that the proper binding can happen
   if (screen->base.class_3d < NVE4_3D_CLASS) {
      for (int s = 0; s < 6; s++)
         nvc0->samplers_dirty[s] = 1;
      nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLERS;
      nvc0->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
   }

   nouveau_fence_new(&nvc0->base, &nvc0->base.fence);

   return pipe;

out_err:
   if (nvc0) {
      if (pipe->stream_uploader)
         u_upload_destroy(pipe->stream_uploader);
      if (nvc0->bufctx_3d)
         nouveau_bufctx_del(&nvc0->bufctx_3d);
      if (nvc0->bufctx_cp)
         nouveau_bufctx_del(&nvc0->bufctx_cp);
      if (nvc0->bufctx)
         nouveau_bufctx_del(&nvc0->bufctx);
      FREE(nvc0->blit);
      FREE(nvc0);
   }
   return NULL;
}

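/* Run nvc0_resource_validate() on every resource referenced by the given
 * bufctx; on_flush selects the "current" buffer list instead of "pending".
 */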
void
nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx,
                  bool on_flush)
{
   struct list_head *list = on_flush ? &bufctx->current : &bufctx->pending;
   struct list_head *it;
   NOUVEAU_DRV_STAT_IFD(unsigned count = 0);

   for (it = list->next; it != list; it = it->next) {
      struct nouveau_bufref *ref = (struct nouveau_bufref *)it;
      struct nv04_resource *res = ref->priv;
      if (res)
         nvc0_resource_validate(nvc0, res, (unsigned)ref->priv_data);
      NOUVEAU_DRV_STAT_IFD(count++);
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, resource_validate_count, count);
}

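/* Fixed sample positions for each supported MSAA mode, expressed on a
 * 16x16 subpixel grid (each coordinate is in 1/16ths of a pixel).
 */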
const void *
nvc0_get_sample_locations(unsigned sample_count)
{
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },   /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 },   /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb },   /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 },   /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 0:
   case 1: ptr = ms1; break;
   case 2: ptr = ms2; break;
   case 4: ptr = ms4; break;
   case 8: ptr = ms8; break;
   default:
      assert(0);
      return NULL; /* bad sample count -> undefined locations */
   }
   return ptr;
}

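/* Implements pipe->get_sample_position: convert the 1/16-subpixel grid
 * coordinates above to floats in [0, 1).
 */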
static void
nvc0_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   const uint8_t (*ptr)[2];

   ptr = nvc0_get_sample_locations(sample_count);
   if (!ptr)
      return;

   xy[0] = ptr[sample_index][0] * 0.0625f;
   xy[1] = ptr[sample_index][1] * 0.0625f;
}
639