1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "pipe/p_defines.h"
24 #include "util/u_framebuffer.h"
25 #include "util/u_upload_mgr.h"
26
27 #include "nv50/nv50_context.h"
28 #include "nv50/nv50_screen.h"
29 #include "nv50/nv50_resource.h"
30
31 static void
nv50_flush(struct pipe_context * pipe,struct pipe_fence_handle ** fence,unsigned flags)32 nv50_flush(struct pipe_context *pipe,
33 struct pipe_fence_handle **fence,
34 unsigned flags)
35 {
36 struct nouveau_context *context = nouveau_context(pipe);
37
38 if (fence)
39 nouveau_fence_ref(context->fence, (struct nouveau_fence **)fence);
40
41 PUSH_KICK(context->pushbuf);
42
43 nouveau_context_update_frame_stats(nouveau_context(pipe));
44 }
45
46 static void
nv50_texture_barrier(struct pipe_context * pipe,unsigned flags)47 nv50_texture_barrier(struct pipe_context *pipe, unsigned flags)
48 {
49 struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
50
51 BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
52 PUSH_DATA (push, 0);
53 BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
54 PUSH_DATA (push, 0x20);
55 }
56
57 static void
nv50_memory_barrier(struct pipe_context * pipe,unsigned flags)58 nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
59 {
60 struct nv50_context *nv50 = nv50_context(pipe);
61 struct nouveau_pushbuf *push = nv50->base.pushbuf;
62 int i, s;
63
64 if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
65 for (i = 0; i < nv50->num_vtxbufs; ++i) {
66 if (!nv50->vtxbuf[i].buffer.resource && !nv50->vtxbuf[i].is_user_buffer)
67 continue;
68 if (nv50->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
69 nv50->base.vbo_dirty = true;
70 }
71
72 for (s = 0; s < NV50_MAX_3D_SHADER_STAGES && !nv50->cb_dirty; ++s) {
73 uint32_t valid = nv50->constbuf_valid[s];
74
75 while (valid && !nv50->cb_dirty) {
76 const unsigned i = ffs(valid) - 1;
77 struct pipe_resource *res;
78
79 valid &= ~(1 << i);
80 if (nv50->constbuf[s][i].user)
81 continue;
82
83 res = nv50->constbuf[s][i].u.buf;
84 if (!res)
85 continue;
86
87 if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
88 nv50->cb_dirty = true;
89 }
90 }
91 } else {
92 BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
93 PUSH_DATA (push, 0);
94 }
95
96 /* If we're going to texture from a buffer/image written by a shader, we
97 * must flush the texture cache.
98 */
99 if (flags & PIPE_BARRIER_TEXTURE) {
100 BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
101 PUSH_DATA (push, 0x20);
102 }
103
104 if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
105 nv50->cb_dirty = true;
106 if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
107 nv50->base.vbo_dirty = true;
108 }
109
110 static void
nv50_emit_string_marker(struct pipe_context * pipe,const char * str,int len)111 nv50_emit_string_marker(struct pipe_context *pipe, const char *str, int len)
112 {
113 struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
114 int string_words = len / 4;
115 int data_words;
116
117 if (len <= 0)
118 return;
119 string_words = MIN2(string_words, NV04_PFIFO_MAX_PACKET_LEN);
120 if (string_words == NV04_PFIFO_MAX_PACKET_LEN)
121 data_words = string_words;
122 else
123 data_words = string_words + !!(len & 3);
124 BEGIN_NI04(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
125 if (string_words)
126 PUSH_DATAp(push, str, string_words);
127 if (string_words != data_words) {
128 int data = 0;
129 memcpy(&data, &str[string_words * 4], len & 3);
130 PUSH_DATA (push, data);
131 }
132 }
133
134 void
nv50_default_kick_notify(struct nouveau_context * context)135 nv50_default_kick_notify(struct nouveau_context *context)
136 {
137 struct nv50_context *nv50 = nv50_context(&context->pipe);
138
139 _nouveau_fence_next(context);
140 _nouveau_fence_update(context->screen, true);
141 nv50->state.flushed = true;
142 }
143
144 static void
nv50_context_unreference_resources(struct nv50_context * nv50)145 nv50_context_unreference_resources(struct nv50_context *nv50)
146 {
147 unsigned s, i;
148
149 nouveau_bufctx_del(&nv50->bufctx_3d);
150 nouveau_bufctx_del(&nv50->bufctx);
151 nouveau_bufctx_del(&nv50->bufctx_cp);
152
153 util_unreference_framebuffer_state(&nv50->framebuffer);
154
155 assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
156 for (i = 0; i < nv50->num_vtxbufs; ++i)
157 pipe_vertex_buffer_unreference(&nv50->vtxbuf[i]);
158
159 for (s = 0; s < NV50_MAX_SHADER_STAGES; ++s) {
160 assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
161 for (i = 0; i < nv50->num_textures[s]; ++i)
162 pipe_sampler_view_reference(&nv50->textures[s][i], NULL);
163
164 for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i)
165 if (!nv50->constbuf[s][i].user)
166 pipe_resource_reference(&nv50->constbuf[s][i].u.buf, NULL);
167 }
168
169 for (i = 0; i < nv50->global_residents.size / sizeof(struct pipe_resource *);
170 ++i) {
171 struct pipe_resource **res = util_dynarray_element(
172 &nv50->global_residents, struct pipe_resource *, i);
173 pipe_resource_reference(res, NULL);
174 }
175 util_dynarray_fini(&nv50->global_residents);
176 }
177
/* pipe->destroy: tear down the context.
 *
 * Ordering matters here: the pushbuf must be detached and kicked before
 * resources are unreferenced, and fence/context cleanup comes last.
 */
static void
nv50_destroy(struct pipe_context *pipe)
{
   struct nv50_context *nv50 = nv50_context(pipe);

   simple_mtx_lock(&nv50->screen->state_lock);
   if (nv50->screen->cur_ctx == nv50) {
      nv50->screen->cur_ctx = NULL;
      /* Save off the state in case another context gets created */
      nv50->screen->save_state = nv50->state;
   }
   simple_mtx_unlock(&nv50->screen->state_lock);

   if (nv50->base.pipe.stream_uploader)
      u_upload_destroy(nv50->base.pipe.stream_uploader);

   /* Detach the bufctx and submit any remaining commands before dropping
    * the resources they reference. */
   nouveau_pushbuf_bufctx(nv50->base.pushbuf, NULL);
   PUSH_KICK(nv50->base.pushbuf);

   nv50_context_unreference_resources(nv50);

   FREE(nv50->blit);

   nouveau_fence_cleanup(&nv50->base);
   nouveau_context_destroy(&nv50->base);
}
204
/* Called when a resource's backing storage is replaced (e.g. migration or
 * reallocation): scan all binding points that might reference @res, mark the
 * corresponding state dirty and reset the matching bufctx bins so the new
 * storage gets validated.
 *
 * @ref is the number of known references left to find; each match decrements
 * it, and the scan stops early once it reaches zero. Returns the number of
 * references still unaccounted for.
 */
static int
nv50_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nv50_context *nv50 = nv50_context(&ctx->pipe);
   /* Resources with no declared bind flags are treated as vertex buffers
    * (the common case for user arrays). */
   unsigned bind = res->bind ? res->bind : PIPE_BIND_VERTEX_BUFFER;
   unsigned s, i;

   if (bind & PIPE_BIND_RENDER_TARGET) {
      assert(nv50->framebuffer.nr_cbufs <= PIPE_MAX_COLOR_BUFS);
      for (i = 0; i < nv50->framebuffer.nr_cbufs; ++i) {
         if (nv50->framebuffer.cbufs[i] &&
             nv50->framebuffer.cbufs[i]->texture == res) {
            nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nv50->framebuffer.zsbuf &&
          nv50->framebuffer.zsbuf->texture == res) {
         nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (bind & (PIPE_BIND_VERTEX_BUFFER |
               PIPE_BIND_INDEX_BUFFER |
               PIPE_BIND_CONSTANT_BUFFER |
               PIPE_BIND_STREAM_OUTPUT |
               PIPE_BIND_SAMPLER_VIEW)) {

      assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
      for (i = 0; i < nv50->num_vtxbufs; ++i) {
         if (nv50->vtxbuf[i].buffer.resource == res) {
            nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
            if (!--ref)
               return ref;
         }
      }

      for (s = 0; s < NV50_MAX_SHADER_STAGES; ++s) {
         assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
         for (i = 0; i < nv50->num_textures[s]; ++i) {
            if (nv50->textures[s][i] &&
                nv50->textures[s][i]->texture == res) {
               /* Compute textures live in a separate bufctx/dirty set. */
               if (unlikely(s == NV50_SHADER_STAGE_COMPUTE)) {
                  nv50->dirty_cp |= NV50_NEW_CP_TEXTURES;
                  nouveau_bufctx_reset(nv50->bufctx_cp, NV50_BIND_CP_TEXTURES);
               } else {
                  nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
                  nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < NV50_MAX_SHADER_STAGES; ++s) {
         for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i) {
            if (!(nv50->constbuf_valid[s] & (1 << i)))
               continue;
            /* User constbufs don't reference a pipe_resource. */
            if (!nv50->constbuf[s][i].user &&
                nv50->constbuf[s][i].u.buf == res) {
               nv50->constbuf_dirty[s] |= 1 << i;
               if (unlikely(s == NV50_SHADER_STAGE_COMPUTE)) {
                  nv50->dirty_cp |= NV50_NEW_CP_CONSTBUF;
                  nouveau_bufctx_reset(nv50->bufctx_cp, NV50_BIND_CP_CB(i));
               } else {
                  nv50->dirty_3d |= NV50_NEW_3D_CONSTBUF;
                  nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}
293
294 static void
295 nv50_context_get_sample_position(struct pipe_context *, unsigned, unsigned,
296 float *);
297
/* Create an nv50 pipe_context on @pscreen.
 *
 * Allocates the context, sets up buffer contexts and the pushbuf, installs
 * the pipe driver entry points, picks the video decoding path based on
 * chipset, and pins screen-global buffers into the 3D/compute bufctxs.
 * Returns NULL on failure (all partially-created state is torn down).
 */
struct pipe_context *
nv50_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nv50_screen *screen = nv50_screen(pscreen);
   struct nv50_context *nv50;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nv50 = CALLOC_STRUCT(nv50_context);
   if (!nv50)
      return NULL;
   pipe = &nv50->base.pipe;

   if (!nv50_blitctx_create(nv50))
      goto out_err;

   if (nouveau_context_init(&nv50->base, &screen->base))
      goto out_err;

   /* One bufctx per engine usage: misc (fence), 3D state, compute state. */
   ret = nouveau_bufctx_new(nv50->base.client, 2, &nv50->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(nv50->base.client, NV50_BIND_3D_COUNT,
                               &nv50->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(nv50->base.client, NV50_BIND_CP_COUNT,
                               &nv50->bufctx_cp);
   if (ret)
      goto out_err;

   nv50->base.copy_data = nv50_m2mf_copy_linear;
   nv50->base.push_data = nv50_sifc_linear_u8;
   nv50->base.push_cb   = nv50_cb_push;

   nv50->screen = screen;
   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nv50_destroy;

   pipe->draw_vbo = nv50_draw_vbo;
   pipe->clear = nv50_clear;
   pipe->launch_grid = nv50_launch_grid;

   pipe->flush = nv50_flush;
   pipe->texture_barrier = nv50_texture_barrier;
   pipe->memory_barrier = nv50_memory_barrier;
   pipe->get_sample_position = nv50_context_get_sample_position;
   pipe->emit_string_marker = nv50_emit_string_marker;

   simple_mtx_lock(&screen->state_lock);
   if (!screen->cur_ctx) {
      /* Restore the last context's state here, normally handled during
       * context switch
       */
      nv50->state = screen->save_state;
      screen->cur_ctx = nv50;
   }
   simple_mtx_unlock(&screen->state_lock);

   nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx);
   nv50->base.kick_notify = nv50_default_kick_notify;
   nv50->base.pushbuf->rsvd_kick = 5;
   PUSH_SPACE(nv50->base.pushbuf, 8);

   nv50_init_query_functions(nv50);
   nv50_init_surface_functions(nv50);
   nv50_init_state_functions(nv50);
   nv50_init_resource_functions(pipe);

   nv50->base.invalidate_resource_storage = nv50_invalidate_resource_storage;

   /* Select the video decode path by chipset generation. */
   if (screen->base.device->chipset < 0x84 ||
       debug_get_bool_option("NOUVEAU_PMPEG", false)) {
      /* PMPEG */
      nouveau_context_init_vdec(&nv50->base);
   } else if (screen->base.device->chipset < 0x98 ||
              screen->base.device->chipset == 0xa0) {
      /* VP2 */
      pipe->create_video_codec = nv84_create_decoder;
      pipe->create_video_buffer = nv84_video_buffer_create;
   } else {
      /* VP3/4 */
      pipe->create_video_codec = nv98_create_decoder;
      pipe->create_video_buffer = nv98_video_buffer_create;
   }

   /* Keep the screen-global buffers resident for 3D (and compute, if
    * available): shader code, uniforms, texture cache, stack. */
   flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;

   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->code);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->uniforms);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->stack_bo);
   if (screen->compute) {
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->code);
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->uniforms);
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->txc);
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->stack_bo);
   }

   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;

   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nv50->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

   nv50->base.scratch.bo_size = 2 << 20;

   util_dynarray_init(&nv50->global_residents, NULL);

   // Make sure that the first TSC entry has SRGB conversion bit set, since we
   // use it as a fallback.
   if (!screen->tsc.entries[0])
      nv50_upload_tsc0(nv50);

   // And mark samplers as dirty so that the first slot would get bound to the
   // zero entry if it's not otherwise set.
   nv50->dirty_3d |= NV50_NEW_3D_SAMPLERS;

   nouveau_fence_new(&nv50->base, &nv50->base.fence);

   return pipe;

out_err:
   if (pipe->stream_uploader)
      u_upload_destroy(pipe->stream_uploader);
   if (nv50->bufctx_3d)
      nouveau_bufctx_del(&nv50->bufctx_3d);
   if (nv50->bufctx_cp)
      nouveau_bufctx_del(&nv50->bufctx_cp);
   if (nv50->bufctx)
      nouveau_bufctx_del(&nv50->bufctx);
   FREE(nv50->blit);
   FREE(nv50);
   return NULL;
}
439
440 void
nv50_bufctx_fence(struct nv50_context * nv50,struct nouveau_bufctx * bufctx,bool on_flush)441 nv50_bufctx_fence(struct nv50_context *nv50, struct nouveau_bufctx *bufctx, bool on_flush)
442 {
443 struct list_head *list = on_flush ? &bufctx->current : &bufctx->pending;
444 struct list_head *it;
445
446 for (it = list->next; it != list; it = it->next) {
447 struct nouveau_bufref *ref = (struct nouveau_bufref *)it;
448 struct nv04_resource *res = ref->priv;
449 if (res)
450 nv50_resource_validate(nv50, res, (unsigned)ref->priv_data);
451 }
452 }
453
static void
nv50_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   /* pipe->get_sample_position: report the fixed MSAA sample location for
    * @sample_index at the given @sample_count, in 1/16th-pixel units mapped
    * to [0,1) window coordinates.
    */
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },   /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 },   /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb },   /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 },   /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */

   const uint8_t (*table)[2];

   switch (sample_count) {
   case 0:
   case 1:
      table = ms1;
      break;
   case 2:
      table = ms2;
      break;
   case 4:
      table = ms4;
      break;
   case 8:
      table = ms8;
      break;
   default:
      assert(0);
      return; /* bad sample count -> undefined locations */
   }

   /* Scale from 1/16th-pixel grid to [0,1). */
   xy[0] = table[sample_index][0] * 0.0625f;
   xy[1] = table[sample_index][1] * 0.0625f;
}
486