/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <[email protected]>
 *    Christian Gmeiner <[email protected]>
 */

#include "etnaviv_context.h"

#include "etnaviv_blend.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_compiler.h"
#include "etnaviv_debug.h"
#include "etnaviv_emit.h"
#include "etnaviv_fence.h"
#include "etnaviv_ml.h"
#include "etnaviv_query.h"
#include "etnaviv_query_acc.h"
#include "etnaviv_rasterizer.h"
#include "etnaviv_resource.h"
#include "etnaviv_screen.h"
#include "etnaviv_shader.h"
#include "etnaviv_state.h"
#include "etnaviv_surface.h"
#include "etnaviv_texture.h"
#include "etnaviv_transfer.h"
#include "etnaviv_translate.h"
#include "etnaviv_zsa.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/hash_table.h"
#include "util/u_blitter.h"
#include "util/u_draw.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
#include "util/u_debug_cb.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

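/* Emit a FE NOP command whose second word carries an arbitrary 32-bit
 * payload; used below to embed marker data in the command stream. */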
static inline void
etna_emit_nop_with_data(struct etna_cmd_stream *stream, uint32_t value)
{
   etna_cmd_stream_emit(stream, VIV_FE_NOP_HEADER_OP_NOP);
   etna_cmd_stream_emit(stream, value);
}

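/* pipe_context::emit_string_marker - embed a debug string into the command
 * stream as a sequence of NOPs, packing four bytes of the string into each
 * NOP payload. */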
static void
etna_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_cmd_stream *stream = ctx->stream;
   const uint32_t *buf = (const void *)string;

   etna_cmd_stream_reserve(stream, len * 2);

   while (len >= 4) {
      etna_emit_nop_with_data(stream, *buf);
      buf++;
      len -= 4;
   }

   /* copy remainder bytes without reading past end of input string */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      etna_emit_nop_with_data(stream, w);
   }
}

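/* pipe_context::set_frontend_noop - flush outstanding work, then mark the
 * context so that subsequent command stream flushes are submitted as no-ops
 * (or re-enable normal operation). */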
static void
etna_set_frontend_noop(struct pipe_context *pctx, bool enable)
{
   struct etna_context *ctx = etna_context(pctx);

   pctx->flush(pctx, NULL, 0);
   ctx->is_noop = enable;
}

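/* pipe_context::destroy - drop the resource tracking tables, framebuffer
 * references, the blitter, the stream uploader, the command stream and the
 * transfer pool, then free the context. */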
static void
etna_context_destroy(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (ctx->pending_resources)
      _mesa_hash_table_destroy(ctx->pending_resources, NULL);

   if (ctx->updated_resources)
      _mesa_set_destroy(ctx->updated_resources, NULL);

   if (ctx->flush_resources)
      _mesa_set_destroy(ctx->flush_resources, NULL);

   util_copy_framebuffer_state(&ctx->framebuffer_s, NULL);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   if (ctx->stream)
      etna_cmd_stream_del(ctx->stream);

   etna_texture_fini(pctx);

   slab_destroy_child(&ctx->transfer_pool);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   FREE(pctx);
}

/* Update render state where needed based on draw operation */
static void
etna_update_state_for_draw(struct etna_context *ctx, const struct pipe_draw_info *info)
{
   /* Handle primitive restart:
    * - If not an indexed draw, we don't care about the state of the primitive restart bit.
    * - Otherwise, set the bit in INDEX_STREAM_CONTROL in the index buffer state
    *   accordingly.
    * - If the value of the INDEX_STREAM_CONTROL register changed due to this, or
    *   primitive restart is enabled and the restart index changed, mark the index
    *   buffer state as dirty.
    */

   if (info->index_size) {
      uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;

      if (info->primitive_restart)
         new_control |= VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;
      else
         new_control &= ~VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;

      if (ctx->index_buffer.FE_INDEX_STREAM_CONTROL != new_control ||
          (info->primitive_restart && ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX != info->restart_index)) {
         ctx->index_buffer.FE_INDEX_STREAM_CONTROL = new_control;
         ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX = info->restart_index;
         ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
      }
   }
}

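/* Look up (or compile) the vertex shader variant for the given key and make
 * it current; flags ETNA_DIRTY_SHADER when the active variant changes. */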
static bool
etna_get_vs(struct etna_context *ctx, struct etna_shader_key *const key)
{
   const struct etna_shader_variant *old = ctx->shader.vs;

   ctx->shader.vs = etna_shader_variant(ctx->shader.bind_vs, key, &ctx->base.debug, true);

   if (!ctx->shader.vs)
      return false;

   if (old != ctx->shader.vs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

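/* Look up (or compile) the fragment shader variant for the given key. On
 * pre-HALTI2 hardware shadow comparisons are lowered in the shader, so the
 * key is first extended with the relevant sampler swizzles and compare
 * functions. */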
static bool
etna_get_fs(struct etna_context *ctx, struct etna_shader_key *const key)
{
   const struct etna_shader_variant *old = ctx->shader.fs;

   /* update the key if we need to run nir_lower_sample_tex_compare(..). */
   if (ctx->screen->info->halti < 2 &&
       (ctx->dirty & (ETNA_DIRTY_SAMPLERS | ETNA_DIRTY_SAMPLER_VIEWS))) {

      for (unsigned int i = 0; i < ctx->num_fragment_sampler_views; i++) {
         if (ctx->sampler[i]->compare_mode == PIPE_TEX_COMPARE_NONE)
            continue;

         key->has_sample_tex_compare = 1;
         key->num_texture_states = ctx->num_fragment_sampler_views;

         key->tex_swizzle[i].swizzle_r = ctx->sampler_view[i]->swizzle_r;
         key->tex_swizzle[i].swizzle_g = ctx->sampler_view[i]->swizzle_g;
         key->tex_swizzle[i].swizzle_b = ctx->sampler_view[i]->swizzle_b;
         key->tex_swizzle[i].swizzle_a = ctx->sampler_view[i]->swizzle_a;

         key->tex_compare_func[i] = ctx->sampler[i]->compare_func;
      }
   }

   ctx->shader.fs = etna_shader_variant(ctx->shader.bind_fs, key, &ctx->base.debug, true);

   if (!ctx->shader.fs)
      return false;

   if (old != ctx->shader.fs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

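/* pipe_context::draw_vbo - validate and set up the draw, upload user index
 * buffers, select shader variants, mark all referenced resources as
 * read/written, then emit the state delta and the draw command. */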
static void
etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer_s;
   uint32_t draw_mode;
   unsigned i;

   if (!indirect &&
       !info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
      return;

   if (ctx->vertex_elements == NULL || ctx->vertex_elements->num_elements == 0)
      return; /* Nothing to do */

   if (unlikely(ctx->rasterizer->cull_face == PIPE_FACE_FRONT_AND_BACK &&
                u_decomposed_prim(info->mode) == MESA_PRIM_TRIANGLES))
      return;

   if (!etna_render_condition_check(pctx))
      return;

   int prims = u_decomposed_prims_for_vertices(info->mode, draws[0].count);
   if (unlikely(prims <= 0)) {
      DBG("Invalid draw primitive mode=%i or no primitives to be drawn", info->mode);
      return;
   }

   draw_mode = translate_draw_mode(info->mode);
   if (draw_mode == ETNA_NO_MATCH) {
      BUG("Unsupported draw mode");
      return;
   }

   /* Upload a user index buffer. */
   unsigned index_offset = 0;
   struct pipe_resource *indexbuf = NULL;

   if (info->index_size) {
      indexbuf = info->has_user_indices ? NULL : info->index.resource;
      if (info->has_user_indices &&
          !util_upload_index_buffer(pctx, info, &draws[0], &indexbuf, &index_offset, 4)) {
         BUG("Index buffer upload failed.");
         return;
      }
      /* Add start to index offset, when rendering indexed */
      index_offset += draws[0].start * info->index_size;

      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);

      if (!ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
         BUG("Unsupported or no index buffer");
         return;
      }
   } else {
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = 0;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
   }
   ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;

   struct etna_shader_key key = {
      .front_ccw = ctx->rasterizer->front_ccw,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_yinvert = !!ctx->rasterizer->sprite_coord_mode,
   };

   if (pfb->cbufs[0])
      key.frag_rb_swap = !!translate_pe_format_rb_swap(pfb->cbufs[0]->format);

   if (!etna_get_vs(ctx, &key) || !etna_get_fs(ctx, &key)) {
      BUG("compiled shaders are not okay");
      return;
   }

   /* Update any derived state */
   if (!etna_state_update(ctx))
      return;

   /*
    * Figure out the buffers/features we need:
    */
   if (ctx->dirty & ETNA_DIRTY_ZSA) {
      if (etna_depth_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);

      if (etna_stencil_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);
   }

   if (ctx->dirty & ETNA_DIRTY_FRAMEBUFFER) {
      for (i = 0; i < pfb->nr_cbufs; i++) {
         struct pipe_resource *surf;

         if (!pfb->cbufs[i])
            continue;

         surf = pfb->cbufs[i]->texture;
         resource_written(ctx, surf);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_SHADER) {
      /* Mark constant buffers as being read */
      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_VERTEX].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_VERTEX].cb[i].buffer);

      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].cb[i].buffer);
   }

   if (ctx->dirty & ETNA_DIRTY_VERTEX_BUFFERS) {
      /* Mark VBOs as being read */
      u_foreach_bit(i, ctx->vertex_buffer.enabled_mask) {
         assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
         resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_INDEX_BUFFER) {
      /* Mark index buffer as being read */
      resource_read(ctx, indexbuf);
   }

   /* Mark textures as being read */
   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      if (ctx->sampler_view[i]) {
         if (ctx->dirty & ETNA_DIRTY_SAMPLER_VIEWS)
            resource_read(ctx, ctx->sampler_view[i]->texture);

         /* if texture was modified since the last update,
          * we need to clear the texture cache and possibly
          * resolve/update ts
          */
         etna_update_sampler_source(ctx->sampler_view[i], i);
      }
   }

   ctx->stats.prims_generated += u_reduced_prims_for_vertices(info->mode, draws[0].count);
   ctx->stats.draw_calls++;

   /* Update state for this draw operation */
   etna_update_state_for_draw(ctx, info);

   /* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
   etna_emit_state(ctx);

   if (!VIV_FEATURE(screen, ETNA_FEATURE_NEW_GPIPE)) {
      switch (draw_mode) {
      case PRIMITIVE_TYPE_LINE_LOOP:
      case PRIMITIVE_TYPE_LINE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_FAN:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0 |
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_REUSE);
         break;
      default:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0);
         break;
      }
   }

   if (screen->info->halti >= 2) {
      /* On HALTI2+ (GC3000 and higher) only use instanced drawing commands, as the blob does */
      etna_draw_instanced(ctx->stream, info->index_size, draw_mode, info->instance_count,
                          draws[0].count, info->index_size ? draws->index_bias : draws[0].start);
   } else {
      if (info->index_size)
         etna_draw_indexed_primitives(ctx->stream, draw_mode, 0, prims, draws->index_bias);
      else
         etna_draw_primitives(ctx->stream, draw_mode, draws[0].start, prims);
   }

   if (DBG_ENABLED(ETNA_DBG_DRAW_STALL)) {
      /* Stall the FE after every draw operation. This allows better
       * debug of GPU hang conditions, as the FE will indicate which
       * draw op has caused the hang. */
      etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
   }

   if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
      pctx->flush(pctx, NULL, 0);

   if (ctx->framebuffer_s.cbufs[0])
      etna_resource_level_mark_changed(etna_surface(ctx->framebuffer_s.cbufs[0])->level);
   if (ctx->framebuffer_s.zsbuf)
      etna_resource_level_mark_changed(etna_surface(ctx->framebuffer_s.zsbuf)->level);
   if (info->index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
}

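/* Bring the GPU into a known state at the start of a command stream:
 * program the invariant registers for the detected HALTI level and mark all
 * tracked state as dirty so the next draw re-emits everything. Compute-only
 * contexts skip the 3D state setup entirely. */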
static void
etna_reset_gpu_state(struct etna_context *ctx)
{
   struct etna_cmd_stream *stream = ctx->stream;
   struct etna_screen *screen = ctx->screen;
   uint32_t dummy_attribs[VIVS_NFE_GENERIC_ATTRIB__LEN] = { 0 };

   if (ctx->compute_only) {
      /* compute only context does not make use of any of the dirty state tracking. */
      assert(ctx->dirty == 0);
      assert(ctx->dirty_sampler_views == 0);
      assert(ctx->prev_active_samplers == 0);

      etna_cmd_stream_mark_end_of_context_init(stream);

      return;
   }

   etna_set_state(stream, VIVS_GL_API_MODE, VIVS_GL_API_MODE_OPENGL);
   etna_set_state(stream, VIVS_PA_W_CLIP_LIMIT, 0x34000001);
   etna_set_state(stream, VIVS_PA_FLAGS, 0x00000000); /* blob sets ZCONVERT_BYPASS on GC3000+, this messes up z for us */
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A80, 0x38a01404);
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A84, fui(8192.0));
   etna_set_state(stream, VIVS_PA_ZFARCLIPPING, 0x00000000);
   etna_set_state(stream, VIVS_RA_HDEPTH_CONTROL, 0x00007000);
   etna_set_state(stream, VIVS_PS_CONTROL_EXT, 0x00000000);

   /* There is no HALTI0 specific state */
   if (screen->info->halti >= 1) { /* Only on HALTI1+ */
      etna_set_state(stream, VIVS_VS_HALTI1_UNK00884, 0x00000808);
   }
   if (screen->info->halti >= 2) { /* Only on HALTI2+ */
      etna_set_state(stream, VIVS_RA_UNK00E0C, 0x00000000);
   }
   if (screen->info->halti >= 3) { /* Only on HALTI3+ */
      etna_set_state(stream, VIVS_PS_HALTI3_UNK0103C, 0x76543210);
   }
   if (screen->info->halti >= 4) { /* Only on HALTI4+ */
      etna_set_state(stream, VIVS_PS_MSAA_CONFIG, 0x6fffffff & 0xf70fffff & 0xfff6ffff &
                                                  0xffff6fff & 0xfffff6ff & 0xffffff7f);
      etna_set_state(stream, VIVS_PE_HALTI4_UNK014C0, 0x00000000);
   }
   if (screen->info->halti >= 5) { /* Only on HALTI5+ */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_UNK14C40, 0x00000001);
      etna_set_state(stream, VIVS_FE_HALTI5_UNK007D8, 0x00000002);
      etna_set_state(stream, VIVS_PS_SAMPLER_BASE, 0x00000000);
      etna_set_state(stream, VIVS_VS_SAMPLER_BASE, 0x00000020);
      etna_set_state(stream, VIVS_SH_CONFIG, VIVS_SH_CONFIG_RTNE_ROUNDING);
   } else { /* Only on pre-HALTI5 */
      etna_set_state(stream, VIVS_GL_UNK03838, 0x00000000);
      etna_set_state(stream, VIVS_GL_UNK03854, 0x00000000);
   }

   if (VIV_FEATURE(screen, ETNA_FEATURE_BUG_FIXES18))
      etna_set_state(stream, VIVS_GL_BUG_FIXES, 0x6);

   if (!screen->specs.use_blt) {
      /* Enable SINGLE_BUFFER for resolve, if supported */
      etna_set_state(stream, VIVS_RS_SINGLE_BUFFER, COND(screen->specs.single_buffer, VIVS_RS_SINGLE_BUFFER_ENABLE));
   }

   if (screen->info->halti >= 5) {
      /* TXDESC cache flush - do this once at the beginning, as texture
       * descriptors are only written by the CPU once, then patched by the kernel
       * before command stream submission. It does not need flushing if the
       * referenced image data changes.
       */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_FLUSH, 0);
      etna_set_state(stream, VIVS_GL_FLUSH_CACHE,
                     VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 |
                     VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13);

      /* Icache invalidate (should do this on shader change?) */
      etna_set_state(stream, VIVS_VS_ICACHE_INVALIDATE,
                     VIVS_VS_ICACHE_INVALIDATE_UNK0 | VIVS_VS_ICACHE_INVALIDATE_UNK1 |
                     VIVS_VS_ICACHE_INVALIDATE_UNK2 | VIVS_VS_ICACHE_INVALIDATE_UNK3 |
                     VIVS_VS_ICACHE_INVALIDATE_UNK4);
   }

   /* Some GPUs (at least some GC400s have shown this behavior) come out of
    * reset with random vertex attributes enabled and also don't disable them
    * on the write to the first config register as normal. Enabling all
    * attributes seems to provide the GPU with the required edge to actually
    * disable the unused attributes on the next draw.
    */
   if (screen->info->halti >= 5) {
      etna_set_state_multi(stream, VIVS_NFE_GENERIC_ATTRIB_CONFIG0(0),
                           VIVS_NFE_GENERIC_ATTRIB__LEN, dummy_attribs);
   } else {
      etna_set_state_multi(stream, VIVS_FE_VERTEX_ELEMENT_CONFIG(0),
                           screen->info->halti >= 0 ? 16 : 12, dummy_attribs);
   }

   etna_cmd_stream_mark_end_of_context_init(stream);

   ctx->dirty = ~0L;
   ctx->dirty_sampler_views = ~0L;
   ctx->prev_active_samplers = ~0L;
}

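/* Flush the context's command stream. Accumulating queries are suspended
 * around the submit, external (non-internal) flushes also handle implicit
 * flushes of shared resources, an out-fence is created on request, and the
 * GPU state is reset for the next command stream. */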
void
etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
           enum pipe_flush_flags flags, bool internal)
{
   struct etna_context *ctx = etna_context(pctx);
   int out_fence_fd = -1;

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_suspend(aq, ctx);

   if (!internal) {
      /* flush all resources that need an implicit flush */
      set_foreach(ctx->flush_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;

         pctx->flush_resource(pctx, prsc);
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->flush_resources, NULL);

      /* reset shared resources update tracking */
      set_foreach(ctx->updated_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->updated_resources, NULL);
   }

   etna_cmd_stream_flush(ctx->stream, ctx->in_fence_fd,
                         (flags & PIPE_FLUSH_FENCE_FD) ? &out_fence_fd : NULL,
                         ctx->is_noop);

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_resume(aq, ctx);

   if (fence)
      *fence = etna_fence_create(pctx, out_fence_fd);

   _mesa_hash_table_clear(ctx->pending_resources, NULL);

   etna_reset_gpu_state(ctx);
}

static void
etna_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
                   enum pipe_flush_flags flags)
{
   etna_flush(pctx, fence, flags, false);
}

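/* Flush callback installed on the command stream so it can force a flush
 * when needed (e.g. when it runs out of space). After the internal flush the
 * context is fully dirty, so derived state is updated right away. */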
static void
etna_context_force_flush(struct etna_cmd_stream *stream, void *priv)
{
   struct pipe_context *pctx = priv;

   etna_flush(pctx, NULL, 0, true);

   /* update derived states as the context is now fully dirty */
   etna_state_update(etna_context(pctx));
}

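/* Remember a resource that needs an implicit flush_resource() on the next
 * external context flush; a reference is taken the first time the resource
 * is added. */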
void
etna_context_add_flush_resource(struct etna_context *ctx,
                                struct pipe_resource *rsc)
{
   bool found;

   _mesa_set_search_or_add(ctx->flush_resources, rsc, &found);

   if (!found)
      pipe_reference(NULL, &rsc->reference);
}

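/* pipe_context::set_debug_callback - drain the asynchronous shader compiler
 * queue so no queued job still uses the old callback, then install the new
 * one. */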
static void
etna_set_debug_callback(struct pipe_context *pctx,
                        const struct util_debug_callback *cb)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;

   util_queue_finish(&screen->shader_compiler_queue);
   u_default_set_debug_callback(pctx, cb);
}

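/* pipe_screen::context_create - allocate the command stream and resource
 * tracking tables, install the pipe_context entry points and reset the GPU
 * state. Compute-only contexts use the NN pipe when available and get no
 * blitter. */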
struct pipe_context *
etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct etna_context *ctx = CALLOC_STRUCT(etna_context);
   struct etna_screen *screen;
   struct pipe_context *pctx;
   struct etna_pipe *pipe;
   bool compute_only = flags & PIPE_CONTEXT_COMPUTE_ONLY;

   if (ctx == NULL)
      return NULL;

   pctx = &ctx->base;
   pctx->priv = ctx;
   pctx->screen = pscreen;
   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   screen = etna_screen(pscreen);
   pipe = (compute_only && screen->pipe_nn) ? screen->pipe_nn : screen->pipe;
   ctx->stream = etna_cmd_stream_new(pipe, 0x2000,
                                     &etna_context_force_flush, pctx);
   if (ctx->stream == NULL)
      goto fail;

   ctx->pending_resources = _mesa_pointer_hash_table_create(NULL);
   if (!ctx->pending_resources)
      goto fail;

   ctx->flush_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!ctx->flush_resources)
      goto fail;

   ctx->updated_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);
   if (!ctx->updated_resources)
      goto fail;

   /* context state setup */
   ctx->screen = screen;
   /* need some sane default in case gallium frontends don't set some state: */
   ctx->sample_mask = 0xffff;

   ctx->compute_only = compute_only;

   /* Set sensible defaults for state */
   etna_reset_gpu_state(ctx);

   ctx->in_fence_fd = -1;

   pctx->destroy = etna_context_destroy;
   pctx->draw_vbo = etna_draw_vbo;
   pctx->ml_subgraph_create = etna_ml_subgraph_create;
   pctx->ml_subgraph_invoke = etna_ml_subgraph_invoke;
   pctx->ml_subgraph_read_output = etna_ml_subgraph_read_outputs;
   pctx->ml_subgraph_destroy = etna_ml_subgraph_destroy;
   pctx->flush = etna_context_flush;
   pctx->set_debug_callback = etna_set_debug_callback;
   pctx->create_fence_fd = etna_create_fence_fd;
   pctx->fence_server_sync = etna_fence_server_sync;
   pctx->emit_string_marker = etna_emit_string_marker;
   pctx->set_frontend_noop = etna_set_frontend_noop;
   pctx->clear_buffer = u_default_clear_buffer;
   pctx->clear_texture = u_default_clear_texture;

   /* creation of compile states */
   pctx->create_blend_state = etna_blend_state_create;
   pctx->create_rasterizer_state = etna_rasterizer_state_create;
   pctx->create_depth_stencil_alpha_state = etna_zsa_state_create;

   etna_clear_blit_init(pctx);
   etna_query_context_init(pctx);
   etna_state_init(pctx);
   etna_surface_init(pctx);
   etna_shader_init(pctx);
   etna_texture_init(pctx);
   etna_transfer_init(pctx);

   if (!ctx->compute_only) {
      ctx->blitter = util_blitter_create(pctx);
      if (!ctx->blitter)
         goto fail;
   }

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   list_inithead(&ctx->active_acc_queries);

   return pctx;

fail:
   pctx->destroy(pctx);

   return NULL;
}

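/* Evaluate the current render condition on the CPU; returns true if
 * rendering should proceed. With the no-wait modes an unavailable query
 * result also lets the draw through. */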
bool
etna_render_condition_check(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (!ctx->cond_query)
      return true;

   perf_debug_ctx(ctx, "Implementing conditional rendering on the CPU");

   union pipe_query_result res = { 0 };
   bool wait =
      ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
      ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
      return (bool)res.u64 != ctx->cond_cond;

   return true;
}