/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 1999-2008  Brian Paul   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* Author:
 *    Keith Whitwell <[email protected]>
 */

#include <stdbool.h>
#include "main/arrayobj.h"
#include "util/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enable.h"
#include "main/mesa_private.h"
#include "main/macros.h"
#include "main/light.h"
#include "main/state.h"
#include "main/varray.h"
#include "util/bitscan.h"
#include "state_tracker/st_draw.h"
#include "pipe/p_context.h"

#include "vbo_private.h"

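/* Copy the saved values of the attributes enabled in 'mask' from the display
 * list's current-value block (*data, which is advanced past each attrib) into
 * the VBO module's shadow of the GL current attributes, flagging
 * 'state'/'pop_state' as dirty whenever a value or its format actually
 * changed.
 */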
static void
copy_vao(struct gl_context *ctx, const struct gl_vertex_array_object *vao,
         GLbitfield mask, GLbitfield state, GLbitfield pop_state,
         int shift, fi_type **data, bool *color0_changed)
{
   struct vbo_context *vbo = vbo_context(ctx);

   mask &= vao->Enabled;
   while (mask) {
      const int i = u_bit_scan(&mask);
      const struct gl_array_attributes *attrib = &vao->VertexAttrib[i];
      unsigned current_index = shift + i;
      struct gl_array_attributes *currval = &vbo->current[current_index];
      const GLubyte size = attrib->Format.User.Size;
      const GLenum16 type = attrib->Format.User.Type;
      fi_type tmp[8];
      int dmul_shift = 0;

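      /* GL_DOUBLE and 64-bit integer attribs occupy two 32-bit words per
       * component, so the raw copy below is twice as large; dmul_shift folds
       * that factor into the compare/copy sizes further down.
       */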
      if (type == GL_DOUBLE ||
          type == GL_UNSIGNED_INT64_ARB) {
         dmul_shift = 1;
         memcpy(tmp, *data, size * 2 * sizeof(GLfloat));
      } else {
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp, size, *data, type);
      }

      if (memcmp(currval->Ptr, tmp, 4 * sizeof(GLfloat) << dmul_shift) != 0) {
         memcpy((fi_type*)currval->Ptr, tmp, 4 * sizeof(GLfloat) << dmul_shift);

         if (current_index == VBO_ATTRIB_COLOR0)
            *color0_changed = true;

         /* The fixed-func vertex program uses this. */
         if (current_index == VBO_ATTRIB_MAT_FRONT_SHININESS ||
             current_index == VBO_ATTRIB_MAT_BACK_SHININESS)
            ctx->NewState |= _NEW_FF_VERT_PROGRAM;

         if (current_index == VBO_ATTRIB_EDGEFLAG)
            _mesa_update_edgeflag_state_vao(ctx);

         ctx->NewState |= state;
         ctx->PopAttribState |= pop_state;
      }

      if (type != currval->Format.User.Type ||
          (size >> dmul_shift) != currval->Format.User.Size) {
         vbo_set_vertex_format(&currval->Format, size >> dmul_shift, type);
         /* The format changed. We need to update gallium vertex elements. */
         if (state == _NEW_CURRENT_ATTRIB)
            ctx->NewState |= state;
      }

      *data += size;
   }
}

/**
 * After playback, copy everything but the position from the
 * last vertex to the saved state
 */
static void
playback_copy_to_current(struct gl_context *ctx,
                         const struct vbo_save_vertex_list *node)
{
   if (!node->cold->current_data)
      return;

   fi_type *data = node->cold->current_data;
   bool color0_changed = false;

   /* Copy conventional attribs and generics except pos */
   copy_vao(ctx, node->cold->VAO[VP_MODE_SHADER], ~VERT_BIT_POS,
            _NEW_CURRENT_ATTRIB, GL_CURRENT_BIT, 0, &data, &color0_changed);
   /* Copy materials */
   copy_vao(ctx, node->cold->VAO[VP_MODE_FF], VERT_BIT_MAT_ALL,
            _NEW_MATERIAL, GL_LIGHTING_BIT,
            VBO_MATERIAL_SHIFT, &data, &color0_changed);

   if (color0_changed && ctx->Light.ColorMaterialEnabled) {
      _mesa_update_color_material(ctx, ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }

   /* CurrentExecPrimitive
    */
   if (node->cold->prim_count) {
      const struct _mesa_prim *prim = &node->cold->prims[node->cold->prim_count - 1];
      if (prim->end)
         ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;
      else
         ctx->Driver.CurrentExecPrimitive = prim->mode;
   }
}


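/* Slow path: map the display list's vertex buffer object (reusing an
 * existing mapping when possible) and replay its vertices as immediate-mode
 * calls via _vbo_loopback_vertex_list().
 */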
static void
loopback_vertex_list(struct gl_context *ctx,
                     const struct vbo_save_vertex_list *list)
{
   struct gl_buffer_object *bo = list->cold->VAO[0]->BufferBinding[0].BufferObj;
   void *buffer = NULL;

   /* Reuse BO mapping when possible to avoid costly mapping on every glCallList(). */
   if (_mesa_bufferobj_mapped(bo, MAP_INTERNAL)) {
      if (list->cold->bo_bytes_used <= bo->Mappings[MAP_INTERNAL].Length)
         buffer = bo->Mappings[MAP_INTERNAL].Pointer;
      else
         _mesa_bufferobj_unmap(ctx, bo, MAP_INTERNAL);
   }

   if (!buffer && list->cold->bo_bytes_used)
      buffer = _mesa_bufferobj_map_range(ctx, 0, list->cold->bo_bytes_used, GL_MAP_READ_BIT,
                                         bo, MAP_INTERNAL);

   /* TODO: in this case, we shouldn't create a bo at all and instead keep
    * the in-RAM buffer. */
   _vbo_loopback_vertex_list(ctx, list, buffer);

   if (!ctx->Const.AllowMappedBuffersDuringExecution && buffer)
      _mesa_bufferobj_unmap(ctx, bo, MAP_INTERNAL);
}


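/* Replay the display list's vertices as immediate-mode calls instead of
 * drawing them in place, after the usual glBegin/glEnd nesting check.
 */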
void
vbo_save_playback_vertex_list_loopback(struct gl_context *ctx, void *data)
{
   const struct vbo_save_vertex_list *node =
      (const struct vbo_save_vertex_list *) data;

   FLUSH_FOR_DRAW(ctx);

   if (_mesa_inside_begin_end(ctx) && node->draw_begins) {
      /* Error: we're about to begin a new primitive but we're already
       * inside a glBegin/End pair.
       */
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "draw operation inside glBegin/End");
      return;
   }
   /* Various degenerate cases: translate into immediate mode
    * calls rather than trying to execute in place.
    */
   loopback_vertex_list(ctx, node);
}

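/* Result of attempting the gallium fast path: either the draw was handled
 * (DONE) or the caller must fall back to the generic path (USE_SLOW_PATH).
 */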
enum vbo_save_status {
   DONE,
   USE_SLOW_PATH,
};

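/* Fast path: draw the display list through gallium's draw_vertex_state()
 * using the pre-built pipe_vertex_state for the current vertex processing
 * mode. Returns USE_SLOW_PATH when the state tracker can't handle the case.
 */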
static enum vbo_save_status
vbo_save_playback_vertex_list_gallium(struct gl_context *ctx,
                                      const struct vbo_save_vertex_list *node,
                                      bool copy_to_current)
{
   /* Don't use this if selection or feedback mode is enabled. st/mesa can't
    * handle it.
    */
   if (!ctx->Const.HasDrawVertexState || ctx->RenderMode != GL_RENDER)
      return USE_SLOW_PATH;

   const gl_vertex_processing_mode mode = ctx->VertexProgram._VPMode;

   /* This sets which vertex arrays are enabled, which determines
    * which attribs have stride = 0 and whether edge flags are enabled.
    */
   const GLbitfield enabled = node->enabled_attribs[mode];
   _mesa_set_varying_vp_inputs(ctx, enabled);

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* Return precomputed GL errors such as invalid shaders. */
   if (!ctx->ValidPrimMask) {
      _mesa_error(ctx, ctx->DrawGLError, "glCallList");
      return DONE;
   }

   /* Use the slow path when there are vertex inputs without vertex
    * elements. This happens with zero-stride attribs and non-fixed-func
    * shaders.
    *
    * Dual-slot inputs are also unsupported because the higher slot is
    * always missing in vertex elements.
    *
    * TODO: Add support for zero-stride attribs.
    */
   struct gl_program *vp = ctx->VertexProgram._Current;

   if (vp->info.inputs_read & ~enabled || vp->DualSlotInputs)
      return USE_SLOW_PATH;

   struct pipe_vertex_state *state = node->state[mode];
   struct pipe_draw_vertex_state_info info;

   info.mode = node->mode;
   info.take_vertex_state_ownership = false;

   if (node->ctx == ctx) {
      /* This mechanism allows passing references to the driver without
       * using atomics to increase the reference count.
       *
       * This private refcount can be decremented without atomics but only
       * one context (ctx above) can use this counter (so that it's only
       * used by 1 thread).
       *
       * This number is atomically added to reference.count at
       * initialization. If it's never used, the same number is atomically
       * subtracted from reference.count before destruction. If this number
       * is decremented, we can pass one reference to the driver without
       * touching reference.count with atomics. At destruction we only
       * subtract the number of references we have not returned. This can
       * possibly turn a million atomic increments into 1 add and 1 subtract
       * atomic op over the whole lifetime of an app.
       */
      int16_t * const private_refcount = (int16_t*)&node->private_refcount[mode];
      assert(*private_refcount >= 0);

      if (unlikely(*private_refcount == 0)) {
         /* pipe_vertex_state can be reused through util_vertex_state_cache,
          * and there can be many display lists over-incrementing this number,
          * causing it to overflow.
          *
          * Guess that the same state can never be used by N=500000 display
          * lists, so one display list can only increment it by
          * INT_MAX / N.
          */
         const int16_t add_refs = INT_MAX / 500000;
         p_atomic_add(&state->reference.count, add_refs);
         *private_refcount = add_refs;
      }

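      /* With 32-bit int, INT_MAX / 500000 == 4294, so each refill above
       * reserves 4294 references at a time, well within int16_t range.
       */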
      (*private_refcount)--;
      info.take_vertex_state_ownership = true;
   }

   /* Set edge flags. */
   _mesa_update_edgeflag_state_explicit(ctx, enabled & VERT_BIT_EDGEFLAG);

   st_prepare_draw(ctx, ST_PIPELINE_RENDER_STATE_MASK_NO_VARRAYS);

   struct pipe_context *pipe = ctx->pipe;
   uint32_t velem_mask = ctx->VertexProgram._Current->info.inputs_read;

   /* Fast path using a pre-built gallium vertex buffer state. */
   if (node->modes || node->num_draws > 1) {
      const struct pipe_draw_start_count_bias *draws = node->start_counts;
      const uint8_t *mode = node->modes;
      unsigned num_draws = node->num_draws;

      if (!mode) {
         pipe->draw_vertex_state(pipe, state, velem_mask, info, draws, num_draws);
      } else {
         /* Find consecutive draws where mode doesn't vary. */
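         /* e.g. modes = {TRIANGLES, TRIANGLES, LINES} results in two
          * draw_vertex_state() calls: one covering draws [0,1], one for
          * draw [2].
          */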
         for (unsigned i = 0, first = 0; i <= num_draws; i++) {
            if (i == num_draws || mode[i] != mode[first]) {
               unsigned current_num_draws = i - first;

               /* Increase refcount to be able to use take_vertex_state_ownership
                * with all draws.
                */
               if (i != num_draws && info.take_vertex_state_ownership)
                  p_atomic_inc(&state->reference.count);

               info.mode = mode[first];
               pipe->draw_vertex_state(pipe, state, velem_mask, info, &draws[first],
                                       current_num_draws);
               first = i;
            }
         }
      }
   } else if (node->num_draws) {
      pipe->draw_vertex_state(pipe, state, velem_mask, info,
                              &node->start_count, 1);
   }

   /* Restore edge flag state and ctx->VertexProgram._VaryingInputs. */
   _mesa_update_edgeflag_state_vao(ctx);

   if (copy_to_current)
      playback_copy_to_current(ctx, node);
   return DONE;
}

/**
 * Execute the buffer and save copied verts.
 * This is called from the display list code when executing
 * a drawing command.
 */
void
vbo_save_playback_vertex_list(struct gl_context *ctx, void *data, bool copy_to_current)
{
   const struct vbo_save_vertex_list *node =
      (const struct vbo_save_vertex_list *) data;

   FLUSH_FOR_DRAW(ctx);

   if (_mesa_inside_begin_end(ctx) && node->draw_begins) {
      /* Error: we're about to begin a new primitive but we're already
       * inside a glBegin/End pair.
       */
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "draw operation inside glBegin/End");
      return;
   }

   if (vbo_save_playback_vertex_list_gallium(ctx, node, copy_to_current) == DONE)
      return;

   /* Save the Draw VAO before we override it. */
   const gl_vertex_processing_mode mode = ctx->VertexProgram._VPMode;
   GLbitfield vao_filter = _vbo_get_vao_filter(mode);
   struct gl_vertex_array_object *old_vao;
   GLbitfield old_vp_input_filter;

   _mesa_save_and_set_draw_vao(ctx, node->cold->VAO[mode], vao_filter,
                               &old_vao, &old_vp_input_filter);
   _mesa_set_varying_vp_inputs(ctx, vao_filter &
                               ctx->Array._DrawVAO->_EnabledWithMapMode);

   /* Need to do this at least once. */
   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* Return precomputed GL errors such as invalid shaders. */
   if (!ctx->ValidPrimMask) {
      _mesa_restore_draw_vao(ctx, old_vao, old_vp_input_filter);
      _mesa_error(ctx, ctx->DrawGLError, "glCallList");
      return;
   }

   assert(ctx->NewState == 0);

   struct pipe_draw_info *info = (struct pipe_draw_info *) &node->cold->info;

   st_prepare_draw(ctx, ST_PIPELINE_RENDER_STATE_MASK);

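   /* Issue the draws: a multi-mode gallium draw when the primitive mode
    * varies between draws, otherwise one (or several same-mode) draws.
    */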
   if (node->modes) {
      ctx->Driver.DrawGalliumMultiMode(ctx, info,
                                       node->start_counts,
                                       node->modes,
                                       node->num_draws);
   } else if (node->num_draws == 1) {
      ctx->Driver.DrawGallium(ctx, info, 0, NULL, &node->start_count, 1);
   } else if (node->num_draws) {
      ctx->Driver.DrawGallium(ctx, info, 0, NULL, node->start_counts,
                              node->num_draws);
   }

   _mesa_restore_draw_vao(ctx, old_vao, old_vp_input_filter);

   if (copy_to_current)
      playback_copy_to_current(ctx, node);
}
399