/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <[email protected]>
 */

#include <stdbool.h>
#include <stdio.h>
#include "main/arrayobj.h"
#include "util/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/state.h"
#include "main/varray.h"
#include "state_tracker/st_draw.h"

#include "vbo_private.h"

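/**
 * Print debug info about the currently buffered vertices and primitives.
 */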
static void
vbo_exec_debug_verts(struct vbo_exec_context *exec)
{
   GLuint count = exec->vtx.vert_count;
   GLuint i;

   printf("%s: %u vertices %d primitives, %d vertsize\n",
          __func__,
          count,
          exec->vtx.prim_count,
          exec->vtx.vertex_size);

   for (i = 0 ; i < exec->vtx.prim_count ; i++) {
      printf(" prim %d: %s %d..%d %s %s\n",
             i,
             _mesa_lookup_prim_by_nr(exec->vtx.mode[i]),
             exec->vtx.draw[i].start,
             exec->vtx.draw[i].start + exec->vtx.draw[i].count,
             exec->vtx.markers[i].begin ? "BEGIN" : "(wrap)",
             exec->vtx.markers[i].end ? "END" : "(wrap)");
   }
}


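/**
 * Copy the vertices of the last (still open) primitive to the side buffer
 * so they can be re-emitted at the start of the next vertex buffer.
 * Returns the number of vertices copied.
 */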
static GLuint
vbo_exec_copy_vertices(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   const GLuint sz = exec->vtx.vertex_size;
   fi_type *dst = exec->vtx.copied.buffer;
   unsigned last = exec->vtx.prim_count - 1;
   unsigned start = exec->vtx.draw[last].start;
   const fi_type *src = exec->vtx.buffer_map + start * sz;

   return vbo_copy_vertices(ctx, ctx->Driver.CurrentExecPrimitive,
                            start,
                            &exec->vtx.draw[last].count,
                            exec->vtx.markers[last].begin,
                            sz, false, dst, src);
}


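/**
 * Set up the internal Begin/End VAO for drawing: bind the exec vertex
 * buffer, configure and enable the attribute arrays needed for the current
 * vertex processing mode, and make this VAO the one used by the draw call.
 */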
/* TODO: populate these as the vertex is defined:
 */
static void
vbo_exec_bind_arrays(struct gl_context *ctx,
                     struct gl_vertex_array_object **old_vao,
                     GLbitfield *old_vp_input_filter)
{
   struct vbo_context *vbo = vbo_context(ctx);
   struct gl_vertex_array_object *vao = vbo->VAO;
   struct vbo_exec_context *exec = &vbo->exec;

   GLintptr buffer_offset;
   if (exec->vtx.bufferobj) {
      assert(exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Pointer);
      buffer_offset = exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset +
                      exec->vtx.buffer_offset;
   } else {
      /* Ptr into ordinary app memory */
      buffer_offset = (GLbyte *)exec->vtx.buffer_map - (GLbyte *)NULL;
   }

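   /* The current vertex processing mode (fixed-function vs. vertex
    * program/shader) selects how VBO_ATTRIBs are remapped to VERT_ATTRIBs
    * below.
    */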
   const gl_vertex_processing_mode mode = ctx->VertexProgram._VPMode;

   GLbitfield vao_enabled, vao_filter;
   if (_mesa_hw_select_enabled(ctx)) {
      /* HW GL_SELECT has fixed input */
      vao_enabled = vao_filter = VERT_BIT_POS | VERT_BIT_SELECT_RESULT_OFFSET;
   } else {
      vao_enabled = _vbo_get_vao_enabled_from_vbo(mode, exec->vtx.enabled);
      vao_filter = _vbo_get_vao_filter(mode);
   }

   /* At first disable arrays no longer needed */
   _mesa_disable_vertex_array_attribs(ctx, vao, ~vao_enabled);
   assert((~vao_enabled & vao->Enabled) == 0);

   /* Bind the buffer object */
   const GLuint stride = exec->vtx.vertex_size*sizeof(GLfloat);
   _mesa_bind_vertex_buffer(ctx, vao, 0, exec->vtx.bufferobj, buffer_offset,
                            stride, false, false);

   /* Retrieve the mapping from VBO_ATTRIB to VERT_ATTRIB space
    * Note that the position/generic0 aliasing is done in the VAO.
    */
   const GLubyte *const vao_to_vbo_map = _vbo_attribute_alias_map[mode];
   /* Now set the enabled arrays */
   GLbitfield mask = vao_enabled;
   while (mask) {
      const int vao_attr = u_bit_scan(&mask);
      const GLubyte vbo_attr = vao_to_vbo_map[vao_attr];

      const GLubyte size = exec->vtx.attr[vbo_attr].size;
      const GLenum16 type = exec->vtx.attr[vbo_attr].type;
      const GLuint offset = (GLuint)((GLbyte *)exec->vtx.attrptr[vbo_attr] -
                                     (GLbyte *)exec->vtx.vertex);
      assert(offset <= ctx->Const.MaxVertexAttribRelativeOffset);

      /* Set and enable */
      _vbo_set_attrib_format(ctx, vao, vao_attr, buffer_offset,
                             size, type, offset);

      /* The vao is initially created with all bindings set to 0. */
      assert(vao->VertexAttrib[vao_attr].BufferBindingIndex == 0);
   }
   _mesa_enable_vertex_array_attribs(ctx, vao, vao_enabled);
   assert(vao_enabled == vao->Enabled);
   assert(!exec->vtx.bufferobj ||
          (vao_enabled & ~vao->VertexAttribBufferMask) == 0);

   _mesa_save_and_set_draw_vao(ctx, vao, vao_filter,
                               old_vao, old_vp_input_filter);
   _mesa_set_varying_vp_inputs(ctx, vao_filter &
                               ctx->Array._DrawVAO->_EnabledWithMapMode);
}


/**
 * Unmap the VBO. This is called before drawing.
 */
static void
vbo_exec_vtx_unmap(struct vbo_exec_context *exec)
{
   if (exec->vtx.bufferobj) {
      struct gl_context *ctx = gl_context_from_vbo_exec(exec);

      if (!ctx->Extensions.ARB_buffer_storage) {
         GLintptr offset = exec->vtx.buffer_used -
                           exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset;
         GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
                             sizeof(float);

         if (length)
            _mesa_bufferobj_flush_mapped_range(ctx, offset, length,
                                               exec->vtx.bufferobj,
                                               MAP_INTERNAL);
      }

      exec->vtx.buffer_used += (exec->vtx.buffer_ptr -
                                exec->vtx.buffer_map) * sizeof(float);

      assert(exec->vtx.buffer_used <= ctx->Const.glBeginEndBufferSize);
      assert(exec->vtx.buffer_ptr != NULL);

      _mesa_bufferobj_unmap(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
      exec->vtx.buffer_map = NULL;
      exec->vtx.buffer_ptr = NULL;
      exec->vtx.max_vert = 0;
   }
}

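/**
 * Return true if the exec VBO still has more than 1 KB of free space, i.e.
 * there is room to accumulate more vertex data before a new buffer must be
 * allocated.
 */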
static bool
vbo_exec_buffer_has_space(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   return ctx->Const.glBeginEndBufferSize > exec->vtx.buffer_used + 1024;
}


/**
 * Map the vertex buffer to begin storing glVertex, glColor, etc data.
 */
void
vbo_exec_vtx_map(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   const GLenum usage = GL_STREAM_DRAW_ARB;
   GLenum accessRange = GL_MAP_WRITE_BIT | /* for MapBufferRange */
                        GL_MAP_UNSYNCHRONIZED_BIT;

   if (ctx->Extensions.ARB_buffer_storage) {
      /* We sometimes read from the buffer, so map it for read too.
       * Only the persistent mapping can do that, because the non-persistent
       * mapping uses flags that are incompatible with GL_MAP_READ_BIT.
       */
      accessRange |= GL_MAP_PERSISTENT_BIT |
                     GL_MAP_COHERENT_BIT |
                     GL_MAP_READ_BIT;
   } else {
      accessRange |= GL_MAP_INVALIDATE_RANGE_BIT |
                     GL_MAP_FLUSH_EXPLICIT_BIT |
                     MESA_MAP_NOWAIT_BIT;
   }

   if (!exec->vtx.bufferobj)
      return;

   assert(!exec->vtx.buffer_map);
   assert(!exec->vtx.buffer_ptr);

   if (vbo_exec_buffer_has_space(exec)) {
      /* The VBO exists and there's room for more */
      if (exec->vtx.bufferobj->Size > 0) {
         exec->vtx.buffer_map = (fi_type *)
            _mesa_bufferobj_map_range(ctx,
                                      exec->vtx.buffer_used,
                                      ctx->Const.glBeginEndBufferSize
                                      - exec->vtx.buffer_used,
                                      accessRange,
                                      exec->vtx.bufferobj,
                                      MAP_INTERNAL);
         exec->vtx.buffer_ptr = exec->vtx.buffer_map;
      }
      else {
         exec->vtx.buffer_ptr = exec->vtx.buffer_map = NULL;
      }
   }

   if (!exec->vtx.buffer_map) {
      /* Need to allocate a new VBO */
      exec->vtx.buffer_used = 0;

      if (_mesa_bufferobj_data(ctx, GL_ARRAY_BUFFER_ARB,
                               ctx->Const.glBeginEndBufferSize,
                               NULL, usage,
                               GL_MAP_WRITE_BIT |
                               (ctx->Extensions.ARB_buffer_storage ?
                                GL_MAP_PERSISTENT_BIT |
                                GL_MAP_COHERENT_BIT |
                                GL_MAP_READ_BIT : 0) |
                               GL_DYNAMIC_STORAGE_BIT |
                               GL_CLIENT_STORAGE_BIT,
                               exec->vtx.bufferobj)) {
         /* buffer allocation worked, now map the buffer */
         exec->vtx.buffer_map =
            (fi_type *)_mesa_bufferobj_map_range(ctx,
                                                 0, ctx->Const.glBeginEndBufferSize,
                                                 accessRange,
                                                 exec->vtx.bufferobj,
                                                 MAP_INTERNAL);
      }
      else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "VBO allocation");
         exec->vtx.buffer_map = NULL;
      }
   }

   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.buffer_offset = 0;

   if (!exec->vtx.buffer_map) {
      /* out of memory */
      vbo_install_exec_vtxfmt_noop(ctx);
   }
   else {
      if (_mesa_using_noop_vtxfmt(ctx->Dispatch.Exec)) {
         /* The no-op functions are installed so switch back to regular
          * functions. We do this test just to avoid frequent and needless
          * calls to vbo_install_exec_vtxfmt().
          */
         vbo_init_dispatch_begin_end(ctx);
      }
   }

   if (0)
      printf("map %d..\n", exec->vtx.buffer_used);
}



/**
 * Execute the buffer and save copied verts.
 */
void
vbo_exec_vtx_flush(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);

   /* Only unmap if persistent mappings are unsupported. */
   bool persistent_mapping = ctx->Extensions.ARB_buffer_storage &&
                             exec->vtx.bufferobj &&
                             exec->vtx.buffer_map;

   if (0)
      vbo_exec_debug_verts(exec);

   if (exec->vtx.prim_count &&
       exec->vtx.vert_count) {

      exec->vtx.copied.nr = vbo_exec_copy_vertices(exec);

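      /* If all buffered vertices were copied (i.e. they all belong to the
       * still-open primitive), there is no complete primitive to draw yet.
       */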
      if (exec->vtx.copied.nr != exec->vtx.vert_count) {
         struct gl_vertex_array_object *old_vao;
         GLbitfield old_vp_input_filter;

         /* Prepare and set the Begin/End internal VAO for drawing. */
         vbo_exec_bind_arrays(ctx, &old_vao, &old_vp_input_filter);

         if (ctx->NewState)
            _mesa_update_state(ctx);

         if (!persistent_mapping)
            vbo_exec_vtx_unmap(exec);

         assert(ctx->NewState == 0);

         if (0)
            printf("%s %d %d\n", __func__, exec->vtx.prim_count,
                   exec->vtx.vert_count);

         st_prepare_draw(ctx, ST_PIPELINE_RENDER_STATE_MASK);

         ctx->Driver.DrawGalliumMultiMode(ctx, &exec->vtx.info,
                                          exec->vtx.draw,
                                          exec->vtx.mode,
                                          exec->vtx.prim_count);

         /* Get new storage -- unless asked not to. */
         if (!persistent_mapping)
            vbo_exec_vtx_map(exec);

         _mesa_restore_draw_vao(ctx, old_vao, old_vp_input_filter);
      }
   }

   if (persistent_mapping) {
      exec->vtx.buffer_used += (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
                               sizeof(float);
      exec->vtx.buffer_map = exec->vtx.buffer_ptr;

      /* Set the buffer offset for the next draw. */
      exec->vtx.buffer_offset = exec->vtx.buffer_used;

      if (!vbo_exec_buffer_has_space(exec)) {
         /* This will allocate a new buffer. */
         vbo_exec_vtx_unmap(exec);
         vbo_exec_vtx_map(exec);
      }
   }

   if (exec->vtx.vertex_size == 0)
      exec->vtx.max_vert = 0;
   else
      exec->vtx.max_vert = vbo_compute_max_verts(exec);

   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.prim_count = 0;
   exec->vtx.vert_count = 0;
}