/**************************************************************************

Copyright 2002-2008 VMware, Inc.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <[email protected]>
 */
32
33 #include "util/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/dlist.h"
38 #include "main/eval.h"
39 #include "main/state.h"
40 #include "main/light.h"
41 #include "main/api_arrayelt.h"
42 #include "main/draw_validate.h"
43 #include "main/dispatch.h"
44 #include "util/bitscan.h"
45 #include "util/u_memory.h"
46 #include "api_exec_decl.h"
47
48 #include "vbo_private.h"
49
50 /** ID/name for immediate-mode VBO */
51 #define IMM_BUFFER_NAME 0xaabbccdd
52
53
/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* No primitives queued: nothing to draw, just rewind the vertex
       * buffer to its start.
       */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct gl_context *ctx = gl_context_from_vbo_exec(exec);
      unsigned last = exec->vtx.prim_count - 1;
      struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
      const bool last_begin = exec->vtx.markers[last].begin;
      GLuint last_count = 0;

      if (_mesa_inside_begin_end(ctx)) {
         /* The last primitive is still open (no glEnd yet): close it at
          * the current vertex count and clear its end marker so it can be
          * resumed after the flush.
          */
         last_draw->count = exec->vtx.vert_count - last_draw->start;
         last_count = last_draw->count;
         exec->vtx.markers[last].end = 0;
      }

      /* Special handling for wrapping GL_LINE_LOOP */
      if (exec->vtx.mode[last] == GL_LINE_LOOP &&
          last_count > 0 &&
          !exec->vtx.markers[last].end) {
         /* draw this section of the incomplete line loop as a line strip */
         exec->vtx.mode[last] = GL_LINE_STRIP;
         if (!last_begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_draw->start++;
            last_draw->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         /* Nothing drawn; discard the queued primitives and copies. */
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(ctx)) {
         /* Re-open the interrupted primitive as draw[0] of the fresh
          * buffer, starting at vertex 0.
          */
         exec->vtx.mode[0] = ctx->Driver.CurrentExecPrimitive;
         exec->vtx.draw[0].start = 0;
         exec->vtx.markers[0].begin = 0;
         exec->vtx.prim_count++;

         /* If all of the last primitive's vertices were wrapped over, the
          * restarted primitive inherits the original begin marker.
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.markers[0].begin = last_begin;
      }
   }
}
120
121
122 /**
123 * Deal with buffer wrapping where provoked by the vertex buffer
124 * filling up, as opposed to upgrade_vertex().
125 */
126 static void
vbo_exec_vtx_wrap(struct vbo_exec_context * exec)127 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
128 {
129 unsigned numComponents;
130
131 /* Run pipeline on current vertices, copy wrapped vertices
132 * to exec->vtx.copied.
133 */
134 vbo_exec_wrap_buffers(exec);
135
136 if (!exec->vtx.buffer_ptr) {
137 /* probably ran out of memory earlier when allocating the VBO */
138 return;
139 }
140
141 /* Copy stored stored vertices to start of new list.
142 */
143 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
144
145 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
146 memcpy(exec->vtx.buffer_ptr,
147 exec->vtx.copied.buffer,
148 numComponents * sizeof(fi_type));
149 exec->vtx.buffer_ptr += numComponents;
150 exec->vtx.vert_count += exec->vtx.copied.nr;
151
152 exec->vtx.copied.nr = 0;
153 }
154
155
/**
 * Copy the active vertex's values to the ctx->Current fields.
 *
 * For every enabled attribute except the position, the latest attribute
 * value stored in exec is expanded/cleaned to 4 components (8 for
 * double/uint64 types) and written into the corresponding "current"
 * value.  State-change flags are raised only when a value actually
 * changed, and the stored vertex format is updated if size/type differ.
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   struct vbo_context *vbo = vbo_context(ctx);
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
   bool color0_changed = false;

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul_shift = 0;

      assert(exec->vtx.attr[i].size);

      /* VBO_ATTRIB_SELECT_RESULT_INDEX has no current */
      if (!current)
         continue;

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         /* 64-bit types occupy two 32-bit words per component; copy the
          * raw words and remember to double all byte counts below.
          */
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
         dmul_shift = 1;
      } else {
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      if (memcmp(current, tmp, 4 * sizeof(GLfloat) << dmul_shift) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) << dmul_shift);

         if (i == VBO_ATTRIB_COLOR0)
            color0_changed = true;

         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT) {
            /* Material attributes live in ctx->Light.Material. */
            ctx->NewState |= _NEW_MATERIAL;
            ctx->PopAttribState |= GL_LIGHTING_BIT;

            /* The fixed-func vertex program uses this. */
            if (i == VBO_ATTRIB_MAT_FRONT_SHININESS ||
                i == VBO_ATTRIB_MAT_BACK_SHININESS)
               ctx->NewState |= _NEW_FF_VERT_PROGRAM;
         } else {
            if (i == VBO_ATTRIB_EDGEFLAG)
               _mesa_update_edgeflag_state_vao(ctx);

            ctx->NewState |= _NEW_CURRENT_ATTRIB;
            ctx->PopAttribState |= GL_CURRENT_BIT;
         }
      }

      /* Given that we explicitly state size here, there is no need
       * for the COPY_CLEAN above, could just copy 16 bytes and be
       * done.  The only problem is when Mesa accesses ctx->Current
       * directly.
       */
      /* Size here is in components - not bytes */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.User.Type ||
          (exec->vtx.attr[i].size >> dmul_shift) != vbo->current[i].Format.User.Size) {
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size >> dmul_shift,
                               exec->vtx.attr[i].type);
         /* The format changed. We need to update gallium vertex elements.
          * Material attributes don't need this because they don't have formats.
          */
         if (i <= VBO_ATTRIB_EDGEFLAG)
            ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* If glColor changed and color-material tracking is on, propagate the
    * new color into the tracked material attributes.
    */
   if (color0_changed && ctx->Light.ColorMaterialEnabled) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
242
243
/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr  VBO_ATTRIB_x vertex attribute value
 * \param newSize  new attribute size in 32-bit words
 * \param newType  new attribute type (GL_FLOAT, GL_DOUBLE, ...)
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize, GLenum newType)
{
   struct gl_context *ctx = gl_context_from_vbo_exec(exec);
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   if (unlikely(!exec->vtx.buffer_ptr)) {
      /* We should only hit this when use_buffer_objects=true */
      assert(exec->vtx.bufferobj);
      vbo_exec_vtx_map(exec);
      assert(exec->vtx.buffer_ptr);
   }

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(ctx);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         /* The attribute already existed with a different size: shift the
          * attributes stored after it so the layout stays packed.
          */
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
           exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Expand the old (smaller/different-size) value to the
                   * new size, filling missing components with defaults.
                   */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* The attribute didn't exist before: seed it from the
                   * current attribute value.
                   */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
412
413
/**
 * This is when a vertex attribute transitions to a different size
 * or type.  For example, we saw a bunch of glTexCoord2f() calls and
 * now we got a glTexCoord4f() call.  We promote the array from size=2
 * to size=4.
 * \param newSize  size of the new attribute (number of 32-bit words)
 * \param newType  type of the new attribute (GL_FLOAT, GL_DOUBLE, ...)
 * \param attr  VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger (or the type changed).  Need to flush
       * existing vertices and get an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];

      exec->vtx.attr[attr].active_size = newSize;
   }
}
450
451
452 /**
453 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
454 * It depends on a few things, including whether we're inside or outside
455 * of glBegin/glEnd.
456 */
457 static inline bool
is_vertex_position(const struct gl_context * ctx,GLuint index)458 is_vertex_position(const struct gl_context *ctx, GLuint index)
459 {
460 return (index == 0 &&
461 _mesa_attr_zero_aliases_vertex(ctx) &&
462 _mesa_inside_begin_end(ctx));
463 }
464
/* Write a 64-bit value into a 32-bit pointer by preserving endianness.
 * The destination pointer is advanced by two 32-bit words.  Two variants
 * are needed so that the low and high halves land in memory in the
 * machine's native word order.
 */
#if UTIL_ARCH_LITTLE_ENDIAN
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (u64); \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
      } while (0)
#else
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
         *(dst32)++ = (u64); \
      } while (0)
#endif
477
478
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 *
 * For attributes other than the position it stores the value into the
 * per-attribute scratch area (exec->vtx.attrptr).  For the position
 * (A == 0) it additionally emits the whole accumulated vertex into the
 * vertex buffer, wrapping the buffer when it fills up.
 *
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3  attribute value components
 */
#define ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3)                     \
   do {                                                                 \
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec;          \
      int sz = (sizeof(C) / sizeof(GLfloat));                           \
                                                                        \
      assert(sz == 1 || sz == 2);                                       \
      /* store a copy of the attribute in exec except for glVertex */   \
      if ((A) != 0) {                                                   \
         /* Check if attribute size or type is changing. */             \
         if (unlikely(exec->vtx.attr[A].active_size != N * sz ||        \
                      exec->vtx.attr[A].type != T)) {                   \
            vbo_exec_fixup_vertex(ctx, A, N * sz, T);                   \
         }                                                              \
                                                                        \
         C *dest = (C *)exec->vtx.attrptr[A];                           \
         if (N>0) dest[0] = V0;                                         \
         if (N>1) dest[1] = V1;                                         \
         if (N>2) dest[2] = V2;                                         \
         if (N>3) dest[3] = V3;                                         \
         assert(exec->vtx.attr[A].type == T);                           \
                                                                        \
         /* we now have accumulated a per-vertex attribute */           \
         ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                 \
      } else {                                                          \
         /* This is a glVertex call */                                  \
         int size = exec->vtx.attr[0].size;                             \
                                                                        \
         /* Check if attribute size or type is changing. */             \
         if (unlikely(size < N * sz ||                                  \
                      exec->vtx.attr[0].type != T)) {                   \
            vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T);           \
         }                                                              \
                                                                        \
         uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr;              \
         uint32_t *src = (uint32_t *)exec->vtx.vertex;                  \
         unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos;    \
                                                                        \
         /* Copy over attributes from exec. */                          \
         for (unsigned i = 0; i < vertex_size_no_pos; i++)              \
            *dst++ = *src++;                                            \
                                                                        \
         /* Store the position, which is always last and can have 32 or */ \
         /* 64 bits per channel. */                                     \
         if (sizeof(C) == 4) {                                          \
            if (N > 0) *dst++ = V0;                                     \
            if (N > 1) *dst++ = V1;                                     \
            if (N > 2) *dst++ = V2;                                     \
            if (N > 3) *dst++ = V3;                                     \
                                                                        \
            if (unlikely(N < size)) {                                   \
               if (N < 2 && size >= 2) *dst++ = V1;                     \
               if (N < 3 && size >= 3) *dst++ = V2;                     \
               if (N < 4 && size >= 4) *dst++ = V3;                     \
            }                                                           \
         } else {                                                       \
            /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
            /* separately */                                            \
            if (N > 0) SET_64BIT(dst, V0);                              \
            if (N > 1) SET_64BIT(dst, V1);                              \
            if (N > 2) SET_64BIT(dst, V2);                              \
            if (N > 3) SET_64BIT(dst, V3);                              \
                                                                        \
            if (unlikely(N * 2 < size)) {                               \
               if (N < 2 && size >= 4) SET_64BIT(dst, V1);              \
               if (N < 3 && size >= 6) SET_64BIT(dst, V2);              \
               if (N < 4 && size >= 8) SET_64BIT(dst, V3);              \
            }                                                           \
         }                                                              \
                                                                        \
         /* dst now points at the beginning of the next vertex */       \
         exec->vtx.buffer_ptr = (fi_type*)dst;                          \
                                                                        \
         /* Don't set FLUSH_UPDATE_CURRENT because */                   \
         /* Current.Attrib[VBO_ATTRIB_POS] is never used. */            \
                                                                        \
         if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert))    \
            vbo_exec_vtx_wrap(exec);                                    \
      }                                                                 \
   } while (0)
567
568 #undef ERROR
569 #define ERROR(err) _mesa_error(ctx, err, __func__)
570 #define TAG(x) _mesa_##x
571 #define SUPPRESS_STATIC
572
573 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
574 ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3)
575
576 #include "vbo_attrib_tmp.h"
577
578
579 /**
580 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
581 * this may be a (partial) no-op.
582 */
583 void GLAPIENTRY
_mesa_Materialfv(GLenum face,GLenum pname,const GLfloat * params)584 _mesa_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
585 {
586 GLbitfield updateMats;
587 GET_CURRENT_CONTEXT(ctx);
588
589 /* This function should be a no-op when it tries to update material
590 * attributes which are currently tracking glColor via glColorMaterial.
591 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
592 * indicating which material attributes can actually be updated below.
593 */
594 if (ctx->Light.ColorMaterialEnabled) {
595 updateMats = ~ctx->Light._ColorMaterialBitmask;
596 }
597 else {
598 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
599 updateMats = ALL_MATERIAL_BITS;
600 }
601
602 if (_mesa_is_desktop_gl_compat(ctx) && face == GL_FRONT) {
603 updateMats &= FRONT_MATERIAL_BITS;
604 }
605 else if (_mesa_is_desktop_gl_compat(ctx) && face == GL_BACK) {
606 updateMats &= BACK_MATERIAL_BITS;
607 }
608 else if (face != GL_FRONT_AND_BACK) {
609 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
610 return;
611 }
612
613 switch (pname) {
614 case GL_EMISSION:
615 if (updateMats & MAT_BIT_FRONT_EMISSION)
616 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
617 if (updateMats & MAT_BIT_BACK_EMISSION)
618 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
619 break;
620 case GL_AMBIENT:
621 if (updateMats & MAT_BIT_FRONT_AMBIENT)
622 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
623 if (updateMats & MAT_BIT_BACK_AMBIENT)
624 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
625 break;
626 case GL_DIFFUSE:
627 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
628 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
629 if (updateMats & MAT_BIT_BACK_DIFFUSE)
630 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
631 break;
632 case GL_SPECULAR:
633 if (updateMats & MAT_BIT_FRONT_SPECULAR)
634 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
635 if (updateMats & MAT_BIT_BACK_SPECULAR)
636 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
637 break;
638 case GL_SHININESS:
639 if (*params < 0 || *params > ctx->Const.MaxShininess) {
640 _mesa_error(ctx, GL_INVALID_VALUE,
641 "glMaterial(invalid shininess: %f out range [0, %f])",
642 *params, ctx->Const.MaxShininess);
643 return;
644 }
645 if (updateMats & MAT_BIT_FRONT_SHININESS)
646 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
647 if (updateMats & MAT_BIT_BACK_SHININESS)
648 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
649 break;
650 case GL_COLOR_INDEXES:
651 if (ctx->API != API_OPENGL_COMPAT) {
652 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
653 return;
654 }
655 if (updateMats & MAT_BIT_FRONT_INDEXES)
656 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
657 if (updateMats & MAT_BIT_BACK_INDEXES)
658 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
659 break;
660 case GL_AMBIENT_AND_DIFFUSE:
661 if (updateMats & MAT_BIT_FRONT_AMBIENT)
662 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
663 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
664 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
665 if (updateMats & MAT_BIT_BACK_AMBIENT)
666 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
667 if (updateMats & MAT_BIT_BACK_DIFFUSE)
668 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
669 break;
670 default:
671 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
672 return;
673 }
674 }
675
676
677 /**
678 * Flush (draw) vertices.
679 *
680 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
681 */
682 static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context * exec,unsigned flags)683 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
684 {
685 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
686
687 if (flags & FLUSH_STORED_VERTICES) {
688 if (exec->vtx.vert_count) {
689 vbo_exec_vtx_flush(exec);
690 }
691
692 if (exec->vtx.vertex_size) {
693 vbo_exec_copy_to_current(exec);
694 vbo_reset_all_attr(ctx);
695 }
696
697 /* All done. */
698 ctx->Driver.NeedFlush = 0;
699 } else {
700 assert(flags == FLUSH_UPDATE_CURRENT);
701
702 /* Note that the vertex size is unchanged.
703 * (vbo_reset_all_attr isn't called)
704 */
705 vbo_exec_copy_to_current(exec);
706
707 /* Only FLUSH_UPDATE_CURRENT is done. */
708 ctx->Driver.NeedFlush = ~FLUSH_UPDATE_CURRENT;
709 }
710 }
711
712
713 void GLAPIENTRY
_mesa_EvalCoord1f(GLfloat u)714 _mesa_EvalCoord1f(GLfloat u)
715 {
716 GET_CURRENT_CONTEXT(ctx);
717 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
718
719 {
720 GLint i;
721 if (exec->eval.recalculate_maps)
722 vbo_exec_eval_update(exec);
723
724 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
725 if (exec->eval.map1[i].map)
726 if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
727 vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
728 }
729 }
730
731 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
732 exec->vtx.vertex_size * sizeof(GLfloat));
733
734 vbo_exec_do_EvalCoord1f(exec, u);
735
736 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
737 exec->vtx.vertex_size * sizeof(GLfloat));
738 }
739
740
741 void GLAPIENTRY
_mesa_EvalCoord2f(GLfloat u,GLfloat v)742 _mesa_EvalCoord2f(GLfloat u, GLfloat v)
743 {
744 GET_CURRENT_CONTEXT(ctx);
745 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
746
747 {
748 GLint i;
749 if (exec->eval.recalculate_maps)
750 vbo_exec_eval_update(exec);
751
752 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
753 if (exec->eval.map2[i].map)
754 if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
755 vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
756 }
757
758 if (ctx->Eval.AutoNormal)
759 if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
760 vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
761 }
762
763 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
764 exec->vtx.vertex_size * sizeof(GLfloat));
765
766 vbo_exec_do_EvalCoord2f(exec, u, v);
767
768 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
769 exec->vtx.vertex_size * sizeof(GLfloat));
770 }
771
772
773 void GLAPIENTRY
_mesa_EvalCoord1fv(const GLfloat * u)774 _mesa_EvalCoord1fv(const GLfloat *u)
775 {
776 _mesa_EvalCoord1f(u[0]);
777 }
778
779
780 void GLAPIENTRY
_mesa_EvalCoord2fv(const GLfloat * u)781 _mesa_EvalCoord2fv(const GLfloat *u)
782 {
783 _mesa_EvalCoord2f(u[0], u[1]);
784 }
785
786
787 void GLAPIENTRY
_mesa_EvalPoint1(GLint i)788 _mesa_EvalPoint1(GLint i)
789 {
790 GET_CURRENT_CONTEXT(ctx);
791 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
792 (GLfloat) ctx->Eval.MapGrid1un);
793 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
794
795 _mesa_EvalCoord1f(u);
796 }
797
798
799 void GLAPIENTRY
_mesa_EvalPoint2(GLint i,GLint j)800 _mesa_EvalPoint2(GLint i, GLint j)
801 {
802 GET_CURRENT_CONTEXT(ctx);
803 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
804 (GLfloat) ctx->Eval.MapGrid2un);
805 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
806 (GLfloat) ctx->Eval.MapGrid2vn);
807 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
808 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
809
810 _mesa_EvalCoord2f(u, v);
811 }
812
813
/**
 * Called via glBegin.
 *
 * Validates the primitive mode, records the start of a new primitive in
 * the exec draw list, and swaps in the begin/end dispatch table so that
 * only calls legal inside glBegin/glEnd are routed here.
 */
void GLAPIENTRY
_mesa_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   /* Nested glBegin is an error. */
   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (ctx->NewState)
      _mesa_update_state(ctx);

   GLenum error = _mesa_valid_prim_mode(ctx, mode);
   if (error != GL_NO_ERROR) {
      _mesa_error(ctx, error, "glBegin");
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    *
    * Use FLUSH_STORED_VERTICES, because it updates current attribs and
    * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);

   /* Open a new primitive in the draw list. */
   i = exec->vtx.prim_count++;
   exec->vtx.mode[i] = mode;
   exec->vtx.draw[i].start = exec->vtx.vert_count;
   exec->vtx.markers[i].begin = 1;

   ctx->Driver.CurrentExecPrimitive = mode;

   /* HW select mode uses its own begin/end dispatch table. */
   ctx->Dispatch.Exec = _mesa_hw_select_enabled(ctx) ?
      ctx->Dispatch.HWSelectModeBeginEnd : ctx->Dispatch.BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->GLThread.enabled) {
      if (ctx->Dispatch.Current == ctx->Dispatch.OutsideBeginEnd)
         ctx->Dispatch.Current = ctx->Dispatch.Exec;
   } else if (ctx->GLApi == ctx->Dispatch.OutsideBeginEnd) {
      ctx->GLApi = ctx->Dispatch.Current = ctx->Dispatch.Exec;
      _glapi_set_dispatch(ctx->GLApi);
   } else {
      assert(ctx->GLApi == ctx->Dispatch.Save);
   }
}
871
872
873 /**
874 * Try to merge / concatenate the two most recent VBO primitives.
875 */
876 static void
try_vbo_merge(struct vbo_exec_context * exec)877 try_vbo_merge(struct vbo_exec_context *exec)
878 {
879 unsigned cur = exec->vtx.prim_count - 1;
880
881 assert(exec->vtx.prim_count >= 1);
882
883 vbo_try_prim_conversion(&exec->vtx.mode[cur], &exec->vtx.draw[cur].count);
884
885 if (exec->vtx.prim_count >= 2) {
886 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
887 unsigned prev = cur - 1;
888
889 if (vbo_merge_draws(ctx, false,
890 exec->vtx.mode[prev],
891 exec->vtx.mode[cur],
892 exec->vtx.draw[prev].start,
893 exec->vtx.draw[cur].start,
894 &exec->vtx.draw[prev].count,
895 exec->vtx.draw[cur].count,
896 0, 0,
897 &exec->vtx.markers[prev].end,
898 exec->vtx.markers[cur].begin,
899 exec->vtx.markers[cur].end))
900 exec->vtx.prim_count--; /* drop the last primitive */
901 }
902 }
903
904
/**
 * Called via glEnd.
 *
 * Restores the outside-begin/end dispatch table, closes the current
 * primitive (with special handling to turn GL_LINE_LOOP into a
 * GL_LINE_STRIP when needed), and tries to merge it with the previous
 * one.
 */
void GLAPIENTRY
_mesa_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   ctx->Dispatch.Exec = ctx->Dispatch.OutsideBeginEnd;

   /* Swap the outside-begin/end dispatch table back in, mirroring the
    * table swap done in _mesa_Begin().
    */
   if (ctx->GLThread.enabled) {
      if (ctx->Dispatch.Current == ctx->Dispatch.BeginEnd ||
          ctx->Dispatch.Current == ctx->Dispatch.HWSelectModeBeginEnd) {
         ctx->Dispatch.Current = ctx->Dispatch.Exec;
      }
   } else if (ctx->GLApi == ctx->Dispatch.BeginEnd ||
              ctx->GLApi == ctx->Dispatch.HWSelectModeBeginEnd) {
      ctx->GLApi = ctx->Dispatch.Current = ctx->Dispatch.Exec;
      _glapi_set_dispatch(ctx->GLApi);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      unsigned last = exec->vtx.prim_count - 1;
      struct pipe_draw_start_count_bias *last_draw = &exec->vtx.draw[last];
      unsigned count = exec->vtx.vert_count - last_draw->start;

      last_draw->count = count;
      exec->vtx.markers[last].end = 1;

      if (count) {
         /* mark result buffer used */
         if (_mesa_hw_select_enabled(ctx))
            ctx->Select.ResultUsed = GL_TRUE;

         ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
      }

      /* Special handling for GL_LINE_LOOP */
      bool driver_supports_lineloop =
         ctx->Const.DriverSupportedPrimMask & BITFIELD_BIT(MESA_PRIM_LINE_LOOP);
      if (exec->vtx.mode[last] == GL_LINE_LOOP &&
          (exec->vtx.markers[last].begin == 0 || !driver_supports_lineloop)) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_draw->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         if (exec->vtx.markers[last].begin == 0)
            last_draw->start++;  /* skip vertex0 */

         /* note that the count stays unchanged */
         exec->vtx.mode[last] = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;

         /* When the driver can't draw line loops natively, the appended
          * closing vertex must also be part of this draw's count.
          */
         if (!driver_supports_lineloop)
            last_draw->count++;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   /* Flush when the primitive list is full. */
   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
993
994
995 /**
996 * Called via glPrimitiveRestartNV()
997 */
998 void GLAPIENTRY
_mesa_PrimitiveRestartNV(void)999 _mesa_PrimitiveRestartNV(void)
1000 {
1001 GLenum curPrim;
1002 GET_CURRENT_CONTEXT(ctx);
1003
1004 curPrim = ctx->Driver.CurrentExecPrimitive;
1005
1006 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
1007 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
1008 }
1009 else {
1010 _mesa_End();
1011 _mesa_Begin(curPrim);
1012 }
1013 }
1014
1015
1016 /**
1017 * A special version of glVertexAttrib4f that does not treat index 0 as
1018 * VBO_ATTRIB_POS.
1019 */
1020 static void
VertexAttrib4f_nopos(GLuint index,GLfloat x,GLfloat y,GLfloat z,GLfloat w)1021 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1022 {
1023 GET_CURRENT_CONTEXT(ctx);
1024 if (index < ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs)
1025 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1026 else
1027 ERROR(GL_INVALID_VALUE);
1028 }
1029
1030 static void GLAPIENTRY
_es_VertexAttrib4fARB(GLuint index,GLfloat x,GLfloat y,GLfloat z,GLfloat w)1031 _es_VertexAttrib4fARB(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1032 {
1033 VertexAttrib4f_nopos(index, x, y, z, w);
1034 }
1035
1036
1037 static void GLAPIENTRY
_es_VertexAttrib1fARB(GLuint indx,GLfloat x)1038 _es_VertexAttrib1fARB(GLuint indx, GLfloat x)
1039 {
1040 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1041 }
1042
1043
1044 static void GLAPIENTRY
_es_VertexAttrib1fvARB(GLuint indx,const GLfloat * values)1045 _es_VertexAttrib1fvARB(GLuint indx, const GLfloat* values)
1046 {
1047 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1048 }
1049
1050
1051 static void GLAPIENTRY
_es_VertexAttrib2fARB(GLuint indx,GLfloat x,GLfloat y)1052 _es_VertexAttrib2fARB(GLuint indx, GLfloat x, GLfloat y)
1053 {
1054 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1055 }
1056
1057
1058 static void GLAPIENTRY
_es_VertexAttrib2fvARB(GLuint indx,const GLfloat * values)1059 _es_VertexAttrib2fvARB(GLuint indx, const GLfloat* values)
1060 {
1061 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1062 }
1063
1064
1065 static void GLAPIENTRY
_es_VertexAttrib3fARB(GLuint indx,GLfloat x,GLfloat y,GLfloat z)1066 _es_VertexAttrib3fARB(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1067 {
1068 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1069 }
1070
1071
1072 static void GLAPIENTRY
_es_VertexAttrib3fvARB(GLuint indx,const GLfloat * values)1073 _es_VertexAttrib3fvARB(GLuint indx, const GLfloat* values)
1074 {
1075 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1076 }
1077
1078
1079 static void GLAPIENTRY
_es_VertexAttrib4fvARB(GLuint indx,const GLfloat * values)1080 _es_VertexAttrib4fvARB(GLuint indx, const GLfloat* values)
1081 {
1082 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1083 }
1084
1085
/**
 * Install the immediate-mode (glBegin/glEnd, vertex attribute, etc.)
 * entrypoints into the context's dispatch tables.
 *
 * The NAME_* macros below select which concrete function each generated
 * table entry resolves to; api_beginend_init.h (included twice) performs
 * the actual assignments into \c tab.
 */
void
vbo_init_dispatch_begin_end(struct gl_context *ctx)
{
/* Map generated entry names onto the _mesa_* core entrypoints, and the
 * GLES-specific attribute entries onto the _es_* wrappers defined above.
 */
#define NAME_AE(x) _mesa_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) _mesa_##x
#define NAME_ES(x) _es_##x

   /* Fill the outside-begin/end dispatch table first. */
   struct _glapi_table *tab = ctx->Dispatch.OutsideBeginEnd;
#include "api_beginend_init.h"

   /* If a separate inside-begin/end table exists, fill it the same way. */
   if (ctx->Dispatch.BeginEnd) {
      tab = ctx->Dispatch.BeginEnd;
#include "api_beginend_init.h"
   }
}
1102
1103
1104 void
vbo_reset_all_attr(struct gl_context * ctx)1105 vbo_reset_all_attr(struct gl_context *ctx)
1106 {
1107 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1108
1109 while (exec->vtx.enabled) {
1110 const int i = u_bit_scan64(&exec->vtx.enabled);
1111
1112 /* Reset the vertex attribute by setting its size to zero. */
1113 exec->vtx.attr[i].size = 0;
1114 exec->vtx.attr[i].type = GL_FLOAT;
1115 exec->vtx.attr[i].active_size = 0;
1116 exec->vtx.attrptr[i] = NULL;
1117 }
1118
1119 exec->vtx.vertex_size = 0;
1120 }
1121
1122
1123 void
vbo_exec_vtx_init(struct vbo_exec_context * exec)1124 vbo_exec_vtx_init(struct vbo_exec_context *exec)
1125 {
1126 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1127
1128 exec->vtx.bufferobj = _mesa_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
1129
1130 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1131 vbo_reset_all_attr(ctx);
1132
1133 exec->vtx.info.instance_count = 1;
1134 exec->vtx.info.max_index = ~0;
1135 }
1136
1137
1138 void
vbo_exec_vtx_destroy(struct vbo_exec_context * exec)1139 vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
1140 {
1141 /* using a real VBO for vertex data */
1142 struct gl_context *ctx = gl_context_from_vbo_exec(exec);
1143
1144 /* True VBOs should already be unmapped
1145 */
1146 if (exec->vtx.buffer_map) {
1147 assert(!exec->vtx.bufferobj ||
1148 exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
1149 if (!exec->vtx.bufferobj) {
1150 align_free(exec->vtx.buffer_map);
1151 exec->vtx.buffer_map = NULL;
1152 exec->vtx.buffer_ptr = NULL;
1153 }
1154 }
1155
1156 /* Free the vertex buffer. Unmap first if needed.
1157 */
1158 if (exec->vtx.bufferobj &&
1159 _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
1160 _mesa_bufferobj_unmap(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
1161 }
1162 _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
1163 }
1164
1165
1166 /**
1167 * If inside glBegin()/glEnd(), it should assert(0). Otherwise, if
1168 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
1169 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
1170 * __struct gl_contextRec::Current and gl_light_attrib::Material
1171 *
1172 * Note that the default T&L engine never clears the
1173 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1174 *
1175 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1176 */
1177 void
vbo_exec_FlushVertices(struct gl_context * ctx,GLuint flags)1178 vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
1179 {
1180 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1181
1182 #ifndef NDEBUG
1183 /* debug check: make sure we don't get called recursively */
1184 exec->flush_call_depth++;
1185 assert(exec->flush_call_depth == 1);
1186 #endif
1187
1188 if (_mesa_inside_begin_end(ctx)) {
1189 /* We've had glBegin but not glEnd! */
1190 #ifndef NDEBUG
1191 exec->flush_call_depth--;
1192 assert(exec->flush_call_depth == 0);
1193 #endif
1194 return;
1195 }
1196
1197 /* Flush (draw). */
1198 vbo_exec_FlushVertices_internal(exec, flags);
1199
1200 #ifndef NDEBUG
1201 exec->flush_call_depth--;
1202 assert(exec->flush_call_depth == 0);
1203 #endif
1204 }
1205
1206
1207 void GLAPIENTRY
_es_Color4f(GLfloat r,GLfloat g,GLfloat b,GLfloat a)1208 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1209 {
1210 _mesa_Color4f(r, g, b, a);
1211 }
1212
1213
1214 void GLAPIENTRY
_es_Normal3f(GLfloat x,GLfloat y,GLfloat z)1215 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1216 {
1217 _mesa_Normal3f(x, y, z);
1218 }
1219
1220
1221 void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target,GLfloat s,GLfloat t,GLfloat r,GLfloat q)1222 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1223 {
1224 _mesa_MultiTexCoord4fARB(target, s, t, r, q);
1225 }
1226
1227
1228 void GLAPIENTRY
_es_Materialfv(GLenum face,GLenum pname,const GLfloat * params)1229 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1230 {
1231 _mesa_Materialfv(face, pname, params);
1232 }
1233
1234
1235 void GLAPIENTRY
_es_Materialf(GLenum face,GLenum pname,GLfloat param)1236 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1237 {
1238 GLfloat p[4];
1239 p[0] = param;
1240 p[1] = p[2] = p[3] = 0.0F;
1241 _mesa_Materialfv(face, pname, p);
1242 }
1243
/* Generate a second set of vertex-attribute entrypoints, prefixed
 * _hw_select_*, used when hardware select mode is active (see
 * _mesa_hw_select_enabled usage above).
 */
#undef TAG
#undef SUPPRESS_STATIC
#define TAG(x) _hw_select_##x
/* filter out non-vertex api */
#define HW_SELECT_MODE

/* Wrap ATTR_UNION so that every write to attribute 0 is preceded by
 * storing the current select-result offset into a dedicated attribute
 * (VBO_ATTRIB_SELECT_RESULT_OFFSET), then performs the normal write.
 */
#undef ATTR_UNION
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
   do { \
      if ((A) == 0) { \
         ATTR_UNION_BASE(VBO_ATTRIB_SELECT_RESULT_OFFSET, 1, GL_UNSIGNED_INT, uint32_t, \
                         ctx->Select.ResultOffset, 0, 0, 0); \
      } \
      ATTR_UNION_BASE(A, N, T, C, V0, V1, V2, V3); \
   } while (0)

#include "vbo_attrib_tmp.h"
1261
/**
 * Build the dispatch table used while hardware select mode is active:
 * start from a copy of the regular begin/end table, then overwrite the
 * vertex entrypoints with the _hw_select_* variants generated above.
 */
void
vbo_init_dispatch_hw_select_begin_end(struct gl_context *ctx)
{
   int numEntries = MAX2(_gloffset_COUNT, _glapi_get_dispatch_table_size());
   memcpy(ctx->Dispatch.HWSelectModeBeginEnd, ctx->Dispatch.BeginEnd, numEntries * sizeof(_glapi_proc));

/* Redirect the generated assignments to the _hw_select_* entrypoints. */
#undef NAME
#define NAME(x) _hw_select_##x
   struct _glapi_table *tab = ctx->Dispatch.HWSelectModeBeginEnd;
#include "api_hw_select_init.h"
}
1273