/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/format/u_format.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_prim.h"
#include "translate/translate.h"

#include "nouveau_fence.h"
#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"
#include "nv30/nv30_winsys.h"

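/* Push a single vertex attribute as an immediate value: the element is read
 * from the CPU-mapped vertex buffer, unpacked to floats and emitted with the
 * VTX_ATTR_nF methods.  Used for attributes fetched with a stride of zero,
 * i.e. values that are constant for the whole draw.
 */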
static void
nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
                  struct pipe_vertex_element *ve, unsigned attr)
{
   const unsigned nc = util_format_get_nr_components(ve->src_format);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv04_resource *res = nv04_resource(vb->buffer.resource);
   const void *data;
   float v[4];

   data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset +
                                      ve->src_offset, NOUVEAU_BO_RD);

   util_format_unpack_rgba(ve->src_format, v, data, 1);

   switch (nc) {
   case 4:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      PUSH_DATAf(push, v[3]);
      break;
   case 3:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      break;
   case 2:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      break;
   case 1:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1);
      PUSH_DATAf(push, v[0]);
      break;
   default:
      assert(0);
      break;
   }
}

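/* Byte range of vertex buffer vbi that is actually referenced by the draw,
 * derived from the current min/max vertex indices and the buffer's stride.
 */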
static inline void
nv30_vbuf_range(struct nv30_context *nv30, int vbi,
                uint32_t *base, uint32_t *size)
{
   assert(nv30->vbo_max_index != ~0);
   *base = nv30->vbo_min_index * nv30->vertex->strides[vbi];
   *size = (nv30->vbo_max_index -
            nv30->vbo_min_index + 1) * nv30->vertex->strides[vbi];
}

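/* Decide, per vertex buffer, how its data will reach the GPU: fall back to
 * pushing vertices through the FIFO when a push draw is hinted, upload the
 * referenced range of user-memory buffers to temporary storage, or migrate
 * regular buffers into GART so the hardware can fetch them directly.
 */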
static void
nv30_prevalidate_vbufs(struct nv30_context *nv30)
{
   struct pipe_vertex_buffer *vb;
   struct nv04_resource *buf;
   int i;
   uint32_t base, size;

   nv30->vbo_fifo = nv30->vbo_user = 0;

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      vb = &nv30->vtxbuf[i];
      if (!nv30->vertex->strides[i] || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
         continue;
      buf = nv04_resource(vb->buffer.resource);

      /* NOTE: user buffers with temporary storage count as mapped by GPU */
      if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
         if (nv30->vbo_push_hint) {
            nv30->vbo_fifo = ~0;
            continue;
         } else {
            if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
               nv30->vbo_user |= 1 << i;
               assert(nv30->vertex->strides[i] > vb->buffer_offset);
               nv30_vbuf_range(nv30, i, &base, &size);
               nouveau_user_buffer_upload(&nv30->base, buf, base, size);
            } else {
               nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
            }
            nv30->base.vbo_dirty = true;
         }
      }
   }
}

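/* Re-upload the ranges of user vertex buffers referenced by the current draw
 * and point the hardware vertex buffer slots at the freshly uploaded copies.
 * Stride-0 attributes are emitted inline via nv30_emit_vtxattr() instead.
 */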
static void
nv30_update_user_vbufs(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   uint32_t base, offset, size;
   int i;
   uint32_t written = 0;

   for (i = 0; i < nv30->vertex->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
      const int b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
      struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

      if (!(nv30->vbo_user & (1 << b)))
         continue;

      if (!nv30->vertex->strides[i]) {
         nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }
      nv30_vbuf_range(nv30, b, &base, &size);

      if (!(written & (1 << b))) {
         written |= 1 << b;
         nouveau_user_buffer_upload(&nv30->base, buf, base, size);
      }

      offset = vb->buffer_offset + ve->src_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP, buf, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }
   nv30->base.vbo_dirty = true;
}

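/* Drop the temporary GPU-accessible copies of user vertex buffers once the
 * draw that referenced them has been emitted.
 */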
static inline void
nv30_release_user_vbufs(struct nv30_context *nv30)
{
   uint32_t vbo_user = nv30->vbo_user;

   while (vbo_user) {
      int i = ffs(vbo_user) - 1;
      vbo_user &= ~(1 << i);

      nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
   }

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
}

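/* Validate vertex state: program the per-attribute VTXFMT words (format and
 * stride) and bind the vertex buffer addresses for the subsequent draw.
 */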
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

#if UTIL_ARCH_BIG_ENDIAN
   if (1) { /* Figure out where the buffers are getting messed up */
#else
   if (unlikely(vertex->need_conversion)) {
#endif
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   if (redefine == 0)
      return;

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vertex->strides[ve->vertex_buffer_index]) || nv30->vbo_fifo)
         PUSH_DATA (push, (vertex->strides[ve->vertex_buffer_index] << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      bool user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer.resource);

      if (nv30->vbo_fifo || unlikely(ve->src_stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                       res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}

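/* Create a vertex elements CSO.  Formats the hardware cannot fetch natively
 * are replaced by equivalent float formats (need_conversion), and a translate
 * key is set up so the push-vertices path can repack attributes on the CPU.
 */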
static void *
nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nv30_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;

   so = CALLOC(1, sizeof(*so) + sizeof(*so->element) * num_elements);
   if (!so)
      return NULL;
   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;
   so->need_conversion = false;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; i++) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      enum pipe_format fmt = ve->src_format;

      so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
         so->need_conversion = true;
      }

      if (1) {
         unsigned j = transkey.nr_elements++;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += (util_format_get_stride(fmt, 1) + 3) & ~3;
      }
      so->strides[vbi] = ve->src_stride;
   }

   so->translate = translate_create(&transkey);
   so->vtx_size = transkey.output_stride / 4;
   so->vtx_per_packet_max = NV04_PFIFO_MAX_PACKET_LEN / MAX2(so->vtx_size, 1);
   return so;
}

static void
nv30_vertex_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}

static void
nv30_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_context *nv30 = nv30_context(pipe);

   nv30->vertex = hwcso;
   nv30->dirty |= NV30_NEW_VERTEX;
}

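/* Non-indexed draw: each VB_VERTEX_BATCH word encodes a run of consecutive
 * vertices as ((count - 1) << 24) | start, so a word covers at most 256
 * vertices and a single method transfer at most 2047 words.
 */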
static void
nv30_draw_arrays(struct nv30_context *nv30,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned prim;

   prim = nv30_prim_gl(mode);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, prim);
   while (count) {
      const unsigned mpush = 2047 * 256;
      unsigned npush = (count > mpush) ? mpush : count;
      unsigned wpush = ((npush + 255) & ~255) >> 8;

      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), wpush);
      while (npush >= 256) {
         PUSH_DATA (push, 0xff000000 | start);
         start += 256;
         npush -= 256;
      }

      if (npush)
         PUSH_DATA (push, ((npush - 1) << 24) | start);
   }
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}

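/* Inline index submission: indices are written directly into the pushbuf,
 * two 16-bit indices packed per VB_ELEMENT_U16 word, with an odd leading
 * index sent separately through VB_ELEMENT_U32.
 */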
static void
nv30_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}

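/* 32-bit source indices that are known to fit in 16 bits (shorten path),
 * packed two per VB_ELEMENT_U16 word like the u16 case.
 */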
static void
nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

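/* Indexed draw: on NV40-class hardware with 16/32-bit indices in a real
 * (non-user) buffer, indices are fetched by the GPU via IDXBUF_OFFSET /
 * IDXBUF_FORMAT and VB_INDEX_BATCH; otherwise they are copied inline into
 * the pushbuf with the helpers above.
 */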
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
                   const struct pipe_draw_info *info,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias,
                   unsigned index_size)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

   if (eng3d->oclass >= NV40_3D_CLASS && index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV40_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       !info->has_user_indices) {
      struct nv04_resource *res = nv04_resource(info->index.resource);
      unsigned offset = 0;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                       (index_size == 2) ? 0x00000010 : 0x00000000,
                       res->domain | NOUVEAU_BO_RD,
                       0, NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush = (count > mpush) ? mpush : count;
         unsigned wpush = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
      const void *data;
      if (!info->has_user_indices)
         data = nouveau_resource_map_offset(&nv30->base,
                                            nv04_resource(info->index.resource),
                                            0, NOUVEAU_BO_RD);
      else
         data = info->index.user;
      if (!data)
         return;

      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      switch (index_size) {
      case 1:
         nv30_draw_elements_inline_u08(push, data, start, count);
         break;
      case 2:
         nv30_draw_elements_inline_u16(push, data, start, count);
         break;
      case 4:
         if (shorten)
            nv30_draw_elements_inline_u32_short(push, data, start, count);
         else
            nv30_draw_elements_inline_u32(push, data, start, count);
         break;
      default:
         assert(0);
         return;
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   }
}

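/* pipe_context::draw_vbo entry point.  Chooses between the hardware vertex
 * buffer path, the push-vertices path (vbo_fifo) and the nv30_render_vbo
 * fallback (draw_flags), and invalidates the vertex cache when coherent-mapped
 * buffers may have been written behind the GPU's back.
 */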
static void
nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pipe, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   int i;

   if (!info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
      return;

   /* For picking only a few vertices from a large user buffer, push is better;
    * if the index count is larger and we expect repeated vertices, suggest upload.
    */
   nv30->vbo_push_hint = /* the 64 is heuristic */
      !(info->index_size &&
        info->index_bounds_valid &&
        ((info->max_index - info->min_index + 64) < draws[0].count));

   if (info->index_bounds_valid) {
      nv30->vbo_min_index = info->min_index;
      nv30->vbo_max_index = info->max_index;
   } else {
      nv30->vbo_min_index = 0;
      nv30->vbo_max_index = ~0;
   }

   if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
      nv30->dirty |= NV30_NEW_ARRAYS;

   if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
      nv30_update_user_vbufs(nv30);

   nv30_state_validate(nv30, ~0, true);
   if (nv30->draw_flags) {
      nv30_render_vbo(pipe, info, drawid_offset, &draws[0]);
      return;
   } else
   if (nv30->vbo_fifo) {
      nv30_push_vbo(nv30, info, &draws[0]);
      return;
   }

   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
      if (!nv30->vtxbuf[i].buffer.resource)
         continue;
      if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
         nv30->base.vbo_dirty = true;
   }

   if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
       info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nv30->base.vbo_dirty = true;

   if (nv30->base.vbo_dirty) {
      BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
      PUSH_DATA (push, 0);
      nv30->base.vbo_dirty = false;
   }

   if (!info->index_size) {
      nv30_draw_arrays(nv30,
                       info->mode, draws[0].start, draws[0].count,
                       info->instance_count);
   } else {
      bool shorten = info->index_bounds_valid && info->max_index <= 65535;

      if (info->primitive_restart != nv30->state.prim_restart) {
         if (info->primitive_restart) {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
            PUSH_DATA (push, 1);
            PUSH_DATA (push, info->restart_index);

            if (info->restart_index > 65535)
               shorten = false;
         } else {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
            PUSH_DATA (push, 0);
         }
         nv30->state.prim_restart = info->primitive_restart;
      } else
      if (info->primitive_restart) {
         BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
         PUSH_DATA (push, info->restart_index);

         if (info->restart_index > 65535)
            shorten = false;
      }

      nv30_draw_elements(nv30, shorten, info,
                         info->mode, draws[0].start, draws[0].count,
                         info->instance_count, draws[0].index_bias, info->index_size);
   }

   nv30_state_release(nv30);
   nv30_release_user_vbufs(nv30);
}

void
nv30_vbo_init(struct pipe_context *pipe)
{
   pipe->create_vertex_elements_state = nv30_vertex_state_create;
   pipe->delete_vertex_elements_state = nv30_vertex_state_delete;
   pipe->bind_vertex_elements_state = nv30_vertex_state_bind;
   pipe->draw_vbo = nv30_draw_vbo;
}