1 /**************************************************************************
2 *
3 * Copyright 2007 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * \file
30 * Build post-transformation, post-clipping vertex buffers and element
31 * lists by hooking into the end of the primitive pipeline and
32 * manipulating the vertex_id field in the vertex headers.
33 *
34 * XXX: work in progress
35 *
36 * \author José Fonseca <[email protected]>
37 * \author Keith Whitwell <[email protected]>
38 */
39
40 #include "draw/draw_context.h"
41 #include "draw/draw_vbuf.h"
42 #include "util/u_debug.h"
43 #include "util/u_inlines.h"
44 #include "util/u_math.h"
45 #include "util/u_memory.h"
46
47 #include "i915_batch.h"
48 #include "i915_context.h"
49 #include "i915_reg.h"
50 #include "i915_state.h"
51
/**
 * Primitive renderer for i915.
 *
 * Wraps the draw module's vbuf_render interface and manages a single
 * winsys vertex buffer that is sub-allocated across draws.
 */
struct i915_vbuf_render {
   struct vbuf_render base;

   struct i915_context *i915;

   /** Vertex size in bytes */
   size_t vertex_size;

   /** Software primitive */
   unsigned prim;

   /** Hardware primitive */
   unsigned hwprim;

   /** Generate a vertex list (MESA_PRIM_* of the prim needing emulation, or 0) */
   unsigned fallback;

   /* Stuff for the vbo */
   struct i915_winsys_buffer *vbo;
   size_t vbo_size;       /**< current size of allocated buffer */
   size_t vbo_alloc_size; /**< minimum buffer size to allocate */
   size_t vbo_hw_offset;  /**< offset that we program the hardware with */
   size_t vbo_sw_offset;  /**< offset that we work with */
   size_t vbo_index;      /**< index offset to be added to all indices */
   void *vbo_ptr;         /**< CPU mapping of vbo */
   size_t vbo_max_used;   /**< bytes used in vbo since the last offset update */
   size_t vbo_max_index;  /**< highest vertex index seen by unmap_vertices */
};
83
84 /**
85 * Basically a cast wrapper.
86 */
87 static inline struct i915_vbuf_render *
i915_vbuf_render(struct vbuf_render * render)88 i915_vbuf_render(struct vbuf_render *render)
89 {
90 assert(render);
91 return (struct i915_vbuf_render *)render;
92 }
93
94 /**
95 * If vbo state differs between renderer and context
96 * push state to the context. This function pushes
97 * hw_offset to i915->vbo_offset and vbo to i915->vbo.
98 *
99 * Side effects:
100 * May updates context vbo_offset and vbo fields.
101 */
102 static void
i915_vbuf_update_vbo_state(struct vbuf_render * render)103 i915_vbuf_update_vbo_state(struct vbuf_render *render)
104 {
105 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
106 struct i915_context *i915 = i915_render->i915;
107
108 if (i915->vbo != i915_render->vbo ||
109 i915->vbo_offset != i915_render->vbo_hw_offset) {
110 i915->vbo = i915_render->vbo;
111 i915->vbo_offset = i915_render->vbo_hw_offset;
112 i915->dirty |= I915_NEW_VBO;
113 }
114 }
115
116 /**
117 * Callback exported to the draw module.
118 * Returns the current vertex_info.
119 *
120 * Side effects:
121 * If state is dirty update derived state.
122 */
123 static const struct vertex_info *
i915_vbuf_render_get_vertex_info(struct vbuf_render * render)124 i915_vbuf_render_get_vertex_info(struct vbuf_render *render)
125 {
126 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
127 struct i915_context *i915 = i915_render->i915;
128
129 if (i915->dirty) {
130 /* make sure we have up to date vertex layout */
131 i915_update_derived(i915);
132 }
133
134 return &i915->current.vertex_info.draw;
135 }
136
137 /**
138 * Reserve space in the vbo for vertices.
139 *
140 * Side effects:
141 * None.
142 */
143 static bool
i915_vbuf_render_reserve(struct i915_vbuf_render * i915_render,size_t size)144 i915_vbuf_render_reserve(struct i915_vbuf_render *i915_render, size_t size)
145 {
146 struct i915_context *i915 = i915_render->i915;
147
148 if (i915_render->vbo_size < size + i915_render->vbo_sw_offset)
149 return false;
150
151 if (i915->vbo_flushed)
152 return false;
153
154 return true;
155 }
156
157 /**
158 * Allocate a new vbo buffer should there not be enough space for
159 * the requested number of vertices by the draw module.
160 *
161 * Side effects:
162 * Updates hw_offset, sw_offset, index and allocates a new buffer.
163 * Will set i915->vbo to null on buffer allocation.
164 */
165 static void
i915_vbuf_render_new_buf(struct i915_vbuf_render * i915_render,size_t size)166 i915_vbuf_render_new_buf(struct i915_vbuf_render *i915_render, size_t size)
167 {
168 struct i915_context *i915 = i915_render->i915;
169 struct i915_winsys *iws = i915->iws;
170
171 if (i915_render->vbo) {
172 iws->buffer_unmap(iws, i915_render->vbo);
173 iws->buffer_destroy(iws, i915_render->vbo);
174 /*
175 * XXX If buffers where referenced then this should be done in
176 * update_vbo_state but since they arn't and malloc likes to reuse
177 * memory we need to set it to null
178 */
179 i915->vbo = NULL;
180 i915_render->vbo = NULL;
181 }
182
183 i915->vbo_flushed = 0;
184
185 i915_render->vbo_size = MAX2(size, i915_render->vbo_alloc_size);
186 i915_render->vbo_hw_offset = 0;
187 i915_render->vbo_sw_offset = 0;
188 i915_render->vbo_index = 0;
189
190 i915_render->vbo =
191 iws->buffer_create(iws, i915_render->vbo_size, I915_NEW_VERTEX);
192 i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, true);
193 }
194
195 /**
196 * Callback exported to the draw module.
197 *
198 * Side effects:
199 * Updates hw_offset, sw_offset, index and may allocate
200 * a new buffer. Also updates may update the vbo state
201 * on the i915 context.
202 */
203 static bool
i915_vbuf_render_allocate_vertices(struct vbuf_render * render,uint16_t vertex_size,uint16_t nr_vertices)204 i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
205 uint16_t vertex_size, uint16_t nr_vertices)
206 {
207 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
208 size_t size = (size_t)vertex_size * (size_t)nr_vertices;
209 size_t offset;
210
211 /*
212 * Align sw_offset with first multiple of vertex size from hw_offset.
213 * Set index to be the multiples from from hw_offset to sw_offset.
214 * i915_vbuf_render_new_buf will reset index, sw_offset, hw_offset
215 * when it allocates a new buffer this is correct.
216 */
217 {
218 offset = i915_render->vbo_sw_offset - i915_render->vbo_hw_offset;
219 offset = util_align_npot(offset, vertex_size);
220 i915_render->vbo_sw_offset = i915_render->vbo_hw_offset + offset;
221 i915_render->vbo_index = offset / vertex_size;
222 }
223
224 if (!i915_vbuf_render_reserve(i915_render, size))
225 i915_vbuf_render_new_buf(i915_render, size);
226
227 /*
228 * If a new buffer has been alocated sw_offset,
229 * hw_offset & index will be reset by new_buf
230 */
231
232 i915_render->vertex_size = vertex_size;
233
234 i915_vbuf_update_vbo_state(render);
235
236 if (!i915_render->vbo)
237 return false;
238 return true;
239 }
240
241 static void *
i915_vbuf_render_map_vertices(struct vbuf_render * render)242 i915_vbuf_render_map_vertices(struct vbuf_render *render)
243 {
244 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
245 struct i915_context *i915 = i915_render->i915;
246
247 if (i915->vbo_flushed)
248 debug_printf("%s bad vbo flush occurred stalling on hw\n", __func__);
249
250 return (unsigned char *)i915_render->vbo_ptr + i915_render->vbo_sw_offset;
251 }
252
253 static void
i915_vbuf_render_unmap_vertices(struct vbuf_render * render,uint16_t min_index,uint16_t max_index)254 i915_vbuf_render_unmap_vertices(struct vbuf_render *render, uint16_t min_index,
255 uint16_t max_index)
256 {
257 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
258
259 i915_render->vbo_max_index = max_index;
260 i915_render->vbo_max_used = MAX2(i915_render->vbo_max_used,
261 i915_render->vertex_size * (max_index + 1));
262 }
263
/**
 * Ensure that the biased indices of the coming draw stay within the
 * hardware's index range. If they would not, advance hw_offset to the
 * same position in the vbo as sw_offset and reset the index bias to zero.
 *
 * NOTE(review): the original comment said "ushort max" but the code
 * tests against (1 << 17) - 1 — confirm which bound the hardware
 * actually imposes.
 *
 * Side effects:
 *    On failure update hw_offset and index.
 */
static void
i915_vbuf_ensure_index_bounds(struct vbuf_render *render, unsigned max_index)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   if (max_index + i915_render->vbo_index < ((1 << 17) - 1))
      return;

   /* Rebase: hardware offset catches up with the software offset, so
    * subsequent indices start from zero again. */
   i915_render->vbo_hw_offset = i915_render->vbo_sw_offset;
   i915_render->vbo_index = 0;

   i915_vbuf_update_vbo_state(render);
}
285
286 static void
i915_vbuf_render_set_primitive(struct vbuf_render * render,enum mesa_prim prim)287 i915_vbuf_render_set_primitive(struct vbuf_render *render, enum mesa_prim prim)
288 {
289 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
290 i915_render->prim = prim;
291
292 switch (prim) {
293 case MESA_PRIM_POINTS:
294 i915_render->hwprim = PRIM3D_POINTLIST;
295 i915_render->fallback = 0;
296 break;
297 case MESA_PRIM_LINES:
298 i915_render->hwprim = PRIM3D_LINELIST;
299 i915_render->fallback = 0;
300 break;
301 case MESA_PRIM_LINE_LOOP:
302 i915_render->hwprim = PRIM3D_LINELIST;
303 i915_render->fallback = MESA_PRIM_LINE_LOOP;
304 break;
305 case MESA_PRIM_LINE_STRIP:
306 i915_render->hwprim = PRIM3D_LINESTRIP;
307 i915_render->fallback = 0;
308 break;
309 case MESA_PRIM_TRIANGLES:
310 i915_render->hwprim = PRIM3D_TRILIST;
311 i915_render->fallback = 0;
312 break;
313 case MESA_PRIM_TRIANGLE_STRIP:
314 i915_render->hwprim = PRIM3D_TRISTRIP;
315 i915_render->fallback = 0;
316 break;
317 case MESA_PRIM_TRIANGLE_FAN:
318 i915_render->hwprim = PRIM3D_TRIFAN;
319 i915_render->fallback = 0;
320 break;
321 case MESA_PRIM_QUADS:
322 i915_render->hwprim = PRIM3D_TRILIST;
323 i915_render->fallback = MESA_PRIM_QUADS;
324 break;
325 case MESA_PRIM_QUAD_STRIP:
326 i915_render->hwprim = PRIM3D_TRILIST;
327 i915_render->fallback = MESA_PRIM_QUAD_STRIP;
328 break;
329 case MESA_PRIM_POLYGON:
330 i915_render->hwprim = PRIM3D_POLY;
331 i915_render->fallback = 0;
332 break;
333 default:
334 /* FIXME: Actually, can handle a lot more just fine... */
335 assert(0 && "unexpected prim in i915_vbuf_render_set_primitive()");
336 }
337 }
338
/**
 * Used for fallbacks in draw_arrays.
 *
 * Emits index dwords covering the sequential vertex range
 * [start, start + nr), rewriting line loops / quads / quad strips into
 * primitives the hardware supports (type == 0 means a plain sequential
 * list). All indices are biased by vbo_index and packed two 16-bit
 * values per dword.
 */
static void
draw_arrays_generate_indices(struct vbuf_render *render, unsigned start,
                             uint32_t nr, unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   /* NOTE(review): 'i915' looks unused but appears to be referenced by
    * the OUT_BATCH macro — do not remove; confirm against i915_batch.h. */
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned end = start + nr + i915_render->vbo_index;
   start += i915_render->vbo_index;

   switch (type) {
   case 0:
      /* Sequential list: pairs per dword, lone trailing index if odd. */
      for (i = start; i + 1 < end; i += 2)
         OUT_BATCH((i + 0) | (i + 1) << 16);
      if (i < end)
         OUT_BATCH(i);
      break;
   case MESA_PRIM_LINE_LOOP:
      if (nr >= 2) {
         for (i = start + 1; i < end; i++)
            OUT_BATCH((i - 1) | (i + 0) << 16);
         /* Closing segment back to the first vertex. */
         OUT_BATCH((i - 1) | (start) << 16);
      }
      break;
   case MESA_PRIM_QUADS:
      /* Each quad (i..i+3) becomes triangles (0,1,3) and (1,2,3). */
      for (i = start; i + 3 < end; i += 4) {
         OUT_BATCH((i + 0) | (i + 1) << 16);
         OUT_BATCH((i + 3) | (i + 1) << 16);
         OUT_BATCH((i + 2) | (i + 3) << 16);
      }
      break;
   case MESA_PRIM_QUAD_STRIP:
      /* Two triangles per strip step, advancing one quad edge at a time. */
      for (i = start; i + 3 < end; i += 2) {
         OUT_BATCH((i + 0) | (i + 1) << 16);
         OUT_BATCH((i + 3) | (i + 2) << 16);
         OUT_BATCH((i + 0) | (i + 3) << 16);
      }
      break;
   default:
      assert(0);
   }
}
384
385 static unsigned
draw_arrays_calc_nr_indices(uint32_t nr,unsigned type)386 draw_arrays_calc_nr_indices(uint32_t nr, unsigned type)
387 {
388 switch (type) {
389 case 0:
390 return nr;
391 case MESA_PRIM_LINE_LOOP:
392 if (nr >= 2)
393 return nr * 2;
394 else
395 return 0;
396 case MESA_PRIM_QUADS:
397 return (nr / 4) * 6;
398 case MESA_PRIM_QUAD_STRIP:
399 return ((nr - 2) / 2) * 6;
400 default:
401 assert(0);
402 return 0;
403 }
404 }
405
/**
 * Indexed fallback path for draw_arrays: expands the sequential range
 * [start, start + nr) into an explicit element list so primitives the
 * hardware lacks (line loops, quads, quad strips) can be drawn.
 *
 * Side effects:
 *    Emits batch commands; may flush the batch and re-emit hardware state.
 */
static void
draw_arrays_fallback(struct vbuf_render *render, unsigned start, uint32_t nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned nr_indices;

   /* Zero means nothing to draw (e.g. line loop with < 2 vertices). */
   nr_indices = draw_arrays_calc_nr_indices(nr, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, start + nr_indices);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   /* One dword for the command plus two 16-bit indices per dword. */
   if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      /* A fresh batch that still can't hold the draw is a hard error. */
      if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
         mesa_loge("i915: Failed to allocate space for %d indices in fresh "
                   "batch with %d bytes left\n",
                   nr_indices, (int)i915_winsys_batchbuffer_space(i915->batch));
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
             PRIM_INDIRECT_ELTS | nr_indices);

   draw_arrays_generate_indices(render, start, nr, i915_render->fallback);

out:
   return;
}
450
/**
 * Callback exported to the draw module; draws a sequential range of
 * vertices from the current vbo.
 *
 * Side effects:
 *    Emits batch commands; may flush the batch and re-emit hardware state.
 */
static void
i915_vbuf_render_draw_arrays(struct vbuf_render *render, unsigned start,
                             uint32_t nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   /* Primitives the hardware can't draw directly go through the
    * index-generating fallback path. */
   if (i915_render->fallback) {
      draw_arrays_fallback(render, start, nr);
      return;
   }

   i915_vbuf_ensure_index_bounds(render, start + nr);
   start += i915_render->vbo_index; /* apply the current index bias */

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   if (!BEGIN_BATCH(2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(2)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | PRIM_INDIRECT_SEQUENTIAL |
             i915_render->hwprim | nr);
   OUT_BATCH(start); /* Beginning vertex index */

out:
   return;
}
493
/**
 * Used for normal and fallback emitting of indices.
 * If type is zero normal operation assumed.
 *
 * Every user index is biased by vbo_index ('o') and indices are packed
 * two 16-bit values per batch dword; line loops / quads / quad strips
 * are rewritten into primitives the hardware supports.
 */
static void
draw_generate_indices(struct vbuf_render *render, const uint16_t *indices,
                      uint32_t nr_indices, unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   /* NOTE(review): 'i915' looks unused but appears to be referenced by
    * the OUT_BATCH macro — do not remove; confirm against i915_batch.h. */
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned o = i915_render->vbo_index; /* index bias for this draw */

   switch (type) {
   case 0:
      /* Plain list: pairs per dword, lone trailing index if odd count. */
      for (i = 0; i + 1 < nr_indices; i += 2) {
         OUT_BATCH((o + indices[i]) | (o + indices[i + 1]) << 16);
      }
      if (i < nr_indices) {
         OUT_BATCH((o + indices[i]));
      }
      break;
   case MESA_PRIM_LINE_LOOP:
      if (nr_indices >= 2) {
         for (i = 1; i < nr_indices; i++)
            OUT_BATCH((o + indices[i - 1]) | (o + indices[i]) << 16);
         /* Closing segment back to the first index. */
         OUT_BATCH((o + indices[i - 1]) | (o + indices[0]) << 16);
      }
      break;
   case MESA_PRIM_QUADS:
      /* Each quad becomes triangles (0,1,3) and (1,2,3). */
      for (i = 0; i + 3 < nr_indices; i += 4) {
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 2]) | (o + indices[i + 3]) << 16);
      }
      break;
   case MESA_PRIM_QUAD_STRIP:
      /* Two triangles per strip step, advancing one quad at a time. */
      for (i = 0; i + 3 < nr_indices; i += 2) {
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 2]) << 16);
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 3]) << 16);
      }
      break;
   default:
      assert(0);
      break;
   }
}
542
543 static unsigned
draw_calc_nr_indices(uint32_t nr_indices,unsigned type)544 draw_calc_nr_indices(uint32_t nr_indices, unsigned type)
545 {
546 switch (type) {
547 case 0:
548 return nr_indices;
549 case MESA_PRIM_LINE_LOOP:
550 if (nr_indices >= 2)
551 return nr_indices * 2;
552 else
553 return 0;
554 case MESA_PRIM_QUADS:
555 return (nr_indices / 4) * 6;
556 case MESA_PRIM_QUAD_STRIP:
557 return ((nr_indices - 2) / 2) * 6;
558 default:
559 assert(0);
560 return 0;
561 }
562 }
563
/**
 * Callback exported to the draw module; draws an indexed primitive from
 * the current vbo.
 *
 * Side effects:
 *    Emits batch commands; may flush the batch and re-emit hardware state.
 */
static void
i915_vbuf_render_draw_elements(struct vbuf_render *render,
                               const uint16_t *indices, uint32_t nr_indices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned save_nr_indices;

   /* Keep the caller's count: the fallback expansion below changes
    * nr_indices, but index generation needs the original count. */
   save_nr_indices = nr_indices;

   nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, i915_render->vbo_max_index);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

   /* One dword for the command plus two 16-bit indices per dword. */
   if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      /* A fresh batch that still can't hold the draw is a hard error. */
      if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
         mesa_loge("i915: Failed to allocate space for %d indices in fresh "
                   "batch with %d bytes left\n",
                   nr_indices, (int)i915_winsys_batchbuffer_space(i915->batch));
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
             PRIM_INDIRECT_ELTS | nr_indices);
   draw_generate_indices(render, indices, save_nr_indices,
                         i915_render->fallback);

out:
   return;
}
611
612 static void
i915_vbuf_render_release_vertices(struct vbuf_render * render)613 i915_vbuf_render_release_vertices(struct vbuf_render *render)
614 {
615 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
616
617 i915_render->vbo_sw_offset += i915_render->vbo_max_used;
618 i915_render->vbo_max_used = 0;
619
620 /*
621 * Micro optimization, by calling update here we the offset change
622 * will be picked up on the next pipe_context::draw_*.
623 */
624 i915_vbuf_update_vbo_state(render);
625 }
626
627 static void
i915_vbuf_render_destroy(struct vbuf_render * render)628 i915_vbuf_render_destroy(struct vbuf_render *render)
629 {
630 struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
631 struct i915_context *i915 = i915_render->i915;
632 struct i915_winsys *iws = i915->iws;
633
634 if (i915_render->vbo) {
635 i915->vbo = NULL;
636 iws->buffer_unmap(iws, i915_render->vbo);
637 iws->buffer_destroy(iws, i915_render->vbo);
638 }
639
640 FREE(i915_render);
641 }
642
643 /**
644 * Create a new primitive render.
645 */
646 static struct vbuf_render *
i915_vbuf_render_create(struct i915_context * i915)647 i915_vbuf_render_create(struct i915_context *i915)
648 {
649 struct i915_vbuf_render *i915_render = CALLOC_STRUCT(i915_vbuf_render);
650
651 i915_render->i915 = i915;
652
653 i915_render->base.max_vertex_buffer_bytes = 4 * 4096;
654
655 /* NOTE: it must be such that state and vertices indices fit in a single
656 * batch buffer. 4096 is one batch buffer and 430 is the max amount of
657 * state in dwords. The result is the number of 16-bit indices which can
658 * fit in a single batch buffer.
659 */
660 i915_render->base.max_indices = (4096 - 430 * 4) / 2;
661
662 i915_render->base.get_vertex_info = i915_vbuf_render_get_vertex_info;
663 i915_render->base.allocate_vertices = i915_vbuf_render_allocate_vertices;
664 i915_render->base.map_vertices = i915_vbuf_render_map_vertices;
665 i915_render->base.unmap_vertices = i915_vbuf_render_unmap_vertices;
666 i915_render->base.set_primitive = i915_vbuf_render_set_primitive;
667 i915_render->base.draw_elements = i915_vbuf_render_draw_elements;
668 i915_render->base.draw_arrays = i915_vbuf_render_draw_arrays;
669 i915_render->base.release_vertices = i915_vbuf_render_release_vertices;
670 i915_render->base.destroy = i915_vbuf_render_destroy;
671
672 i915_render->vbo = NULL;
673 i915_render->vbo_ptr = NULL;
674 i915_render->vbo_size = 0;
675 i915_render->vbo_hw_offset = 0;
676 i915_render->vbo_sw_offset = 0;
677 i915_render->vbo_alloc_size = i915_render->base.max_vertex_buffer_bytes * 4;
678
679 return &i915_render->base;
680 }
681
682 /**
683 * Create a new primitive vbuf/render stage.
684 */
685 struct draw_stage *
i915_draw_vbuf_stage(struct i915_context * i915)686 i915_draw_vbuf_stage(struct i915_context *i915)
687 {
688 struct vbuf_render *render;
689 struct draw_stage *stage;
690
691 render = i915_vbuf_render_create(i915);
692 if (!render)
693 return NULL;
694
695 stage = draw_vbuf_stage(i915->draw, render);
696 if (!stage) {
697 render->destroy(render);
698 return NULL;
699 }
700 /** TODO JB: this shouldn't be here */
701 draw_set_render(i915->draw, render);
702
703 return stage;
704 }
705