/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */


#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
#include "vmw_query.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum MOB memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR 2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR 2


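/**
 * struct vmw_buffer_relocation - Pending buffer address patch
 *
 * Records where in the command buffer a guest pointer or a MOB id
 * needs to be patched with the buffer's final address, which is only
 * known once the buffer has been validated at flush time.
 */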
struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   bool is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         SVGAMobId *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

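/**
 * struct vmw_ctx_validate_item - Entry in a per-context validation list
 *
 * Holds a reference to a surface or a shader, together with a flag
 * indicating whether the item is referenced by the command stream,
 * as opposed to being an internal, transfer-only reference.
 */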
struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   bool referenced;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct hash_table *hash;

#if MESA_DEBUG
   bool must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referenced by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because
    * it ran out of command space, but because a substantial amount of GMR
    * memory is already referenced.
    */
   bool preemptive_flush;
};


static inline struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


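/**
 * vmw_translate_to_pb_flags - Translate SVGA relocation flags to
 * pipebuffer usage flags
 *
 * @flags: A mask of SVGA_RELOC_READ / SVGA_RELOC_WRITE flags.
 */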
static inline enum pb_usage_flags
vmw_translate_to_pb_flags(unsigned flags)
{
   enum pb_usage_flags f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

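/**
 * vmw_swc_flush - Validate buffers and submit the batched commands
 *
 * @swc: The winsys context.
 * @pfence: If non-NULL, returns a reference to the fence of this submission.
 *
 * Validates all buffers on the validation list, applies the pending
 * relocations, submits the command buffer to the kernel and fences the
 * validated buffers. Finally resets all per-batch state, including the
 * memory accounting used to trigger preemptive flushes.
 */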
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   /*
    * If we hit a retry, lock the mutex and retry immediately.
    * If we then still hit a retry, sleep until another thread
    * wakes us up after it has released its buffers from the
    * validate list.
    *
    * If we hit another error condition, we still need to broadcast since
    * pb_validate_validate releases validated buffers in its error path.
    */

   ret = pb_validate_validate(vswc->validate);
   if (ret != PIPE_OK) {
      mtx_lock(&vws->cs_mutex);
      while (ret == PIPE_ERROR_RETRY) {
         ret = pb_validate_validate(vswc->validate);
         if (ret == PIPE_ERROR_RETRY) {
            cnd_wait(&vws->cs_cond, &vws->cs_mutex);
         }
      }
      if (ret != PIPE_OK) {
         cnd_broadcast(&vws->cs_cond);
      }
      mtx_unlock(&vws->cs_mutex);
   }

   assert(ret == PIPE_OK);
   if (ret == PIPE_OK) {

      /* Apply relocations */
      for (i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if (!vmw_dma_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence,
                           vswc->base.imported_fence_fd,
                           vswc->base.hints);

      pb_validate_fence(vswc->validate, fence);
      mtx_lock(&vws->cs_mutex);
      cnd_broadcast(&vws->cs_cond);
      mtx_unlock(&vws->cs_mutex);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for (i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   _mesa_hash_table_clear(vswc->hash, NULL);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for (i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#if MESA_DEBUG
   vswc->must_flush = false;
   debug_flush_flush(vswc->fctx);
#endif
   swc->hints &= ~SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   swc->hints &= ~SVGA_HINT_FLAG_EXPORT_FENCE_FD;
   vswc->preemptive_flush = false;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if (vswc->base.imported_fence_fd != -1) {
      close(vswc->base.imported_fence_fd);
      vswc->base.imported_fence_fd = -1;
   }

   if (pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}


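/**
 * vmw_swc_reserve - Reserve command buffer space and relocation slots
 *
 * @swc: The winsys context.
 * @nr_bytes: Number of command bytes to reserve.
 * @nr_relocs: Number of relocation slots to reserve in each of the
 * surface, shader and region relocation lists.
 *
 * Returns a pointer to the reserved command space, or NULL if the caller
 * needs to flush first, either because space ran out or because a
 * preemptive flush was requested. A minimal sketch of the intended
 * reserve/commit round trip (the command type here is hypothetical):
 *
 *    SVGA3dCmdHeader *cmd = swc->reserve(swc, sizeof(*cmd), 1);
 *    if (!cmd) {
 *       swc->flush(swc, NULL);
 *       cmd = swc->reserve(swc, sizeof(*cmd), 1);
 *    }
 *    ... fill in the command and emit relocations ...
 *    swc->commit(swc);
 */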
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#if MESA_DEBUG
   /* Check if somebody forgot to check the previous failure */
   if (vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   assert(nr_bytes <= vswc->command.size);
   if (nr_bytes > vswc->command.size)
      return NULL;

   if (vswc->preemptive_flush ||
       vswc->command.used + nr_bytes > vswc->command.size ||
       vswc->surface.used + nr_relocs > vswc->surface.size ||
       vswc->shader.used + nr_relocs > vswc->shader.size ||
       vswc->region.used + nr_relocs > vswc->region.size) {
#if MESA_DEBUG
      vswc->must_flush = true;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}

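/**
 * vmw_swc_get_command_buffer_size - Return the number of command bytes
 * currently batched in the context command buffer.
 */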
static unsigned
vmw_swc_get_command_buffer_size(struct svga_winsys_context *swc)
{
   const struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   return vswc->command.used;
}

static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}

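/**
 * vmw_swc_add_validate_buffer - Add a buffer to the validation list
 *
 * @vswc: The winsys context.
 * @pb_buf: The buffer to add.
 * @flags: A mask of SVGA_RELOC_READ / SVGA_RELOC_WRITE flags.
 *
 * Returns true if the buffer was not yet on the validation list, so
 * that the caller can account its size exactly once.
 */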
static bool
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   ASSERTED enum pipe_error ret;
   unsigned translated_flags;
   bool already_present;

   translated_flags = vmw_translate_to_pb_flags(flags);
   ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags,
                                vswc->hash, &already_present);
   assert(ret == PIPE_OK);
   return !already_present;
}

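/**
 * vmw_swc_region_relocation - Stage a GMR (guest pointer) relocation
 *
 * @swc: The winsys context.
 * @where: Location in the command buffer to receive the guest pointer.
 * @buffer: The buffer the guest pointer should point into.
 * @offset: Offset into the buffer.
 * @flags: A mask of SVGA_RELOC_* flags.
 *
 * Also adds the buffer to the validation list and requests a preemptive
 * flush once a sizable fraction of the GMR pool is referenced.
 */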
static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = false;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->base.size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = true;
   }

#if MESA_DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}

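/**
 * vmw_swc_mob_relocation - Stage a MOB id relocation
 *
 * @swc: The winsys context.
 * @id: Location in the command buffer to receive the MOB id, or NULL.
 * @offset_into_mob: Location to receive the offset into the MOB, or NULL,
 * in which case the offset must be zero.
 * @buffer: The buffer backing the MOB.
 * @offset: Offset into the buffer.
 * @flags: A mask of SVGA_RELOC_* flags.
 *
 * Also adds the buffer to the validation list and requests a preemptive
 * flush once a sizable fraction of the maximum MOB memory is referenced.
 */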
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;
   struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);

   if (id) {
      assert(vswc->region.staged < vswc->region.reserved);

      reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      reloc->mob.id = id;
      reloc->mob.offset_into_mob = offset_into_mob;

      /*
       * pb_validate holds a refcount to the buffer, so no need to
       * refcount it again in the relocation.
       */
      reloc->buffer = pb_buffer;
      reloc->offset = offset;
      reloc->is_mob = true;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
      vswc->seen_mobs += pb_buffer->base.size;

      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_mobs >=
          vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = true;
   }

#if MESA_DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}


/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc: Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 * we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = false;
      p_atomic_dec(&vsurf->validated);
   }
}

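/**
 * vmw_swc_surface_only_relocation - Relocate a surface id
 *
 * @swc: The winsys context.
 * @where: Location in the command buffer to receive the surface id, or NULL.
 * @vsurf: The surface.
 * @flags: A mask of SVGA_RELOC_* flags.
 *
 * Adds the surface to the validation list on first use, accounts its
 * size for preemptive flushing, and marks it as referenced by the
 * command stream unless the relocation is internal.
 */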
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = false;

      _mesa_hash_table_insert(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
          vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = true;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = true;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}

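/**
 * vmw_swc_surface_relocation - Relocate a surface id and its backing MOB
 *
 * @swc: The winsys context.
 * @where: Location to receive the surface id, or NULL.
 * @mobid: Location to receive the backup buffer's MOB id, or NULL.
 * @surface: The surface, or NULL to emit SVGA3D_INVALID_ID.
 * @flags: A mask of SVGA_RELOC_* flags.
 *
 * For guest-backed surfaces, also stages a MOB relocation so that the
 * backup buffer ends up on the validation list and is properly fenced.
 */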
static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if (!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */

      mtx_lock(&vsurf->mutex);
      assert(vsurf->buf != NULL);

      /*
       * An internal reloc means that the surface transfer direction
       * is opposite to the MOB transfer direction...
       */
      if ((flags & SVGA_RELOC_INTERNAL) &&
          (flags & (SVGA_RELOC_READ | SVGA_RELOC_WRITE)) !=
          (SVGA_RELOC_READ | SVGA_RELOC_WRITE))
         flags ^= (SVGA_RELOC_READ | SVGA_RELOC_WRITE);
      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      mtx_unlock(&vsurf->mutex);
   }
}

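/**
 * vmw_swc_shader_relocation - Relocate a shader id and its backing buffer
 *
 * @swc: The winsys context.
 * @shid: Location to receive the shader id, or NULL.
 * @mobid: Location to receive the MOB id of the shader buffer, or NULL.
 * @offset: Location to receive the offset into the MOB, or NULL.
 * @shader: The shader, or NULL to emit SVGA3D_INVALID_ID.
 * @flags: Relocation flags.
 *
 * On non-vgpu10 devices the shader is also tracked on the context's
 * shader validation list.
 */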
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = false;

         _mesa_hash_table_insert(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = true;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

static void
vmw_swc_query_relocation(struct svga_winsys_context *swc,
                         SVGAMobId *id,
                         struct svga_winsys_gb_query *query)
{
   /* Queries are backed by one big MOB */
   vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
                          SVGA_RELOC_READ | SVGA_RELOC_WRITE);
}

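/**
 * vmw_swc_commit - Commit a previous reservation
 *
 * @swc: The winsys context.
 *
 * Moves the reserved command bytes and the staged relocations into the
 * used counts, making them part of the batch that the next flush will
 * submit. Must follow a successful vmw_swc_reserve().
 */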
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}


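/**
 * vmw_swc_destroy - Destroy a winsys context
 *
 * @swc: The winsys context.
 *
 * Releases all surfaces and shaders still on the validation lists,
 * destroys the hash table, the validation state and the kernel context,
 * and finally frees the context structure itself.
 */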
static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for (i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for (i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   _mesa_hash_table_destroy(vswc->hash, NULL);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#if MESA_DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}

/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode.
 * @bytecodeLen: The length of the bytecode.
 * @sgnInfo: The shader signature information.
 * @sgnLen: The length of the signature information.
 *
 * Creates an svga_winsys_gb_shader structure, allocates a buffer for the
 * shader code and copies the shader code into the buffer. Shader
 * resource creation is not done.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen,
                                     const SVGA3dDXShaderSignatureHeader *sgnInfo,
                                     uint32 sgnLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *shader;
   shader = vmw_svga_shader_create(&vswc->vws->base, shaderType, bytecode,
                                   bytecodeLen, sgnInfo, sgnLen);
   if (!shader)
      return NULL;

   shader->shid = shaderId;
   return svga_winsys_shader(shader);
}

/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Frees the shader structure and the buffer holding the shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
}

/**
 * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
 *
 * @swc: The winsys context.
 * @surface: The surface to be referenced.
 * @shader: The shader to be referenced.
 * @flags: Relocation flags.
 *
 * This callback is needed because shader backing buffers are sub-allocated,
 * and hence the kernel fencing is not sufficient. The buffers need to be put
 * on the context's validation list and fenced after command submission to
 * avoid reuse of busy shader buffers. In addition, surfaces need to be put
 * on the validation list in order for the driver to regard them as
 * referenced by the command stream.
 */
static enum pipe_error
vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
                                struct svga_winsys_surface *surface,
                                struct svga_winsys_gb_shader *shader,
                                unsigned flags)
{
   /**
    * Need to reserve one validation item for either the surface or
    * the shader.
    */
   if (!vmw_swc_reserve(swc, 0, 1))
      return PIPE_ERROR_OUT_OF_MEMORY;

   if (surface)
      vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
   else if (shader)
      vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);

   vmw_swc_commit(swc);

   return PIPE_OK;
}

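/**
 * vmw_svga_winsys_context_create - Allocate and initialize a winsys context
 *
 * @sws: The winsys screen.
 *
 * Sets up the command buffer, the relocation lists, the validation state
 * and the kernel context, and wires up the svga_winsys_context callbacks.
 * Returns NULL on failure.
 */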
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if (!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.get_command_buffer_size = vmw_swc_get_command_buffer_size;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.query_relocation = vmw_swc_query_relocation;
   vswc->base.query_bind = vmw_swc_query_bind;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
   vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;

   vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;

   if (sws->have_vgpu10)
      vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
   else
      vswc->base.cid = vmw_ioctl_context_create(vws);

   if (vswc->base.cid == -1)
      goto out_no_context;

   vswc->base.imported_fence_fd = -1;

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if (!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create_ptr_keys();
   if (!vswc->hash)
      goto out_no_hash;

#if MESA_DEBUG
   vswc->fctx = debug_flush_ctx_create(true, VMW_DEBUG_FLUSH_STACK);
#endif

   vswc->base.force_coherent = vws->force_coherent;
   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   vmw_ioctl_context_destroy(vws, vswc->base.cid);
out_no_context:
   FREE(vswc);
   return NULL;
}