/*
 * Copyright © 2012-2018 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include <assert.h>
#include <inttypes.h>

#include "util/hash_table.h"
#include "util/set.h"
#include "util/slab.h"

#include "drm/freedreno_ringbuffer.h"
#include "msm_priv.h"

/* The legacy implementation of submit/ringbuffer, which still does the
 * traditional reloc and cmd tracking
 */

#define INIT_SIZE 0x1000

struct msm_submit {
   struct fd_submit base;

   DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
   DECLARE_ARRAY(struct fd_bo *, bos);

   /* maps fd_bo to idx in bos table: */
   struct hash_table *bo_table;

   struct slab_mempool ring_pool;

   /* hash-set of associated rings: */
   struct set *ring_set;

   /* Allow for sub-allocation of stateobj ring buffers (ie. sharing
    * the same underlying bo).
    *
    * We also rely on the previous stateobj having been fully constructed
    * so we can reclaim extra space at its end.
    */
   struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);

/* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  Ie. a finalized buffer can have no more commands appended to
 * it.
 */
struct msm_cmd {
   struct fd_bo *ring_bo;
   unsigned size;
   DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};

static struct msm_cmd *
cmd_new(struct fd_bo *ring_bo)
{
   struct msm_cmd *cmd = malloc(sizeof(*cmd));
   cmd->ring_bo = fd_bo_ref(ring_bo);
   cmd->size = 0;
   cmd->nr_relocs = cmd->max_relocs = 0;
   cmd->relocs = NULL;
   return cmd;
}

static void
cmd_free(struct msm_cmd *cmd)
{
   fd_bo_del(cmd->ring_bo);
   free(cmd->relocs);
   free(cmd);
}

struct msm_ringbuffer {
   struct fd_ringbuffer base;

   /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
   unsigned offset;

   union {
      /* for _FD_RINGBUFFER_OBJECT case: */
      struct {
         struct fd_pipe *pipe;
         DECLARE_ARRAY(struct fd_bo *, reloc_bos);
         struct set *ring_set;
      };
      /* for other cases: */
      struct {
         struct fd_submit *submit;
         DECLARE_ARRAY(struct msm_cmd *, cmds);
      };
   } u;

   struct msm_cmd *cmd; /* current cmd */
   struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);

static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
                    enum fd_ringbuffer_flags flags);

/* add (if needed) bo to submit and return index: */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo)
{
   uint32_t idx;

   /* NOTE: it is legal to use the same bo on different threads for
    * different submits.  But it is not legal to use the same submit
    * from different threads.
    */
   idx = READ_ONCE(bo->idx);

   if (unlikely((idx >= submit->nr_submit_bos) ||
                (submit->submit_bos[idx].handle != bo->handle))) {
      uint32_t hash = _mesa_hash_pointer(bo);
      struct hash_entry *entry;

      entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
      if (entry) {
         /* found */
         idx = (uint32_t)(uintptr_t)entry->data;
      } else {
         idx = APPEND(
            submit, submit_bos,
            (struct drm_msm_gem_submit_bo){
               .flags = bo->reloc_flags & (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE),
               .handle = bo->handle,
               .presumed = 0,
            });
         APPEND(submit, bos, fd_bo_ref(bo));

         _mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
                                            (void *)(uintptr_t)idx);
      }
      bo->idx = idx;
   }

   return idx;
}

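/* add (if needed) ring to the set, taking a reference the first time it
 * is seen:
 */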
static void
append_ring(struct set *set, struct fd_ringbuffer *ring)
{
   uint32_t hash = _mesa_hash_pointer(ring);

   if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
      fd_ringbuffer_ref(ring);
      _mesa_set_add_pre_hashed(set, hash, ring);
   }
}

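/* Sub-allocate a streaming ringbuffer out of the submit's current suballoc
 * bo (aligning the offset to 16 bytes).  If there is no suballoc ring yet,
 * or not enough space remains in its bo, allocate a fresh bo instead.
 */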
static void
msm_submit_suballoc_ring_bo(struct fd_submit *submit,
                            struct msm_ringbuffer *msm_ring, uint32_t size)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   unsigned suballoc_offset = 0;
   struct fd_bo *suballoc_bo = NULL;

   if (msm_submit->suballoc_ring) {
      struct msm_ringbuffer *suballoc_ring =
         to_msm_ringbuffer(msm_submit->suballoc_ring);

      suballoc_bo = suballoc_ring->ring_bo;
      suballoc_offset =
         fd_ringbuffer_size(msm_submit->suballoc_ring) + suballoc_ring->offset;

      suballoc_offset = align(suballoc_offset, 0x10);

      if ((size + suballoc_offset) > suballoc_bo->size) {
         suballoc_bo = NULL;
      }
   }

   if (!suballoc_bo) {
      // TODO possibly larger size for streaming bo?
      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
      msm_ring->offset = 0;
   } else {
      msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
      msm_ring->offset = suballoc_offset;
   }

   struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;

   msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);

   if (old_suballoc_ring)
      fd_ringbuffer_del(old_suballoc_ring);
}

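/* Allocate a new ringbuffer for the submit out of its slab pool.  STREAMING
 * rb's are sub-allocated, GROWABLE rb's start out at INIT_SIZE and grow on
 * demand, everything else gets its own bo of the requested size.
 */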
static struct fd_ringbuffer *
msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
                          enum fd_ringbuffer_flags flags)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_ringbuffer *msm_ring;

   msm_ring = slab_alloc_st(&msm_submit->ring_pool);

   msm_ring->u.submit = submit;

   /* NOTE: needs to be before _suballoc_ring_bo() since it could
    * increment the refcnt of the current ring
    */
   msm_ring->base.refcnt = 1;

   if (flags & FD_RINGBUFFER_STREAMING) {
      msm_submit_suballoc_ring_bo(submit, msm_ring, size);
   } else {
      if (flags & FD_RINGBUFFER_GROWABLE)
         size = INIT_SIZE;

      msm_ring->offset = 0;
      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
   }

   if (!msm_ringbuffer_init(msm_ring, size, flags))
      return NULL;

   return &msm_ring->base;
}

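/* A stateobj rb can be used with many different submits, so its relocs
 * reference indices into the stateobj's own reloc_bos table.  At flush
 * time, remap them to indices in this submit's bo table.  The caller owns
 * (and must free) the returned copy of the reloc array.
 */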
static struct drm_msm_gem_submit_reloc *
handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
{
   struct msm_cmd *cmd = ring->cmd;
   struct drm_msm_gem_submit_reloc *relocs;

   relocs = malloc(cmd->nr_relocs * sizeof(*relocs));

   for (unsigned i = 0; i < cmd->nr_relocs; i++) {
      unsigned idx = cmd->relocs[i].reloc_idx;
      struct fd_bo *bo = ring->u.reloc_bos[idx];

      relocs[i] = cmd->relocs[i];
      relocs[i].reloc_idx = append_bo(submit, bo);
   }

   return relocs;
}

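/* Flush the submit: finalize the primary ring, walk the set of associated
 * rings to build the cmd and bo tables, attach the out-fence to every bo,
 * and hand everything to the kernel via DRM_MSM_GEM_SUBMIT.
 */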
static struct fd_fence *
msm_submit_flush(struct fd_submit *submit, int in_fence_fd, bool use_fence_fd)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
   struct drm_msm_gem_submit req = {
      .flags = msm_pipe->pipe,
      .queueid = msm_pipe->queue_id,
   };
   int ret;

   finalize_current_cmd(submit->primary);
   append_ring(msm_submit->ring_set, submit->primary);

   unsigned nr_cmds = 0;
   unsigned nr_objs = 0;

   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         nr_cmds += 1;
         nr_objs += 1;
      } else {
         if (ring != submit->primary)
            finalize_current_cmd(ring);
         nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
      }
   }

   void *obj_relocs[nr_objs];
   struct drm_msm_gem_submit_cmd cmds[nr_cmds];
   unsigned i = 0, o = 0;

   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

      assert(i < nr_cmds);

      // TODO handle relocs:
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {

         assert(o < nr_objs);

         void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
         obj_relocs[o++] = relocs;

         cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
         cmds[i].submit_idx = append_bo(msm_submit, msm_ring->ring_bo);
         cmds[i].submit_offset = submit_offset(msm_ring->ring_bo, msm_ring->offset);
         cmds[i].size = offset_bytes(ring->cur, ring->start);
         cmds[i].pad = 0;
         cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
         cmds[i].relocs = VOID2U64(relocs);

         i++;
      } else {
         for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
            if (ring->flags & FD_RINGBUFFER_PRIMARY) {
               cmds[i].type = MSM_SUBMIT_CMD_BUF;
            } else {
               cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
            }
            struct fd_bo *ring_bo = msm_ring->u.cmds[j]->ring_bo;
            cmds[i].submit_idx = append_bo(msm_submit, ring_bo);
            cmds[i].submit_offset = submit_offset(ring_bo, msm_ring->offset);
            cmds[i].size = msm_ring->u.cmds[j]->size;
            cmds[i].pad = 0;
            cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
            cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);

            i++;
         }
      }
   }

   struct fd_fence *out_fence = fd_fence_new(submit->pipe, use_fence_fd);

   simple_mtx_lock(&fence_lock);
   for (unsigned j = 0; j < msm_submit->nr_bos; j++) {
      fd_bo_add_fence(msm_submit->bos[j], out_fence);
   }
   simple_mtx_unlock(&fence_lock);

   if (in_fence_fd != -1) {
      req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
      req.fence_fd = in_fence_fd;
   }

   if (out_fence->use_fence_fd) {
      req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
   }

   /* needs to be after get_cmd() as that could create bos/cmds table: */
   req.bos = VOID2U64(msm_submit->submit_bos),
   req.nr_bos = msm_submit->nr_submit_bos;
   req.cmds = VOID2U64(cmds), req.nr_cmds = nr_cmds;

   DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

   ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT, &req,
                             sizeof(req));
   if (ret) {
      ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
      fd_fence_del(out_fence);
      out_fence = NULL;
      msm_dump_submit(&req);
   } else if (!ret && out_fence) {
      out_fence->kfence = req.fence;
      out_fence->ufence = submit->fence;
      out_fence->fence_fd = req.fence_fd;
   }

   for (unsigned o = 0; o < nr_objs; o++)
      free(obj_relocs[o]);

   return out_fence;
}

static void
unref_rings(struct set_entry *entry)
{
   struct fd_ringbuffer *ring = (void *)entry->key;
   fd_ringbuffer_del(ring);
}

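/* Tear down the submit: drop references on the suballoc ring, the bo table,
 * the ring set (which unrefs each ring), and any bos still attached.
 */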
static void
msm_submit_destroy(struct fd_submit *submit)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);

   if (msm_submit->suballoc_ring)
      fd_ringbuffer_del(msm_submit->suballoc_ring);

   _mesa_hash_table_destroy(msm_submit->bo_table, NULL);
   _mesa_set_destroy(msm_submit->ring_set, unref_rings);

   // TODO it would be nice to have a way to assert() if all
   // rb's haven't been free'd back to the slab, because that is
   // an indication that we are leaking bo's
   slab_destroy(&msm_submit->ring_pool);

   for (unsigned i = 0; i < msm_submit->nr_bos; i++)
      fd_bo_del(msm_submit->bos[i]);

   free(msm_submit->submit_bos);
   free(msm_submit->bos);
   free(msm_submit);
}

static const struct fd_submit_funcs submit_funcs = {
   .new_ringbuffer = msm_submit_new_ringbuffer,
   .flush = msm_submit_flush,
   .destroy = msm_submit_destroy,
};

struct fd_submit *
msm_submit_new(struct fd_pipe *pipe)
{
   struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
   struct fd_submit *submit;

   msm_submit->bo_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
   msm_submit->ring_set =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   // TODO tune size:
   slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);

   submit = &msm_submit->base;
   submit->funcs = &submit_funcs;

   return submit;
}

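/* Close out the current cmd of a (non-stateobj) ring, recording how many
 * bytes were actually emitted, and move it onto the ring's cmds table.
 */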
static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   if (!msm_ring->cmd)
      return;

   assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);

   msm_ring->cmd->size = offset_bytes(ring->cur, ring->start);
   APPEND(&msm_ring->u, cmds, msm_ring->cmd);
   msm_ring->cmd = NULL;
}

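/* Grow a GROWABLE ring by finalizing the current cmd and switching to a
 * freshly allocated bo; each finalized cmd becomes its own entry in the
 * submit's cmds table at flush time.
 */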
static void
msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe = msm_ring->u.submit->pipe;

   assert(ring->flags & FD_RINGBUFFER_GROWABLE);

   finalize_current_cmd(ring);

   fd_bo_del(msm_ring->ring_bo);
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   ring->start = fd_bo_map(msm_ring->ring_bo);
   ring->end = &(ring->start[size / 4]);
   ring->cur = ring->start;
   ring->size = size;
}

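/* Record a reloc for the current dword.  Stateobj rings only track the
 * referenced bo locally (the submit bo index is patched in at flush time),
 * other rings resolve the bo index immediately.  On 64bit pipes a second
 * reloc is emitted for the upper 32 bits of the iova.
 */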
static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
                          const struct fd_reloc *reloc)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe;
   unsigned reloc_idx;

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      unsigned idx = APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));

      /* this gets fixed up at submit->flush() time, since this state-
       * object rb can be used with many different submits
       */
      reloc_idx = idx;

      pipe = msm_ring->u.pipe;
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      reloc_idx = append_bo(msm_submit, reloc->bo);

      pipe = msm_ring->u.submit->pipe;
   }

   APPEND(msm_ring->cmd, relocs,
          (struct drm_msm_gem_submit_reloc){
             .reloc_idx = reloc_idx,
             .reloc_offset = reloc->offset,
             .or = reloc->orval,
             .shift = reloc->shift,
             .submit_offset =
                offset_bytes(ring->cur, ring->start) + msm_ring->offset,
          });

   ring->cur++;

   if (pipe->is_64bit) {
      APPEND(msm_ring->cmd, relocs,
             (struct drm_msm_gem_submit_reloc){
                .reloc_idx = reloc_idx,
                .reloc_offset = reloc->offset,
                .or = reloc->orval >> 32,
                .shift = reloc->shift - 32,
                .submit_offset =
                   offset_bytes(ring->cur, ring->start) + msm_ring->offset,
             });

      ring->cur++;
   }
}

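/* Recursively add the rings referenced by a stateobj (and by any nested
 * stateobjs) to the submit's ring set.
 */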
static void
append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);

   assert(target->flags & _FD_RINGBUFFER_OBJECT);

   set_foreach (msm_target->u.ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;

      append_ring(submit->ring_set, ring);

      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         append_stateobj_rings(submit, ring);
      }
   }
}

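/* Emit a reloc pointing at another ring (an IB target), returning the size
 * in bytes of the target cmdstream (0 if empty), and make sure the target
 * ring (and any stateobjs it references) ends up in the submit's ring set.
 */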
static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
                               struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_bo *bo;
   uint32_t size;

   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx]->ring_bo;
      size = msm_target->u.cmds[cmd_idx]->size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
                                      .bo = bo,
                                      .iova = bo->iova + msm_target->offset,
                                      .offset = msm_target->offset,
                                   });

   if (!size)
      return 0;

   if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
       !(ring->flags & _FD_RINGBUFFER_OBJECT)) {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      append_stateobj_rings(msm_submit, target);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      append_ring(msm_ring->u.ring_set, target);
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
      append_ring(msm_submit->ring_set, target);
   }

   return size;
}

static uint32_t
msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
   if (ring->flags & FD_RINGBUFFER_GROWABLE)
      return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
   return 1;
}

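/* Check whether there is still room to keep appending to this submit: on
 * kernels older than FD_VERSION_UNLIMITED_CMDS refuse once the ring is
 * nearly full, and in any case refuse once the bo table gets too large.
 */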
static bool
msm_ringbuffer_check_size(struct fd_ringbuffer *ring)
{
   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_submit *submit = msm_ring->u.submit;
   struct fd_pipe *pipe = submit->pipe;

   if ((fd_device_version(pipe->dev) < FD_VERSION_UNLIMITED_CMDS) &&
       ((ring->cur - ring->start) > (ring->size / 4 - 0x1000))) {
      return false;
   }

   if (to_msm_submit(submit)->nr_bos > MAX_ARRAY_SIZE / 2) {
      return false;
   }

   return true;
}

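/* Destroy a ringbuffer.  Stateobj rings own their reloc_bos and ring_set
 * and are heap allocated; other rings free their cmds and are returned to
 * the submit's slab pool.
 */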
static void
msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

   fd_bo_del(msm_ring->ring_bo);
   if (msm_ring->cmd)
      cmd_free(msm_ring->cmd);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
         fd_bo_del(msm_ring->u.reloc_bos[i]);
      }

      _mesa_set_destroy(msm_ring->u.ring_set, unref_rings);

      free(msm_ring->u.reloc_bos);
      free(msm_ring);
   } else {
      struct fd_submit *submit = msm_ring->u.submit;

      for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
         cmd_free(msm_ring->u.cmds[i]);
      }

      free(msm_ring->u.cmds);
      slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
   }
}

static const struct fd_ringbuffer_funcs ring_funcs = {
   .grow = msm_ringbuffer_grow,
   .emit_reloc = msm_ringbuffer_emit_reloc,
   .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
   .cmd_count = msm_ringbuffer_cmd_count,
   .check_size = msm_ringbuffer_check_size,
   .destroy = msm_ringbuffer_destroy,
};

static inline struct fd_ringbuffer *
msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
                    enum fd_ringbuffer_flags flags)
{
   struct fd_ringbuffer *ring = &msm_ring->base;

   assert(msm_ring->ring_bo);

   uint8_t *base = fd_bo_map(msm_ring->ring_bo);
   ring->start = (void *)(base + msm_ring->offset);
   ring->end = &(ring->start[size / 4]);
   ring->cur = ring->start;

   ring->size = size;
   ring->flags = flags;

   ring->funcs = &ring_funcs;

   msm_ring->u.cmds = NULL;
   msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;

   msm_ring->cmd = cmd_new(msm_ring->ring_bo);

   return ring;
}

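/* Create a stateobj (_FD_RINGBUFFER_OBJECT) ring, which is not tied to a
 * particular submit and can be referenced from many different submits.
 */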
struct fd_ringbuffer *
msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
   struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));

   msm_ring->u.pipe = pipe;
   msm_ring->offset = 0;
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
   msm_ring->base.refcnt = 1;

   msm_ring->u.reloc_bos = NULL;
   msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;

   msm_ring->u.ring_set =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
}