/*
 * Copyright © 2021 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

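/* X() appends a _32 or _64 suffix to each name depending on PTRSZ, so the
 * functions below exist in both a 32-bit and a 64-bit flavor (presumably
 * this file is compiled/included once per pointer size).
 */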
#ifdef X
#undef X
#endif

#if PTRSZ == 32
#define X(n) n##_32
#else
#define X(n) n##_64
#endif

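/* Write a BO's iova into the command stream: two dwords for the 64-bit
 * variant, a single dword (truncated to the low 32 bits) for the 32-bit
 * variant.
 */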
static void X(emit_reloc_common)(struct fd_ringbuffer *ring, uint64_t iova)
{
#if PTRSZ == 64
   uint64_t *p64 = (uint64_t *)ring->cur;
   *p64 = iova;
   ring->cur += 2;
#else
   (*ring->cur++) = (uint32_t)iova;
#endif
}

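/* Emit a reloc from a non-stateobj ("nonobj") ringbuffer: write the iova
 * and track the referenced BO via fd_ringbuffer_sp_emit_bo_nonobj().
 */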
static void X(fd_ringbuffer_sp_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
                                                  const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc->iova);
   fd_ringbuffer_sp_emit_bo_nonobj(ring, reloc->bo);
}

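/* Emit a reloc from a stateobj ringbuffer: write the iova and track the
 * referenced BO via fd_ringbuffer_sp_emit_bo_obj().
 */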
static void X(fd_ringbuffer_sp_emit_reloc_obj)(struct fd_ringbuffer *ring,
                                               const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc->iova);
   fd_ringbuffer_sp_emit_bo_obj(ring, reloc->bo);
}

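/* Emit a reference from 'ring' to the target ringbuffer's backing BO, and
 * return the size in bytes of the referenced commands.
 */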
static uint32_t X(fd_ringbuffer_sp_emit_reloc_ring)(
   struct fd_ringbuffer *ring, struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct fd_ringbuffer_sp *fd_target = to_fd_ringbuffer_sp(target);
   struct fd_bo *bo;
   uint32_t size;

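   /* A growable target may be built from multiple cmd buffers; reference
    * the one selected by cmd_idx.  Otherwise reference the (single) backing
    * BO and the number of bytes emitted so far.
    */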
   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < fd_target->u.nr_cmds)) {
      bo = fd_target->u.cmds[cmd_idx].ring_bo;
      size = fd_target->u.cmds[cmd_idx].size;
   } else {
      bo = fd_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      X(fd_ringbuffer_sp_emit_reloc_obj)(ring, &(struct fd_reloc){
         .bo = bo,
         .iova = bo->iova + fd_target->offset,
         .offset = fd_target->offset,
      });
   } else {
      X(fd_ringbuffer_sp_emit_reloc_nonobj)(ring, &(struct fd_reloc){
         .bo = bo,
         .iova = bo->iova + fd_target->offset,
         .offset = fd_target->offset,
      });
   }

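   /* Only stateobj targets accumulate a deferred reloc_bos list that must
    * be propagated to the referencing ring; for anything else we are done.
    */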
   if (!(target->flags & _FD_RINGBUFFER_OBJECT))
      return size;

   struct fd_ringbuffer_sp *fd_ring = to_fd_ringbuffer_sp(ring);

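   /* If 'ring' is itself a stateobj, fold the target's BO references into
    * our own reloc_bos list (skipping BOs we already reference); otherwise
    * attach them to the current submit.
    */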
   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < fd_target->u.nr_reloc_bos; i++) {
         struct fd_bo *target_bo = fd_target->u.reloc_bos[i];
         if (!fd_ringbuffer_references_bo(ring, target_bo))
            APPEND(&fd_ring->u, reloc_bos, fd_bo_ref(target_bo));
      }
   } else {
      struct fd_submit_sp *fd_submit = to_fd_submit_sp(fd_ring->u.submit);

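      /* last_submit_seqno lets us skip re-appending the stateobj's BOs if
       * they were already added to the current submit.
       */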
      if (fd_submit->seqno != fd_target->u.last_submit_seqno) {
         for (unsigned i = 0; i < fd_target->u.nr_reloc_bos; i++) {
            fd_submit_append_bo(fd_submit, fd_target->u.reloc_bos[i]);
         }
         fd_target->u.last_submit_seqno = fd_submit->seqno;
      }

#ifndef NDEBUG
      /* Dealing with assert'd BOs is deferred until the submit is known,
       * since batch resource tracking attaches BOs directly to the submit
       * rather than to the long-lived stateobj.
       */
      for (unsigned i = 0; i < fd_target->u.nr_assert_bos; i++) {
         fd_ringbuffer_sp_assert_attached_nonobj(ring, fd_target->u.assert_bos[i]);
      }
#endif
   }

   return size;
}