/*
 * Copyright © 2022 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "freedreno_drmif.h"
#include "freedreno_drm_perfetto.h"
#include "freedreno_priv.h"

struct sa_bo {
   struct fd_bo base;
   struct fd_bo_heap *heap;
   unsigned offset;
};
FD_DEFINE_CAST(fd_bo, sa_bo);

#define HEAP_DEBUG 0

static void heap_clean(struct fd_bo_heap *heap, bool idle);
static void heap_dump(struct fd_bo_heap *heap);

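/**
 * Create a new suballocation heap.  The heap hands out small allocations
 * carved out of larger FD_BO_HEAP_BLOCK_SIZE backing BOs, which are
 * created lazily as suballocations land in them.
 */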
struct fd_bo_heap *
fd_bo_heap_new(struct fd_device *dev, uint32_t flags)
{
   struct fd_bo_heap *heap;

   /* We cannot suballocate shared buffers! Implicit sync is not supported! */
   assert(!(flags & FD_BO_SHARED));

   /* No internal buffers either, we need userspace fencing: */
   assert(!(flags & _FD_BO_NOSYNC));

   heap = calloc(1, sizeof(*heap));

   heap->dev = dev;
   heap->flags = flags;
   simple_mtx_init(&heap->lock, mtx_plain);
   list_inithead(&heap->freelist);

   /* Note that util_vma_heap_init doesn't like offset==0, so we shift the
    * entire range by one block size (see block_idx()):
    */
   util_vma_heap_init(&heap->heap, FD_BO_HEAP_BLOCK_SIZE,
                      FD_BO_HEAP_BLOCK_SIZE * ARRAY_SIZE(heap->blocks));
   heap->heap.alloc_high = false;
   /* Don't allow allocations to span backing blocks: */
   heap->heap.nospan_shift = ffs(FD_BO_HEAP_BLOCK_SIZE) - 1;

   heap_dump(heap);

   return heap;
}

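/**
 * Tear down a suballocation heap: drain the freelist, releasing any pending
 * suballocations, then drop the backing blocks.
 */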
void fd_bo_heap_destroy(struct fd_bo_heap *heap)
{
   /* drain the freelist: */
   heap_clean(heap, false);

   util_vma_heap_finish(&heap->heap);
   for (unsigned i = 0; i < ARRAY_SIZE(heap->blocks); i++)
      if (heap->blocks[i])
         fd_bo_del(heap->blocks[i]);
   free(heap);
}

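/* Has the suballocated bo become idle, per its userspace fence state?
 * Suballocations can only be recycled once idle:
 */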
static bool
sa_idle(struct fd_bo *bo)
{
   enum fd_bo_state state = fd_bo_state(bo);
   assert(state != FD_BO_STATE_UNKNOWN);
   return state == FD_BO_STATE_IDLE;
}

/**
 * The backing block is determined by the offset within the heap, since all
 * the blocks are equal size
 */
static unsigned
block_idx(struct sa_bo *s)
{
   /* The vma allocator doesn't like offset=0 so the range is shifted up
    * by one block size:
    */
   return (s->offset / FD_BO_HEAP_BLOCK_SIZE) - 1;
}

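/* Offset of the suballocation within its backing block: */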
static unsigned
block_offset(struct sa_bo *s)
{
   return s->offset % FD_BO_HEAP_BLOCK_SIZE;
}

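/* Debug helper, dumps the freelist length and vma heap state when
 * HEAP_DEBUG is enabled:
 */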
static void
heap_dump(struct fd_bo_heap *heap)
{
   if (!HEAP_DEBUG)
      return;
   fprintf(stderr, "HEAP[%x]: freelist: %u\n", heap->flags,
           list_length(&heap->freelist));
   util_vma_heap_print(&heap->heap, stderr, "",
                       FD_BO_HEAP_BLOCK_SIZE * ARRAY_SIZE(heap->blocks));
}

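/* Actually free a suballocation, returning its range to the vma heap and
 * dropping the reference to the backing block.  Called from heap_clean()
 * with the heap lock held:
 */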
static void
sa_release(struct fd_bo *bo)
{
   struct sa_bo *s = to_sa_bo(bo);

   simple_mtx_assert_locked(&s->heap->lock);

   /*
    * We don't track heap allocs in valgrind
    * VG_BO_FREE(bo);
    */

   fd_bo_fini_fences(bo);

   if (HEAP_DEBUG)
      mesa_logi("release: %08x-%x idx=%d", s->offset, bo->size, block_idx(s));

   util_vma_heap_free(&s->heap->heap, s->offset, bo->size);

   /* The BO has already been moved ACTIVE->NONE, now move it back to heap: */
   fd_alloc_log(bo, FD_ALLOC_NONE, FD_ALLOC_HEAP);

   /* Drop our reference to the backing block object: */
   fd_bo_del(s->heap->blocks[block_idx(s)]);

   list_del(&bo->node);

   if ((++s->heap->cnt % 256) == 0)
      heap_dump(s->heap);

   free(bo);
}

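/* madvise is a no-op for suballocated bos, we just report the requested
 * state back:
 */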
static int
sa_madvise(struct fd_bo *bo, int willneed)
{
   return willneed;
}

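/* The iova of a suballocation is the backing block's iova plus the offset
 * within that block:
 */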
static uint64_t
sa_iova(struct fd_bo *bo)
{
   struct sa_bo *s = to_sa_bo(bo);

   return s->heap->blocks[block_idx(s)]->iova + block_offset(s);
}

static void
sa_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   /* No-op, kernel has a single name for the entire buffer we suballoc from */
}

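/* Destroy just queues the suballocation on the heap's freelist; the actual
 * release happens later in heap_clean(), once the bo is idle:
 */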
static void
sa_destroy(struct fd_bo *bo)
{
   struct fd_bo_heap *heap = to_sa_bo(bo)->heap;

   simple_mtx_lock(&heap->lock);
   list_addtail(&bo->node, &heap->freelist);
   simple_mtx_unlock(&heap->lock);
}

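/* Dispatch table for suballocated bos.  Note that bo->map is pre-initialized
 * in fd_bo_heap_alloc(), so in practice the map callback should rarely (if
 * ever) be reached:
 */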
static struct fd_bo_funcs heap_bo_funcs = {
   .madvise = sa_madvise,
   .iova = sa_iova,
   .map = fd_bo_map_os_mmap,
   .set_name = sa_set_name,
   .destroy = sa_destroy,
};

/**
 * Get the backing heap block of a suballocated bo
 */
struct fd_bo *
fd_bo_heap_block(struct fd_bo *bo)
{
   assert(suballoc_bo(bo));

   struct sa_bo *s = to_sa_bo(bo);
   return s->heap->blocks[block_idx(s)];
}

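/* Walk the freelist releasing suballocations.  With idle=true we stop at the
 * first bo which is still busy; with idle=false (used on heap destroy)
 * everything is released:
 */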
static void
heap_clean(struct fd_bo_heap *heap, bool idle)
{
   simple_mtx_lock(&heap->lock);
   foreach_bo_safe (bo, &heap->freelist) {
      /* It might be nice if we could keep freelist sorted by fence # */
      if (idle && !sa_idle(bo))
         break;
      sa_release(bo);
   }
   simple_mtx_unlock(&heap->lock);
}

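/**
 * Suballocate a bo from the heap.  The size is rounded up to
 * SUBALLOC_ALIGNMENT, a range is carved out of the vma heap, and the backing
 * block for that range is created on demand.  The returned bo shares the
 * backing block's mapping and iova.
 */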
struct fd_bo *
fd_bo_heap_alloc(struct fd_bo_heap *heap, uint32_t size, uint32_t flags)
{
   heap_clean(heap, true);

   /* util_vma does not like zero byte allocations, which we get, for
    * ex, with the initial query buffer allocation on pre-a5xx:
    */
   size = MAX2(size, SUBALLOC_ALIGNMENT);

   size = ALIGN(size, SUBALLOC_ALIGNMENT);

   simple_mtx_lock(&heap->lock);
   /* Allocate larger buffers from the bottom, and smaller buffers from top
    * to help limit fragmentation:
    *
    * (The 8k threshold is just a random guess, but seems to work ok)
    */
   heap->heap.alloc_high = (size <= 8 * 1024);
   uint64_t offset = util_vma_heap_alloc(&heap->heap, size, SUBALLOC_ALIGNMENT);
   if (!offset) {
      simple_mtx_unlock(&heap->lock);
      return NULL;
   }

   struct sa_bo *s = calloc(1, sizeof(*s));

   s->heap = heap;
   s->offset = offset;

   /* The vma heap should never hand us an allocation that spans two backing
    * blocks (see nospan_shift in fd_bo_heap_new()):
    */
   assert((s->offset / FD_BO_HEAP_BLOCK_SIZE) ==
          (s->offset + size - 1) / FD_BO_HEAP_BLOCK_SIZE);

   unsigned idx = block_idx(s);
   if (HEAP_DEBUG)
      mesa_logi("alloc: %08x-%x idx=%d", s->offset, size, idx);
   if (!heap->blocks[idx]) {
      heap->blocks[idx] = fd_bo_new(
         heap->dev, FD_BO_HEAP_BLOCK_SIZE, heap->flags | _FD_BO_HINT_HEAP,
         "heap-%x-block-%u", heap->flags, idx);
      if (heap->flags == RING_FLAGS)
         fd_bo_mark_for_dump(heap->blocks[idx]);
   }
   /* Take a reference to the backing obj: */
   fd_bo_ref(heap->blocks[idx]);
   simple_mtx_unlock(&heap->lock);

   struct fd_bo *bo = &s->base;

   bo->size = size;
   bo->funcs = &heap_bo_funcs;
   bo->handle = 1; /* dummy handle to make fd_bo_init_common() happy */
   bo->alloc_flags = flags;

   /* Pre-initialize mmap ptr, to avoid trying to os_mmap() */
   bo->map = ((uint8_t *)fd_bo_map(heap->blocks[idx])) + block_offset(s);

   fd_bo_init_common(bo, heap->dev);

   /* Replace the dummy handle with the suballoc marker handle: */
   bo->handle = FD_BO_SUBALLOC_HANDLE;

   fd_alloc_log(bo, FD_ALLOC_HEAP, FD_ALLOC_ACTIVE);

   return bo;
}