xref: /aosp_15_r20/external/mesa3d/src/intel/vulkan_hasvk/anv_allocator.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <assert.h>
28 #include <sys/mman.h>
29 
30 #include "anv_private.h"
31 
32 #include "util/anon_file.h"
33 #include "util/futex.h"
34 
35 #ifdef HAVE_VALGRIND
36 #define VG_NOACCESS_READ(__ptr) ({                       \
37    VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
38    __typeof(*(__ptr)) __val = *(__ptr);                  \
39    VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
40    __val;                                                \
41 })
42 #define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
43    VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
44    *(__ptr) = (__val);                                      \
45    VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
46 })
47 #else
48 #define VG_NOACCESS_READ(__ptr) (*(__ptr))
49 #define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
50 #endif
51 
52 #ifndef MAP_POPULATE
53 #define MAP_POPULATE 0
54 #endif
55 
56 /* Design goals:
57  *
58  *  - Lock free (except when resizing underlying bos)
59  *
60  *  - Constant time allocation with typically only one atomic
61  *
62  *  - Multiple allocation sizes without fragmentation
63  *
64  *  - Can grow while keeping addresses and offset of contents stable
65  *
66  *  - All allocations within one bo so we can point one of the
67  *    STATE_BASE_ADDRESS pointers at it.
68  *
69  * The overall design is a two-level allocator: top level is a fixed size, big
70  * block (8k) allocator, which operates out of a bo.  Allocation is done by
71  * either pulling a block from the free list or growing the used range of the
72  * bo.  Growing the range may run out of space in the bo which we then need to
73  * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
74  * we need to keep all pointers and contents in the old map valid.  GEM bos in
75  * general can't grow, but we use a trick: we create a memfd and use ftruncate
76  * to grow it as necessary.  We mmap the new size and then create a gem bo for
77  * it using the new gem userptr ioctl.  Without heavy-handed locking around
78  * our allocation fast-path, there isn't really a way to munmap the old mmap,
79  * so we just keep it around until garbage collection time.  While the block
80  * allocator is lockless for normal operations, we block other threads trying
81  * to allocate while we're growing the map.  It shouldn't happen often, and
82  * growing is fast anyway.
83  *
84  * At the next level we can use various sub-allocators.  The state pool is a
85  * pool of smaller, fixed size objects, which operates much like the block
86  * pool.  It uses a free list for freeing objects, but when it runs out of
87  * space it just allocates a new block from the block pool.  This allocator is
88  * intended for longer lived state objects such as SURFACE_STATE and most
89  * other persistent state objects in the API.  We may need to track more info
90  * with these object and a pointer back to the CPU object (eg VkImage).  In
91  * those cases we just allocate a slightly bigger object and put the extra
92  * state after the GPU state object.
93  *
94  * The state stream allocator works similarly to how the i965 DRI driver streams
95  * all its state.  Even with Vulkan, we need to emit transient state (whether
96  * surface state base or dynamic state base), and for that we can just get a
97  * block and fill it up.  These cases are local to a command buffer and the
98  * sub-allocator need not be thread safe.  The streaming allocator gets a new
99  * block when it runs out of space and chains them together so they can be
100  * easily freed.
101  */
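/* Illustrative sketch (not part of this driver): the "growable BO" trick
 * described above, reduced to plain Linux file-growing plus mmap.  The
 * helper name grow_backing_map() is hypothetical; the real code goes
 * through os_create_anonymous_file() and wraps the mapping in a GEM
 * userptr BO.
 *
 *    #include <sys/mman.h>
 *    #include <unistd.h>
 *
 *    static void *
 *    grow_backing_map(int memfd, size_t new_size)
 *    {
 *       // Grow the backing file, then map the larger size at a new address.
 *       // The old mapping is deliberately leaked so pointers into it stay
 *       // valid, exactly as the comment above explains.
 *       if (ftruncate(memfd, new_size) < 0)
 *          return NULL;
 *       return mmap(NULL, new_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, memfd, 0);
 *    }
 */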
102 
103 /* Allocations are always at least 64 byte aligned, so UINT32_MAX can never be
104  * a valid allocation offset.  We use it to indicate the free list is empty. */
105 #define EMPTY UINT32_MAX
106 
107 /* On FreeBSD PAGE_SIZE is already defined in
108  * /usr/include/machine/param.h that is indirectly
109  * included here.
110  */
111 #ifndef PAGE_SIZE
112 #define PAGE_SIZE 4096
113 #endif
114 
115 struct anv_mmap_cleanup {
116    void *map;
117    size_t size;
118 };
119 
120 static inline uint32_t
121 ilog2_round_up(uint32_t value)
122 {
123    assert(value != 0);
124    return 32 - __builtin_clz(value - 1);
125 }
126 
127 static inline uint32_t
128 round_to_power_of_two(uint32_t value)
129 {
130    return 1 << ilog2_round_up(value);
131 }
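/* Worked values for the two helpers above (assuming value > 1, since
 * __builtin_clz(0) is undefined): ilog2_round_up(4096) == 12,
 * ilog2_round_up(4097) == 13, and round_to_power_of_two(100) == 128.
 */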
132 
133 struct anv_state_table_cleanup {
134    void *map;
135    size_t size;
136 };
137 
138 #define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
139 #define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))
140 
141 static VkResult
142 anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);
143 
144 VkResult
145 anv_state_table_init(struct anv_state_table *table,
146                     struct anv_device *device,
147                     uint32_t initial_entries)
148 {
149    VkResult result;
150 
151    table->device = device;
152 
153    /* Just make it 2GB up-front.  The Linux kernel won't actually back it
154     * with pages until we either map and fault on one of them or we use
155     * userptr and send a chunk of it off to the GPU.
156     */
157    table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
158    if (table->fd == -1)
159       return vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
160 
161    if (!u_vector_init(&table->cleanups, 8,
162                       sizeof(struct anv_state_table_cleanup))) {
163       result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
164       goto fail_fd;
165    }
166 
167    table->state.next = 0;
168    table->state.end = 0;
169    table->size = 0;
170 
171    uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
172    result = anv_state_table_expand_range(table, initial_size);
173    if (result != VK_SUCCESS)
174       goto fail_cleanups;
175 
176    return VK_SUCCESS;
177 
178  fail_cleanups:
179    u_vector_finish(&table->cleanups);
180  fail_fd:
181    close(table->fd);
182 
183    return result;
184 }
185 
186 static VkResult
187 anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
188 {
189    void *map;
190    struct anv_state_table_cleanup *cleanup;
191 
192    /* Assert that we only ever grow the pool */
193    assert(size >= table->state.end);
194 
195    /* Make sure that we don't go outside the bounds of the memfd */
196    if (size > BLOCK_POOL_MEMFD_SIZE)
197       return vk_error(table->device, VK_ERROR_OUT_OF_HOST_MEMORY);
198 
199    cleanup = u_vector_add(&table->cleanups);
200    if (!cleanup)
201       return vk_error(table->device, VK_ERROR_OUT_OF_HOST_MEMORY);
202 
203    *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
204 
205    /* Just leak the old map until we destroy the pool.  We can't munmap it
206     * without races or imposing locking on the block allocate fast path. On
207     * the whole the leaked maps add up to less than the size of the
208     * current map.  MAP_POPULATE seems like the right thing to do, but we
209     * should try to get some numbers.
210     */
211    map = mmap(NULL, size, PROT_READ | PROT_WRITE,
212               MAP_SHARED | MAP_POPULATE, table->fd, 0);
213    if (map == MAP_FAILED) {
214       return vk_errorf(table->device, VK_ERROR_OUT_OF_HOST_MEMORY,
215                        "mmap failed: %m");
216    }
217 
218    cleanup->map = map;
219    cleanup->size = size;
220 
221    table->map = map;
222    table->size = size;
223 
224    return VK_SUCCESS;
225 }
226 
227 static VkResult
228 anv_state_table_grow(struct anv_state_table *table)
229 {
230    VkResult result = VK_SUCCESS;
231 
232    uint32_t used = align(table->state.next * ANV_STATE_ENTRY_SIZE, PAGE_SIZE);
233    uint32_t old_size = table->size;
234 
235    /* The block pool is always initialized to a nonzero size and this function
236     * is always called after initialization.
237     */
238    assert(old_size > 0);
239 
240    uint32_t required = MAX2(used, old_size);
241    if (used * 2 <= required) {
242       /* If we're in this case then this isn't the first allocation and we
243        * already have enough space on both sides to hold double what we
244        * have allocated.  There's nothing for us to do.
245        */
246       goto done;
247    }
248 
249    uint32_t size = old_size * 2;
250    while (size < required)
251       size *= 2;
252 
253    assert(size > table->size);
254 
255    result = anv_state_table_expand_range(table, size);
256 
257  done:
258    return result;
259 }
260 
261 void
262 anv_state_table_finish(struct anv_state_table *table)
263 {
264    struct anv_state_table_cleanup *cleanup;
265 
266    u_vector_foreach(cleanup, &table->cleanups) {
267       if (cleanup->map)
268          munmap(cleanup->map, cleanup->size);
269    }
270 
271    u_vector_finish(&table->cleanups);
272 
273    close(table->fd);
274 }
275 
276 VkResult
277 anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
278                     uint32_t count)
279 {
280    struct anv_block_state state, old, new;
281    VkResult result;
282 
283    assert(idx);
284 
285    while(1) {
286       state.u64 = __sync_fetch_and_add(&table->state.u64, count);
287       if (state.next + count <= state.end) {
288          assert(table->map);
289          struct anv_free_entry *entry = &table->map[state.next];
290          for (int i = 0; i < count; i++) {
291             entry[i].state.idx = state.next + i;
292          }
293          *idx = state.next;
294          return VK_SUCCESS;
295       } else if (state.next <= state.end) {
296          /* We allocated the first block outside the pool so we have to grow
297           * the pool.  pool_state->next acts as a mutex: threads who try to
298           * allocate now will get block indexes above the current limit and
299           * hit futex_wait below.
300           */
301          new.next = state.next + count;
302          do {
303             result = anv_state_table_grow(table);
304             if (result != VK_SUCCESS)
305                return result;
306             new.end = table->size / ANV_STATE_ENTRY_SIZE;
307          } while (new.end < new.next);
308 
309          old.u64 = __sync_lock_test_and_set(&table->state.u64, new.u64);
310          if (old.next != state.next)
311             futex_wake(&table->state.end, INT32_MAX);
312       } else {
313          futex_wait(&table->state.end, state.end, NULL);
314          continue;
315       }
316    }
317 }
318 
319 void
320 anv_free_list_push(union anv_free_list *list,
321                    struct anv_state_table *table,
322                    uint32_t first, uint32_t count)
323 {
324    union anv_free_list current, old, new;
325    uint32_t last = first;
326 
327    for (uint32_t i = 1; i < count; i++, last++)
328       table->map[last].next = last + 1;
329 
330    old.u64 = list->u64;
331    do {
332       current = old;
333       table->map[last].next = current.offset;
334       new.offset = first;
335       new.count = current.count + 1;
336       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
337    } while (old.u64 != current.u64);
338 }
339 
340 struct anv_state *
341 anv_free_list_pop(union anv_free_list *list,
342                   struct anv_state_table *table)
343 {
344    union anv_free_list current, new, old;
345 
346    current.u64 = list->u64;
347    while (current.offset != EMPTY) {
348       __sync_synchronize();
349       new.offset = table->map[current.offset].next;
350       new.count = current.count + 1;
351       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
352       if (old.u64 == current.u64) {
353          struct anv_free_entry *entry = &table->map[current.offset];
354          return &entry->state;
355       }
356       current = old;
357    }
358 
359    return NULL;
360 }
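/* Illustrative sketch (not part of this driver): the same lock-free pop,
 * written against plain C11 atomics.  The packed 64-bit {offset, count}
 * word is what makes the compare-and-swap safe: bumping "count" on every
 * successful pop prevents the classic ABA problem when another thread
 * pops and re-pushes the same index between our load and our CAS.  All
 * names here are hypothetical.
 *
 *    #include <stdatomic.h>
 *    #include <stdbool.h>
 *    #include <stdint.h>
 *
 *    union list_head {
 *       struct { uint32_t offset; uint32_t count; };
 *       uint64_t u64;
 *    };
 *
 *    static bool
 *    lifo_pop(_Atomic uint64_t *head, const uint32_t *next, uint32_t *out)
 *    {
 *       union list_head cur = { .u64 = atomic_load(head) };
 *       while (cur.offset != UINT32_MAX) {       // UINT32_MAX == empty
 *          union list_head new = {
 *             .offset = next[cur.offset],        // unlink the first entry
 *             .count  = cur.count + 1,           // ABA tag
 *          };
 *          // On failure, cur.u64 is reloaded with the current head value.
 *          if (atomic_compare_exchange_weak(head, &cur.u64, new.u64)) {
 *             *out = cur.offset;
 *             return true;
 *          }
 *       }
 *       return false;
 *    }
 */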
361 
362 static VkResult
363 anv_block_pool_expand_range(struct anv_block_pool *pool,
364                             uint32_t center_bo_offset, uint32_t size);
365 
366 VkResult
367 anv_block_pool_init(struct anv_block_pool *pool,
368                     struct anv_device *device,
369                     const char *name,
370                     uint64_t start_address,
371                     uint32_t initial_size)
372 {
373    VkResult result;
374 
375    pool->name = name;
376    pool->device = device;
377    pool->use_relocations = anv_use_relocations(device->physical);
378    pool->nbos = 0;
379    pool->size = 0;
380    pool->center_bo_offset = 0;
381    pool->start_address = intel_canonical_address(start_address);
382    pool->map = NULL;
383 
384    if (!pool->use_relocations) {
385       pool->bo = NULL;
386       pool->fd = -1;
387    } else {
388       /* Just make it 2GB up-front.  The Linux kernel won't actually back it
389        * with pages until we either map and fault on one of them or we use
390        * userptr and send a chunk of it off to the GPU.
391        */
392       pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
393       if (pool->fd == -1)
394          return vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
395 
396       pool->wrapper_bo = (struct anv_bo) {
397          .refcount = 1,
398          .offset = -1,
399          .is_wrapper = true,
400       };
401       pool->bo = &pool->wrapper_bo;
402    }
403 
404    if (!u_vector_init(&pool->mmap_cleanups, 8,
405                       sizeof(struct anv_mmap_cleanup))) {
406       result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
407       goto fail_fd;
408    }
409 
410    pool->state.next = 0;
411    pool->state.end = 0;
412    pool->back_state.next = 0;
413    pool->back_state.end = 0;
414 
415    result = anv_block_pool_expand_range(pool, 0, initial_size);
416    if (result != VK_SUCCESS)
417       goto fail_mmap_cleanups;
418 
419    /* Make the entire pool available in the front of the pool.  If back
420     * allocation needs to use this space, the "ends" will be re-arranged.
421     */
422    pool->state.end = pool->size;
423 
424    return VK_SUCCESS;
425 
426  fail_mmap_cleanups:
427    u_vector_finish(&pool->mmap_cleanups);
428  fail_fd:
429    if (pool->fd >= 0)
430       close(pool->fd);
431 
432    return result;
433 }
434 
435 void
436 anv_block_pool_finish(struct anv_block_pool *pool)
437 {
438    anv_block_pool_foreach_bo(bo, pool) {
439       assert(bo->refcount == 1);
440       anv_device_release_bo(pool->device, bo);
441    }
442 
443    struct anv_mmap_cleanup *cleanup;
444    u_vector_foreach(cleanup, &pool->mmap_cleanups)
445       munmap(cleanup->map, cleanup->size);
446    u_vector_finish(&pool->mmap_cleanups);
447 
448    if (pool->fd >= 0)
449       close(pool->fd);
450 }
451 
452 static VkResult
453 anv_block_pool_expand_range(struct anv_block_pool *pool,
454                             uint32_t center_bo_offset, uint32_t size)
455 {
456    /* Assert that we only ever grow the pool */
457    assert(center_bo_offset >= pool->back_state.end);
458    assert(size - center_bo_offset >= pool->state.end);
459 
460    /* Assert that we don't go outside the bounds of the memfd */
461    assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
462    assert(!pool->use_relocations ||
463           size - center_bo_offset <=
464           BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
465 
466    /* For state pool BOs we have to be a bit careful about where we place them
467     * in the GTT.  There are two documented workarounds for state base address
468     * placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
469     * which state that those two base addresses do not support 48-bit
470     * addresses and need to be placed in the bottom 32-bit range.
471     * Unfortunately, this is not quite accurate.
472     *
473     * The real problem is that we always set the size of our state pools in
474     * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
475     * likely significantly smaller.  We do this because we do not know at the
476     * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
477     * the pool during command buffer building so we don't actually have a
478     * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS
479     * overflows 48 bits, the GPU appears to treat all accesses to the buffer
480     * as being out of bounds and returns zero.  For dynamic state, this
481     * usually just leads to rendering corruptions, but shaders that are all
482     * zero hang the GPU immediately.
483     *
484     * The easiest solution is to do exactly what the bogus workarounds say to
485     * do: restrict these buffers to 32-bit addresses.  We could also pin the
486     * BO to some particular location of our choosing, but that's significantly
487     * more work than just not setting a flag.  So, we explicitly DO NOT set
488     * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
489     * hard work for us.  When using softpin, we're in control and the fixed
490     * addresses we choose are fine for base addresses.
491     */
492    enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
493    if (pool->use_relocations)
494       bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;
495 
496    if (!pool->use_relocations) {
497       uint32_t new_bo_size = size - pool->size;
498       struct anv_bo *new_bo;
499       assert(center_bo_offset == 0);
500       VkResult result = anv_device_alloc_bo(pool->device,
501                                             pool->name,
502                                             new_bo_size,
503                                             bo_alloc_flags |
504                                             ANV_BO_ALLOC_FIXED_ADDRESS |
505                                             ANV_BO_ALLOC_MAPPED |
506                                             ANV_BO_ALLOC_SNOOPED,
507                                             pool->start_address + pool->size,
508                                             &new_bo);
509       if (result != VK_SUCCESS)
510          return result;
511 
512       pool->bos[pool->nbos++] = new_bo;
513 
514       /* This pointer will always point to the first BO in the list */
515       pool->bo = pool->bos[0];
516    } else {
517       /* Just leak the old map until we destroy the pool.  We can't munmap it
518        * without races or imposing locking on the block allocate fast path. On
519        * the whole the leaked maps add up to less than the size of the
520        * current map.  MAP_POPULATE seems like the right thing to do, but we
521        * should try to get some numbers.
522        */
523       void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
524                        MAP_SHARED | MAP_POPULATE, pool->fd,
525                        BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
526       if (map == MAP_FAILED)
527          return vk_errorf(pool->device, VK_ERROR_MEMORY_MAP_FAILED,
528                           "mmap failed: %m");
529 
530       struct anv_bo *new_bo;
531       VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
532                                                            map, size,
533                                                            bo_alloc_flags,
534                                                            0 /* client_address */,
535                                                            &new_bo);
536       if (result != VK_SUCCESS) {
537          munmap(map, size);
538          return result;
539       }
540 
541       struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
542       if (!cleanup) {
543          munmap(map, size);
544          anv_device_release_bo(pool->device, new_bo);
545          return vk_error(pool->device, VK_ERROR_OUT_OF_HOST_MEMORY);
546       }
547       cleanup->map = map;
548       cleanup->size = size;
549 
550       /* Now that we mapped the new memory, we can write the new
551        * center_bo_offset back into pool and update pool->map. */
552       pool->center_bo_offset = center_bo_offset;
553       pool->map = map + center_bo_offset;
554 
555       pool->bos[pool->nbos++] = new_bo;
556       pool->wrapper_bo.map = new_bo;
557    }
558 
559    assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
560    pool->size = size;
561 
562    return VK_SUCCESS;
563 }
564 
565 /** Returns current memory map of the block pool.
566  *
567  * The returned pointer points to the map for the memory at the specified
568  * offset. The offset parameter is relative to the "center" of the block pool
569  * rather than the start of the block pool BO map.
570  */
571 void*
572 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size)
573 {
574    if (!pool->use_relocations) {
575       struct anv_bo *bo = NULL;
576       int32_t bo_offset = 0;
577       anv_block_pool_foreach_bo(iter_bo, pool) {
578          if (offset < bo_offset + iter_bo->size) {
579             bo = iter_bo;
580             break;
581          }
582          bo_offset += iter_bo->size;
583       }
584       assert(bo != NULL);
585       assert(offset >= bo_offset);
586       assert((offset - bo_offset) + size <= bo->size);
587 
588       return bo->map + (offset - bo_offset);
589    } else {
590       return pool->map + offset;
591    }
592 }
593 
594 /** Grows and re-centers the block pool.
595  *
596  * We grow the block pool in one or both directions in such a way that the
597  * following conditions are met:
598  *
599  *  1) The size of the entire pool is always a power of two.
600  *
601  *  2) The pool only grows on both ends.  Neither end can get
602  *     shortened.
603  *
604  *  3) At the end of the allocation, we have about twice as much space
605  *     allocated for each end as we have used.  This way the pool doesn't
606  *     grow too far in one direction or the other.
607  *
608  *  4) If the _alloc_back() has never been called, then the back portion of
609  *     the pool retains a size of zero.  (This makes it easier for users of
610  *     the block pool that only want a one-sided pool.)
611  *
612  *  5) We have enough space allocated for at least one more block in
613  *     whichever side `state` points to.
614  *
615  *  6) The center of the pool is always aligned to both the block_size of
616  *     the pool and a 4K CPU page.
617  */
618 static uint32_t
619 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
620                     uint32_t contiguous_size)
621 {
622    VkResult result = VK_SUCCESS;
623 
624    pthread_mutex_lock(&pool->device->mutex);
625 
626    assert(state == &pool->state || state == &pool->back_state);
627 
628    /* Gather a little usage information on the pool.  Since we may have
629     * threads waiting in queue to get some storage while we resize, it's
630     * actually possible that total_used will be larger than old_size.  In
631     * particular, block_pool_alloc() increments state->next prior to
632     * calling block_pool_grow, so this ensures that we get enough space for
633     * whichever side tries to grow the pool.
634     *
635     * We align to a page size because it makes it easier to do our
636     * calculations later in such a way that we stay page-aligned.
637     */
638    uint32_t back_used = align(pool->back_state.next, PAGE_SIZE);
639    uint32_t front_used = align(pool->state.next, PAGE_SIZE);
640    uint32_t total_used = front_used + back_used;
641 
642    assert(state == &pool->state || back_used > 0);
643 
644    uint32_t old_size = pool->size;
645 
646    /* The block pool is always initialized to a nonzero size and this function
647     * is always called after initialization.
648     */
649    assert(old_size > 0);
650 
651    const uint32_t old_back = pool->center_bo_offset;
652    const uint32_t old_front = old_size - pool->center_bo_offset;
653 
654    /* The back_used and front_used may actually be smaller than the actual
655     * requirement because they are based on the next pointers which are
656     * updated prior to calling this function.
657     */
658    uint32_t back_required = MAX2(back_used, old_back);
659    uint32_t front_required = MAX2(front_used, old_front);
660 
661    if (!pool->use_relocations) {
662       /* With softpin, the pool is made up of a bunch of buffers with separate
663        * maps.  Make sure we have enough contiguous space that we can get a
664        * properly contiguous map for the next chunk.
665        */
666       assert(old_back == 0);
667       front_required = MAX2(front_required, old_front + contiguous_size);
668    }
669 
670    if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
671       /* If we're in this case then this isn't the first allocation and we
672        * already have enough space on both sides to hold double what we
673        * have allocated.  There's nothing for us to do.
674        */
675       goto done;
676    }
677 
678    uint32_t size = old_size * 2;
679    while (size < back_required + front_required)
680       size *= 2;
681 
682    assert(size > pool->size);
683 
684    /* We compute a new center_bo_offset such that, when we double the size
685     * of the pool, we maintain the ratio of how much is used by each side.
686     * This way things should remain more-or-less balanced.
687     */
688    uint32_t center_bo_offset;
689    if (back_used == 0) {
690       /* If we're in this case then we have never called alloc_back().  In
691        * this case, we want to keep the offset at 0 to make things as simple
692        * as possible for users that don't care about back allocations.
693        */
694       center_bo_offset = 0;
695    } else {
696       /* Try to "center" the allocation based on how much is currently in
697        * use on each side of the center line.
698        */
699       center_bo_offset = ((uint64_t)size * back_used) / total_used;
700 
701       /* Align down to a multiple of the page size */
702       center_bo_offset &= ~(PAGE_SIZE - 1);
703 
704       assert(center_bo_offset >= back_used);
705 
706       /* Make sure we don't shrink the back end of the pool */
707       if (center_bo_offset < back_required)
708          center_bo_offset = back_required;
709 
710       /* Make sure that we don't shrink the front end of the pool */
711       if (size - center_bo_offset < front_required)
712          center_bo_offset = size - front_required;
713    }
714 
715    assert(center_bo_offset % PAGE_SIZE == 0);
716 
717    result = anv_block_pool_expand_range(pool, center_bo_offset, size);
718 
719 done:
720    pthread_mutex_unlock(&pool->device->mutex);
721 
722    if (result == VK_SUCCESS) {
723       /* Return the appropriate new size.  This function never actually
724        * updates state->next.  Instead, we let the caller do that because it
725        * needs to do so in order to maintain its concurrency model.
726        */
727       if (state == &pool->state) {
728          return pool->size - pool->center_bo_offset;
729       } else {
730          assert(pool->center_bo_offset > 0);
731          return pool->center_bo_offset;
732       }
733    } else {
734       return 0;
735    }
736 }
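/* Worked example of the re-centering math above (numbers in 4K pages):
 * with back_used = 1, front_used = 3 and old_size = 4 doubling to
 * size = 8, center_bo_offset starts as 8 * 1 / 4 = 2 pages, preserving
 * the 1:3 usage ratio across the center line; the clamps below only
 * adjust it if that proportional split would shrink either end below
 * what it already needs.
 */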
737 
738 static uint32_t
739 anv_block_pool_alloc_new(struct anv_block_pool *pool,
740                          struct anv_block_state *pool_state,
741                          uint32_t block_size, uint32_t *padding)
742 {
743    struct anv_block_state state, old, new;
744 
745    /* Most allocations won't generate any padding */
746    if (padding)
747       *padding = 0;
748 
749    while (1) {
750       state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
751       if (state.next + block_size <= state.end) {
752          return state.next;
753       } else if (state.next <= state.end) {
754          if (!pool->use_relocations && state.next < state.end) {
755             /* We need to grow the block pool, but still have some leftover
756              * space that can't be used by that particular allocation. So we
757              * add that as a "padding", and return it.
758              */
759             uint32_t leftover = state.end - state.next;
760 
761             /* If there is some leftover space in the pool, the caller must
762              * deal with it.
763              */
764             assert(leftover == 0 || padding);
765             if (padding)
766                *padding = leftover;
767             state.next += leftover;
768          }
769 
770          /* We allocated the first block outside the pool so we have to grow
771           * the pool.  pool_state->next acts a mutex: threads who try to
772           * allocate now will get block indexes above the current limit and
773           * hit futex_wait below.
774           */
775          new.next = state.next + block_size;
776          do {
777             new.end = anv_block_pool_grow(pool, pool_state, block_size);
778          } while (new.end < new.next);
779 
780          old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
781          if (old.next != state.next)
782             futex_wake(&pool_state->end, INT32_MAX);
783          return state.next;
784       } else {
785          futex_wait(&pool_state->end, state.end, NULL);
786          continue;
787       }
788    }
789 }
790 
791 int32_t
792 anv_block_pool_alloc(struct anv_block_pool *pool,
793                      uint32_t block_size, uint32_t *padding)
794 {
795    uint32_t offset;
796 
797    offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);
798 
799    return offset;
800 }
801 
802 /* Allocates a block out of the back of the block pool.
803  *
804  * This will allocate a block earlier than the "start" of the block pool.
805  * The offsets returned from this function will be negative but will still
806  * be correct relative to the block pool's map pointer.
807  *
808  * If you ever use anv_block_pool_alloc_back, then you will have to do
809  * gymnastics with the block pool's BO when doing relocations.
810  */
811 int32_t
812 anv_block_pool_alloc_back(struct anv_block_pool *pool,
813                           uint32_t block_size)
814 {
815    int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
816                                              block_size, NULL);
817 
818    /* The offset we get out of anv_block_pool_alloc_new() is actually the
819     * number of bytes downwards from the middle to the end of the block.
820     * We need to turn it into a (negative) offset from the middle to the
821     * start of the block.
822     */
823    assert(offset >= 0);
824    return -(offset + block_size);
825 }
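/* For example, the very first back allocation with block_size = 8192 gets
 * offset 0 out of anv_block_pool_alloc_new() and is returned here as
 * -8192; anv_block_pool_map(pool, -8192, ...) then resolves that to the
 * 8192 bytes immediately below the pool's center.
 */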
826 
827 VkResult
828 anv_state_pool_init(struct anv_state_pool *pool,
829                     struct anv_device *device,
830                     const char *name,
831                     uint64_t base_address,
832                     int32_t start_offset,
833                     uint32_t block_size)
834 {
835    /* We don't want to ever see signed overflow */
836    assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
837 
838    uint32_t initial_size = block_size * 16;
839 
840    VkResult result = anv_block_pool_init(&pool->block_pool, device, name,
841                                          base_address + start_offset,
842                                          initial_size);
843    if (result != VK_SUCCESS)
844       return result;
845 
846    pool->start_offset = start_offset;
847 
848    result = anv_state_table_init(&pool->table, device, 64);
849    if (result != VK_SUCCESS) {
850       anv_block_pool_finish(&pool->block_pool);
851       return result;
852    }
853 
854    assert(util_is_power_of_two_or_zero(block_size));
855    pool->block_size = block_size;
856    pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
857    for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
858       pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
859       pool->buckets[i].block.next = 0;
860       pool->buckets[i].block.end = 0;
861    }
862    VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
863 
864    return VK_SUCCESS;
865 }
866 
867 void
868 anv_state_pool_finish(struct anv_state_pool *pool)
869 {
870    VG(VALGRIND_DESTROY_MEMPOOL(pool));
871    anv_state_table_finish(&pool->table);
872    anv_block_pool_finish(&pool->block_pool);
873 }
874 
875 static uint32_t
876 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
877                                     struct anv_block_pool *block_pool,
878                                     uint32_t state_size,
879                                     uint32_t block_size,
880                                     uint32_t *padding)
881 {
882    struct anv_block_state block, old, new;
883    uint32_t offset;
884 
885    /* We don't always use anv_block_pool_alloc(), which would set *padding to
886     * zero for us. So if we have a pointer to padding, we must zero it out
887     * ourselves here, to make sure we always return some sensible value.
888     */
889    if (padding)
890       *padding = 0;
891 
892    /* If our state is large, we don't need any sub-allocation from a block.
893     * Instead, we just grab whole (potentially large) blocks.
894     */
895    if (state_size >= block_size)
896       return anv_block_pool_alloc(block_pool, state_size, padding);
897 
898  restart:
899    block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
900 
901    if (block.next < block.end) {
902       return block.next;
903    } else if (block.next == block.end) {
904       offset = anv_block_pool_alloc(block_pool, block_size, padding);
905       new.next = offset + state_size;
906       new.end = offset + block_size;
907       old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
908       if (old.next != block.next)
909          futex_wake(&pool->block.end, INT32_MAX);
910       return offset;
911    } else {
912       futex_wait(&pool->block.end, block.end, NULL);
913       goto restart;
914    }
915 }
916 
917 static uint32_t
918 anv_state_pool_get_bucket(uint32_t size)
919 {
920    unsigned size_log2 = ilog2_round_up(size);
921    assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
922    if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
923       size_log2 = ANV_MIN_STATE_SIZE_LOG2;
924    return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
925 }
926 
927 static uint32_t
928 anv_state_pool_get_bucket_size(uint32_t bucket)
929 {
930    uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
931    return 1 << size_log2;
932 }
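/* Worked bucket examples, assuming ANV_MIN_STATE_SIZE_LOG2 is 6 (64-byte
 * minimum state size): a 64-byte request maps to bucket 0 (64 bytes), a
 * 65-byte request rounds up to bucket 1 (128 bytes), and a 4096-byte
 * request maps to bucket 6 (4096 bytes).
 */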
933 
934 /** Helper to push a chunk into the state table.
935  *
936  * It creates 'count' entries in the state table and updates their sizes,
937  * offsets and maps, also pushing them as "free" states.
938  */
939 static void
940 anv_state_pool_return_blocks(struct anv_state_pool *pool,
941                              uint32_t chunk_offset, uint32_t count,
942                              uint32_t block_size)
943 {
944    /* Disallow returning 0 chunks */
945    assert(count != 0);
946 
947    /* Make sure we always return chunks aligned to the block_size */
948    assert(chunk_offset % block_size == 0);
949 
950    uint32_t st_idx;
951    UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
952    assert(result == VK_SUCCESS);
953    for (int i = 0; i < count; i++) {
954       /* update states that were added back to the state table */
955       struct anv_state *state_i = anv_state_table_get(&pool->table,
956                                                       st_idx + i);
957       state_i->alloc_size = block_size;
958       state_i->offset = pool->start_offset + chunk_offset + block_size * i;
959       state_i->map = anv_block_pool_map(&pool->block_pool,
960                                         state_i->offset,
961                                         state_i->alloc_size);
962    }
963 
964    uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
965    anv_free_list_push(&pool->buckets[block_bucket].free_list,
966                       &pool->table, st_idx, count);
967 }
968 
969 /** Returns a chunk of memory back to the state pool.
970  *
971  * Do a two-level split. If chunk_size is bigger than divisor
972  * (pool->block_size), we return as many divisor sized blocks as we can, from
973  * the end of the chunk.
974  *
975  * The remaining is then split into smaller blocks (starting at small_size if
976  * it is non-zero), with larger blocks always being taken from the end of the
977  * chunk.
978  */
979 static void
980 anv_state_pool_return_chunk(struct anv_state_pool *pool,
981                             uint32_t chunk_offset, uint32_t chunk_size,
982                             uint32_t small_size)
983 {
984    uint32_t divisor = pool->block_size;
985    uint32_t nblocks = chunk_size / divisor;
986    uint32_t rest = chunk_size - nblocks * divisor;
987 
988    if (nblocks > 0) {
989       /* First return divisor aligned and sized chunks. We start returning
990        * larger blocks from the end of the chunk, since they should already be
991        * aligned to divisor. Also anv_state_pool_return_blocks() only accepts
992        * aligned chunks.
993        */
994       uint32_t offset = chunk_offset + rest;
995       anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
996    }
997 
998    chunk_size = rest;
999    divisor /= 2;
1000 
1001    if (small_size > 0 && small_size < divisor)
1002       divisor = small_size;
1003 
1004    uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;
1005 
1006    /* Just as before, return larger divisor aligned blocks from the end of the
1007     * chunk first.
1008     */
1009    while (chunk_size > 0 && divisor >= min_size) {
1010       nblocks = chunk_size / divisor;
1011       rest = chunk_size - nblocks * divisor;
1012       if (nblocks > 0) {
1013          anv_state_pool_return_blocks(pool, chunk_offset + rest,
1014                                       nblocks, divisor);
1015          chunk_size = rest;
1016       }
1017       divisor /= 2;
1018    }
1019 }
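/* Worked example of the two-level split above: with pool->block_size = 8192,
 * chunk_offset = 0, chunk_size = 10240 and small_size = 0, the first pass
 * returns one 8192-byte block at offset 2048 (taken from the end of the
 * chunk), and the loop then returns the remaining 2048 bytes as a single
 * 2048-byte block at offset 0.
 */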
1020 
1021 static struct anv_state
1022 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
1023                            uint32_t size, uint32_t align)
1024 {
1025    uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
1026 
1027    struct anv_state *state;
1028    uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);
1029    int32_t offset;
1030 
1031    /* Try free list first. */
1032    state = anv_free_list_pop(&pool->buckets[bucket].free_list,
1033                              &pool->table);
1034    if (state) {
1035       assert(state->offset >= pool->start_offset);
1036       goto done;
1037    }
1038 
1039    /* Try to grab a chunk from some larger bucket and split it up */
1040    for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
1041       state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
1042       if (state) {
1043          unsigned chunk_size = anv_state_pool_get_bucket_size(b);
1044          int32_t chunk_offset = state->offset;
1045 
1046          /* First lets update the state we got to its new size. offset and map
1047           * remain the same.
1048           */
1049          state->alloc_size = alloc_size;
1050 
1051          /* Now return the unused part of the chunk back to the pool as free
1052           * blocks
1053           *
1054           * There are a couple of options as to what we do with it:
1055           *
1056           *    1) We could fully split the chunk into state.alloc_size sized
1057           *       pieces.  However, this would mean that allocating a 16B
1058           *       state could potentially split a 2MB chunk into 512K smaller
1059           *       chunks.  This would lead to unnecessary fragmentation.
1060           *
1061           *    2) The classic "buddy allocator" method would have us split the
1062           *       chunk in half and return one half.  Then we would split the
1063           *       remaining half in half and return one half, and repeat as
1064           *       needed until we get down to the size we want.  However, if
1065           *       you are allocating a bunch of the same size state (which is
1066           *       the common case), this means that every other allocation has
1067           *       to go up a level and every fourth goes up two levels, etc.
1068           *       This is not nearly as efficient as it could be if we did a
1069           *       little more work up-front.
1070           *
1071           *    3) Split the difference between (1) and (2) by doing a
1072           *       two-level split.  If it's bigger than some fixed block_size,
1073           *       we split it into block_size sized chunks and return all but
1074           *       one of them.  Then we split what remains into
1075           *       state.alloc_size sized chunks and return them.
1076           *
1077           * We choose something close to option (3), which is implemented with
1078           * anv_state_pool_return_chunk(). That is done by returning the
1079           * remaining of the chunk, with alloc_size as a hint of the size that
1080           * we want the smaller chunk split into.
1081           */
1082          anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
1083                                      chunk_size - alloc_size, alloc_size);
1084          goto done;
1085       }
1086    }
1087 
1088    uint32_t padding;
1089    offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
1090                                                 &pool->block_pool,
1091                                                 alloc_size,
1092                                                 pool->block_size,
1093                                                 &padding);
1094    /* Every time we allocate a new state, add it to the state pool */
1095    uint32_t idx;
1096    UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
1097    assert(result == VK_SUCCESS);
1098 
1099    state = anv_state_table_get(&pool->table, idx);
1100    state->offset = pool->start_offset + offset;
1101    state->alloc_size = alloc_size;
1102    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
1103 
1104    if (padding > 0) {
1105       uint32_t return_offset = offset - padding;
1106       anv_state_pool_return_chunk(pool, return_offset, padding, 0);
1107    }
1108 
1109 done:
1110    return *state;
1111 }
1112 
1113 struct anv_state
1114 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
1115 {
1116    if (size == 0)
1117       return ANV_STATE_NULL;
1118 
1119    struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
1120    VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
1121    return state;
1122 }
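/* Illustrative usage sketch (hypothetical caller): grab a 64-byte piece of
 * state, fill it through the CPU-visible map, and return it to the pool
 * once the GPU no longer references it.
 *
 *    struct anv_state s =
 *       anv_state_pool_alloc(&device->dynamic_state_pool, 64, 64);
 *    memset(s.map, 0, 64);           // CPU writes land in the pool's BO
 *    // ... emit s.offset into a command, submit, wait ...
 *    anv_state_pool_free(&device->dynamic_state_pool, s);
 */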
1123 
1124 struct anv_state
1125 anv_state_pool_alloc_back(struct anv_state_pool *pool)
1126 {
1127    struct anv_state *state;
1128    uint32_t alloc_size = pool->block_size;
1129 
1130    /* This function is only used with pools where start_offset == 0 */
1131    assert(pool->start_offset == 0);
1132 
1133    state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
1134    if (state) {
1135       assert(state->offset < pool->start_offset);
1136       goto done;
1137    }
1138 
1139    int32_t offset;
1140    offset = anv_block_pool_alloc_back(&pool->block_pool,
1141                                       pool->block_size);
1142    uint32_t idx;
1143    UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
1144    assert(result == VK_SUCCESS);
1145 
1146    state = anv_state_table_get(&pool->table, idx);
1147    state->offset = pool->start_offset + offset;
1148    state->alloc_size = alloc_size;
1149    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
1150 
1151 done:
1152    VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
1153    return *state;
1154 }
1155 
1156 static void
1157 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
1158 {
1159    assert(util_is_power_of_two_or_zero(state.alloc_size));
1160    unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
1161 
1162    if (state.offset < pool->start_offset) {
1163       assert(state.alloc_size == pool->block_size);
1164       anv_free_list_push(&pool->back_alloc_free_list,
1165                          &pool->table, state.idx, 1);
1166    } else {
1167       anv_free_list_push(&pool->buckets[bucket].free_list,
1168                          &pool->table, state.idx, 1);
1169    }
1170 }
1171 
1172 void
1173 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
1174 {
1175    if (state.alloc_size == 0)
1176       return;
1177 
1178    VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
1179    anv_state_pool_free_no_vg(pool, state);
1180 }
1181 
1182 struct anv_state_stream_block {
1183    struct anv_state block;
1184 
1185    /* The next block */
1186    struct anv_state_stream_block *next;
1187 
1188 #ifdef HAVE_VALGRIND
1189    /* A pointer to the first user-allocated thing in this block.  This is
1190     * what valgrind sees as the start of the block.
1191     */
1192    void *_vg_ptr;
1193 #endif
1194 };
1195 
1196 /* The state stream allocator is a one-shot, single threaded allocator for
1197  * variable sized blocks.  We use it for allocating dynamic state.
1198  */
1199 void
1200 anv_state_stream_init(struct anv_state_stream *stream,
1201                       struct anv_state_pool *state_pool,
1202                       uint32_t block_size)
1203 {
1204    stream->state_pool = state_pool;
1205    stream->block_size = block_size;
1206 
1207    stream->block = ANV_STATE_NULL;
1208 
1209    /* Ensure that next + whatever > block_size.  This way the first call to
1210     * state_stream_alloc fetches a new block.
1211     */
1212    stream->next = block_size;
1213 
1214    util_dynarray_init(&stream->all_blocks, NULL);
1215 
1216    VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
1217 }
1218 
1219 void
1220 anv_state_stream_finish(struct anv_state_stream *stream)
1221 {
1222    util_dynarray_foreach(&stream->all_blocks, struct anv_state, block) {
1223       VG(VALGRIND_MEMPOOL_FREE(stream, block->map));
1224       VG(VALGRIND_MAKE_MEM_NOACCESS(block->map, block->alloc_size));
1225       anv_state_pool_free_no_vg(stream->state_pool, *block);
1226    }
1227    util_dynarray_fini(&stream->all_blocks);
1228 
1229    VG(VALGRIND_DESTROY_MEMPOOL(stream));
1230 }
1231 
1232 struct anv_state
1233 anv_state_stream_alloc(struct anv_state_stream *stream,
1234                        uint32_t size, uint32_t alignment)
1235 {
1236    if (size == 0)
1237       return ANV_STATE_NULL;
1238 
1239    assert(alignment <= PAGE_SIZE);
1240 
1241    uint32_t offset = align(stream->next, alignment);
1242    if (offset + size > stream->block.alloc_size) {
1243       uint32_t block_size = stream->block_size;
1244       if (block_size < size)
1245          block_size = round_to_power_of_two(size);
1246 
1247       stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
1248                                                  block_size, PAGE_SIZE);
1249       util_dynarray_append(&stream->all_blocks,
1250                            struct anv_state, stream->block);
1251       VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, block_size));
1252 
1253       /* Reset back to the start */
1254       stream->next = offset = 0;
1255       assert(offset + size <= stream->block.alloc_size);
1256    }
1257    const bool new_block = stream->next == 0;
1258 
1259    struct anv_state state = stream->block;
1260    state.offset += offset;
1261    state.alloc_size = size;
1262    state.map += offset;
1263 
1264    stream->next = offset + size;
1265 
1266    if (new_block) {
1267       assert(state.map == stream->block.map);
1268       VG(VALGRIND_MEMPOOL_ALLOC(stream, state.map, size));
1269    } else {
1270       /* This only updates the mempool.  The newly allocated chunk is still
1271        * marked as NOACCESS. */
1272       VG(VALGRIND_MEMPOOL_CHANGE(stream, stream->block.map, stream->block.map,
1273                                  stream->next));
1274       /* Mark the newly allocated chunk as undefined */
1275       VG(VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size));
1276    }
1277 
1278    return state;
1279 }
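/* Illustrative usage sketch (hypothetical caller): a command buffer keeps a
 * stream on top of a state pool, carves transient state out of it without
 * any locking, and releases every block in one go when it is reset.
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_pool, 16384);
 *    struct anv_state a = anv_state_stream_alloc(&stream, 256, 64);
 *    struct anv_state b = anv_state_stream_alloc(&stream, 1024, 64);
 *    // ... write a.map / b.map, emit a.offset / b.offset ...
 *    anv_state_stream_finish(&stream);   // frees all blocks at once
 */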
1280 
1281 void
1282 anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
1283                              struct anv_state_pool *parent,
1284                              uint32_t count, uint32_t size, uint32_t alignment)
1285 {
1286    pool->pool = parent;
1287    pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
1288    pool->count = count;
1289 
1290    for (unsigned i = 0; i < count; i++) {
1291       struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
1292       anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
1293    }
1294 }
1295 
1296 void
1297 anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
1298 {
1299    struct anv_state *state;
1300 
1301    while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
1302       anv_state_pool_free(pool->pool, *state);
1303       pool->count--;
1304    }
1305    assert(pool->count == 0);
1306 }
1307 
1308 struct anv_state
1309 anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
1310 {
1311    return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);
1312 }
1313 
1314 void
1315 anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
1316                              struct anv_state state)
1317 {
1318    anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
1319 }
1320 
1321 void
1322 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
1323                  const char *name)
1324 {
1325    pool->name = name;
1326    pool->device = device;
1327    for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
1328       util_sparse_array_free_list_init(&pool->free_list[i],
1329                                        &device->bo_cache.bo_map, 0,
1330                                        offsetof(struct anv_bo, free_index));
1331    }
1332 
1333    VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
1334 }
1335 
1336 void
1337 anv_bo_pool_finish(struct anv_bo_pool *pool)
1338 {
1339    for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
1340       while (1) {
1341          struct anv_bo *bo =
1342             util_sparse_array_free_list_pop_elem(&pool->free_list[i]);
1343          if (bo == NULL)
1344             break;
1345 
1346          /* anv_device_release_bo is going to "free" it */
1347          VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1));
1348          anv_device_release_bo(pool->device, bo);
1349       }
1350    }
1351 
1352    VG(VALGRIND_DESTROY_MEMPOOL(pool));
1353 }
1354 
1355 VkResult
1356 anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
1357                   struct anv_bo **bo_out)
1358 {
1359    const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
1360    const unsigned pow2_size = 1 << size_log2;
1361    const unsigned bucket = size_log2 - 12;
1362    assert(bucket < ARRAY_SIZE(pool->free_list));
1363 
1364    struct anv_bo *bo =
1365       util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]);
1366    if (bo != NULL) {
1367       VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1368       *bo_out = bo;
1369       return VK_SUCCESS;
1370    }
1371 
1372    VkResult result = anv_device_alloc_bo(pool->device,
1373                                          pool->name,
1374                                          pow2_size,
1375                                          ANV_BO_ALLOC_MAPPED |
1376                                          ANV_BO_ALLOC_SNOOPED |
1377                                          ANV_BO_ALLOC_CAPTURE,
1378                                          0 /* explicit_address */,
1379                                          &bo);
1380    if (result != VK_SUCCESS)
1381       return result;
1382 
1383    /* We want it to look like it came from this pool */
1384    VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
1385    VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1386 
1387    *bo_out = bo;
1388 
1389    return VK_SUCCESS;
1390 }
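
/* Illustrative usage of the pool, as a sketch rather than code taken from the
 * driver (the batch_bo_pool member is just one example of an anv_bo_pool
 * embedded in anv_device):
 *
 *    struct anv_bo *bo;
 *    if (anv_bo_pool_alloc(&device->batch_bo_pool, 6000, &bo) == VK_SUCCESS) {
 *       memset(bo->map, 0, 6000);   // 6000 bytes round up to the 8 KiB bucket
 *       anv_bo_pool_free(&device->batch_bo_pool, bo);
 *    }
 */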
1391 
1392 void
1393 anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo)
1394 {
1395    VG(VALGRIND_MEMPOOL_FREE(pool, bo->map));
1396 
1397    assert(util_is_power_of_two_or_zero(bo->size));
1398    const unsigned size_log2 = ilog2_round_up(bo->size);
1399    const unsigned bucket = size_log2 - 12;
1400    assert(bucket < ARRAY_SIZE(pool->free_list));
1401 
1402    assert(util_sparse_array_get(&pool->device->bo_cache.bo_map,
1403                                 bo->gem_handle) == bo);
1404    util_sparse_array_free_list_push(&pool->free_list[bucket],
1405                                     &bo->gem_handle, 1);
1406 }
1407 
1408 // Scratch pool
1409 
1410 void
1411 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
1412 {
1413    memset(pool, 0, sizeof(*pool));
1414 }
1415 
1416 void
1417 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
1418 {
1419    for (unsigned s = 0; s < ARRAY_SIZE(pool->bos[0]); s++) {
1420       for (unsigned i = 0; i < 16; i++) {
1421          if (pool->bos[i][s] != NULL)
1422             anv_device_release_bo(device, pool->bos[i][s]);
1423       }
1424    }
1425 }
1426 
1427 struct anv_bo *
1428 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
1429                        gl_shader_stage stage, unsigned per_thread_scratch)
1430 {
1431    if (per_thread_scratch == 0)
1432       return NULL;
1433 
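   /* per_thread_scratch is a power of two, so ffs(per_thread_scratch / 2048)
    * compresses it into a small bucket index (2 KiB -> 1, 4 KiB -> 2, ...)
    * used to slot into pool->bos.
    */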
1434    unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
1435    assert(scratch_size_log2 < 16);
1436 
1437    assert(stage < ARRAY_SIZE(pool->bos));
1438 
1439    const struct intel_device_info *devinfo = device->info;
1440 
1441    struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);
1442 
1443    if (bo != NULL)
1444       return bo;
1445 
1446    assert(stage < ARRAY_SIZE(devinfo->max_scratch_ids));
1447    uint32_t size = per_thread_scratch * devinfo->max_scratch_ids[stage];
1448 
1449    /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
1450     * are still relative to the general state base address.  When we emit
1451     * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
1452     * to the maximum (1 page under 4GB).  This allows us to just place the
1453     * scratch buffers anywhere we wish in the bottom 32 bits of address space
1454     * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
1455     * However, in order to do so, we need to ensure that the kernel does not
1456     * place the scratch BO above the 32-bit boundary.
1457     *
1458     * NOTE: Technically, it can't go "anywhere" because the top page is off
1459     * limits.  However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
1460     * kernel allocates space using
1461     *
1462     *    end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
1463     *
1464     * so nothing will ever touch the top page.
1465     */
1466    VkResult result = anv_device_alloc_bo(device, "scratch", size,
1467                                          ANV_BO_ALLOC_32BIT_ADDRESS,
1468                                          0 /* explicit_address */,
1469                                          &bo);
1470    if (result != VK_SUCCESS)
1471       return NULL; /* TODO */
1472 
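   /* Publish the new BO with a compare-and-swap so that concurrent callers
    * racing on the same stage and size stay consistent: the loser releases
    * its freshly allocated BO and returns the winner's instead.
    */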
1473    struct anv_bo *current_bo =
1474       p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo);
1475    if (current_bo) {
1476       anv_device_release_bo(device, bo);
1477       return current_bo;
1478    } else {
1479       return bo;
1480    }
1481 }
1482 
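/* The BO cache is a sparse array of anv_bo indexed by GEM handle.  Every
 * allocation, import and release goes through this table so that a handle
 * handed out twice by the kernel (e.g. the same dma-buf imported twice)
 * maps to a single refcounted anv_bo.
 */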
1483 VkResult
1484 anv_bo_cache_init(struct anv_bo_cache *cache, struct anv_device *device)
1485 {
1486    util_sparse_array_init(&cache->bo_map, sizeof(struct anv_bo), 1024);
1487 
1488    if (pthread_mutex_init(&cache->mutex, NULL)) {
1489       util_sparse_array_finish(&cache->bo_map);
1490       return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
1491                        "pthread_mutex_init failed: %m");
1492    }
1493 
1494    return VK_SUCCESS;
1495 }
1496 
1497 void
1498 anv_bo_cache_finish(struct anv_bo_cache *cache)
1499 {
1500    util_sparse_array_finish(&cache->bo_map);
1501    pthread_mutex_destroy(&cache->mutex);
1502 }
1503 
1504 #define ANV_BO_CACHE_SUPPORTED_FLAGS \
1505    (EXEC_OBJECT_WRITE | \
1506     EXEC_OBJECT_ASYNC | \
1507     EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
1508     EXEC_OBJECT_PINNED | \
1509     EXEC_OBJECT_CAPTURE)
1510 
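/* Translate driver-level allocation flags into the EXEC_OBJECT_* flags kept
 * on the BO.  Capabilities the physical device lacks (48-bit addressing,
 * exec capture, async) are silently dropped rather than reported as errors.
 */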
1511 static uint32_t
1512 anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
1513                                enum anv_bo_alloc_flags alloc_flags)
1514 {
1515    struct anv_physical_device *pdevice = device->physical;
1516 
1517    uint64_t bo_flags = 0;
1518    if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
1519        pdevice->supports_48bit_addresses)
1520       bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1521 
1522    if ((alloc_flags & ANV_BO_ALLOC_CAPTURE) && pdevice->has_exec_capture)
1523       bo_flags |= EXEC_OBJECT_CAPTURE;
1524 
1525    if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
1526       assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
1527       bo_flags |= EXEC_OBJECT_WRITE;
1528    }
1529 
1530    if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
1531       bo_flags |= EXEC_OBJECT_ASYNC;
1532 
1533    if (pdevice->use_softpin)
1534       bo_flags |= EXEC_OBJECT_PINNED;
1535 
1536    return bo_flags;
1537 }
1538 
1539 static void
1540 anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
1541 {
1542    if (bo->offset != 0 && anv_bo_is_pinned(bo) && !bo->has_fixed_address)
1543       anv_vma_free(device, bo->offset, bo->size);
1544 
1545    if (bo->map && !bo->from_host_ptr)
1546       anv_device_unmap_bo(device, bo, bo->map, bo->size);
1547 
1548    assert(bo->gem_handle != 0);
1549    anv_gem_close(device, bo->gem_handle);
1550 }
1551 
1552 static VkResult
1553 anv_bo_vma_alloc_or_close(struct anv_device *device,
1554                           struct anv_bo *bo,
1555                           enum anv_bo_alloc_flags alloc_flags,
1556                           uint64_t explicit_address)
1557 {
1558    assert(anv_bo_is_pinned(bo));
1559    assert(explicit_address == intel_48b_address(explicit_address));
1560 
1561    uint32_t align = 4096;
1562 
1563    if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
1564       bo->has_fixed_address = true;
1565       bo->offset = explicit_address;
1566    } else {
1567       bo->offset = anv_vma_alloc(device, bo->size,
1568                                  align, alloc_flags, explicit_address);
1569       if (bo->offset == 0) {
1570          anv_bo_finish(device, bo);
1571          return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
1572                           "failed to allocate virtual address for BO");
1573       }
1574    }
1575 
1576    return VK_SUCCESS;
1577 }
1578 
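/* Allocate a fresh GEM BO: create the handle, optionally CPU-map it, set the
 * caching mode for snooped allocations, reserve a VMA when the BO is pinned,
 * and finally publish it in the BO cache slot keyed by its GEM handle.
 */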
1579 VkResult
1580 anv_device_alloc_bo(struct anv_device *device,
1581                     const char *name,
1582                     uint64_t size,
1583                     enum anv_bo_alloc_flags alloc_flags,
1584                     uint64_t explicit_address,
1585                     struct anv_bo **bo_out)
1586 {
1587    const uint32_t bo_flags =
1588       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1589    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1590 
1591    /* The kernel is going to give us whole pages anyway */
1592    size = align64(size, 4096);
1593 
1594    uint32_t gem_handle = anv_gem_create(device, size);
1595    if (gem_handle == 0)
1596       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1597 
1598    struct anv_bo new_bo = {
1599       .name = name,
1600       .gem_handle = gem_handle,
1601       .refcount = 1,
1602       .offset = -1,
1603       .size = size,
1604       .flags = bo_flags,
1605       .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
1606       .has_client_visible_address =
1607          (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1608    };
1609 
1610    if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
1611       VkResult result = anv_device_map_bo(device, &new_bo, 0, size,
1612                                           0 /* gem_flags */, &new_bo.map);
1613       if (unlikely(result != VK_SUCCESS)) {
1614          anv_gem_close(device, new_bo.gem_handle);
1615          return result;
1616       }
1617    }
1618 
1619    if (alloc_flags & ANV_BO_ALLOC_SNOOPED) {
1620       assert(alloc_flags & ANV_BO_ALLOC_MAPPED);
1621       /* We don't want to change these defaults if it's going to be shared
1622        * with another process.
1623        */
1624       assert(!(alloc_flags & ANV_BO_ALLOC_EXTERNAL));
1625 
1626       /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
1627        * I915_CACHING_NONE on non-LLC platforms.  For many internal state
1628        * objects, we'd rather take the snooping overhead than risk forgetting
1629        * a CLFLUSH somewhere.  Userptr objects are always created as
1630        * I915_CACHING_CACHED, which on non-LLC means snooped so there's no
1631        * need to do this there.
1632        */
1633       if (!device->info->has_llc) {
1634          anv_gem_set_caching(device, new_bo.gem_handle,
1635                              I915_CACHING_CACHED);
1636       }
1637    }
1638 
1639    if (anv_bo_is_pinned(&new_bo)) {
1640       VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
1641                                                   alloc_flags,
1642                                                   explicit_address);
1643       if (result != VK_SUCCESS)
1644          return result;
1645    } else {
1646       assert(!new_bo.has_client_visible_address);
1647    }
1648 
1649    assert(new_bo.gem_handle);
1650 
1651    /* If we just got this gem_handle from anv_gem_create then we know no one
1652     * else is touching this BO at the moment so we don't need to lock here.
1653     */
1654    struct anv_bo *bo = anv_device_lookup_bo(device, new_bo.gem_handle);
1655    *bo = new_bo;
1656 
1657    *bo_out = bo;
1658 
1659    return VK_SUCCESS;
1660 }
1661 
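/* CPU-map "size" bytes of the BO starting at "offset" using the GEM mmap
 * path.  The mapping is returned through map_out (when non-NULL); unmapping
 * goes through anv_device_unmap_bo below.
 */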
1662 VkResult
1663 anv_device_map_bo(struct anv_device *device,
1664                   struct anv_bo *bo,
1665                   uint64_t offset,
1666                   size_t size,
1667                   uint32_t gem_flags,
1668                   void **map_out)
1669 {
1670    assert(!bo->is_wrapper && !bo->from_host_ptr);
1671    assert(size > 0);
1672 
1673    void *map = anv_gem_mmap(device, bo->gem_handle, offset, size, gem_flags);
1674    if (unlikely(map == MAP_FAILED))
1675       return vk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
1676 
1677    assert(map != NULL);
1678 
1679    if (map_out)
1680       *map_out = map;
1681 
1682    return VK_SUCCESS;
1683 }
1684 
1685 void
1686 anv_device_unmap_bo(struct anv_device *device,
1687                     struct anv_bo *bo,
1688                     void *map, size_t map_size)
1689 {
1690    assert(!bo->is_wrapper && !bo->from_host_ptr);
1691 
1692    anv_gem_munmap(device, map, map_size);
1693 }
1694 
1695 VkResult
1696 anv_device_import_bo_from_host_ptr(struct anv_device *device,
1697                                    void *host_ptr, uint32_t size,
1698                                    enum anv_bo_alloc_flags alloc_flags,
1699                                    uint64_t client_address,
1700                                    struct anv_bo **bo_out)
1701 {
1702    assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
1703                            ANV_BO_ALLOC_SNOOPED |
1704                            ANV_BO_ALLOC_FIXED_ADDRESS)));
1705 
1706    struct anv_bo_cache *cache = &device->bo_cache;
1707    const uint32_t bo_flags =
1708       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1709    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1710 
1711    uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
1712    if (!gem_handle)
1713       return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
1714 
1715    pthread_mutex_lock(&cache->mutex);
1716 
1717    struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1718    if (bo->refcount > 0) {
1719       /* VK_EXT_external_memory_host doesn't require handling importing the
1720        * same pointer twice at the same time, but we don't get in the way.  If
1721        * the kernel gives us the same gem_handle, only succeed if the flags match.
1722        */
1723       assert(bo->gem_handle == gem_handle);
1724       if (bo_flags != bo->flags) {
1725          pthread_mutex_unlock(&cache->mutex);
1726          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1727                           "same host pointer imported two different ways");
1728       }
1729 
1730       if (bo->has_client_visible_address !=
1731           ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
1732          pthread_mutex_unlock(&cache->mutex);
1733          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1734                           "The same BO was imported with and without buffer "
1735                           "device address");
1736       }
1737 
1738       if (client_address && client_address != intel_48b_address(bo->offset)) {
1739          pthread_mutex_unlock(&cache->mutex);
1740          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1741                           "The same BO was imported at two different "
1742                           "addresses");
1743       }
1744 
1745       __sync_fetch_and_add(&bo->refcount, 1);
1746    } else {
1747       struct anv_bo new_bo = {
1748          .name = "host-ptr",
1749          .gem_handle = gem_handle,
1750          .refcount = 1,
1751          .offset = -1,
1752          .size = size,
1753          .map = host_ptr,
1754          .flags = bo_flags,
1755          .is_external = true,
1756          .from_host_ptr = true,
1757          .has_client_visible_address =
1758             (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1759       };
1760 
1761       if (anv_bo_is_pinned(&new_bo)) {
1762          VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
1763                                                      alloc_flags,
1764                                                      client_address);
1765          if (result != VK_SUCCESS) {
1766             pthread_mutex_unlock(&cache->mutex);
1767             return result;
1768          }
1769       } else {
1770          assert(!new_bo.has_client_visible_address);
1771       }
1772 
1773       *bo = new_bo;
1774    }
1775 
1776    pthread_mutex_unlock(&cache->mutex);
1777    *bo_out = bo;
1778 
1779    return VK_SUCCESS;
1780 }
1781 
1782 VkResult
1783 anv_device_import_bo(struct anv_device *device,
1784                      int fd,
1785                      enum anv_bo_alloc_flags alloc_flags,
1786                      uint64_t client_address,
1787                      struct anv_bo **bo_out)
1788 {
1789    assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
1790                            ANV_BO_ALLOC_SNOOPED |
1791                            ANV_BO_ALLOC_FIXED_ADDRESS)));
1792 
1793    struct anv_bo_cache *cache = &device->bo_cache;
1794    const uint32_t bo_flags =
1795       anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
1796    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
1797 
1798    pthread_mutex_lock(&cache->mutex);
1799 
1800    uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
1801    if (!gem_handle) {
1802       pthread_mutex_unlock(&cache->mutex);
1803       return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
1804    }
1805 
1806    struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1807    if (bo->refcount > 0) {
1808       /* We have to be careful how we combine flags so that it makes sense.
1809        * Really, though, if we get to this case and it actually matters, the
1810        * client has imported a BO twice in different ways and they get what
1811        * they have coming.
1812        */
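      /* WRITE, PINNED and CAPTURE are sticky: if either import requested
       * them, keep them.  ASYNC and 48-bit support have to be agreed on by
       * both imports, so those are AND'ed instead.
       */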
1813       uint64_t new_flags = 0;
1814       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_WRITE;
1815       new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC;
1816       new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1817       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED;
1818       new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE;
1819 
1820       /* It's theoretically possible for a BO to get imported such that it's
1821        * both pinned and not pinned.  The only way this can happen is if it
1822        * gets imported as both a semaphore and a memory object and that would
1823        * be an application error.  Just fail out in that case.
1824        */
1825       if ((bo->flags & EXEC_OBJECT_PINNED) !=
1826           (bo_flags & EXEC_OBJECT_PINNED)) {
1827          pthread_mutex_unlock(&cache->mutex);
1828          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1829                           "The same BO was imported two different ways");
1830       }
1831 
1832       /* It's also theoretically possible that someone could export a BO from
1833        * one heap and import it into another or to import the same BO into two
1834        * different heaps.  If this happens, we could potentially end up both
1835        * allowing and disallowing 48-bit addresses.  There's not much we can
1836        * do about it if we're pinning so we just throw an error and hope no
1837        * app is actually that stupid.
1838        */
1839       if ((new_flags & EXEC_OBJECT_PINNED) &&
1840           (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
1841           (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
1842          pthread_mutex_unlock(&cache->mutex);
1843          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1844                           "The same BO was imported on two different heaps");
1845       }
1846 
1847       if (bo->has_client_visible_address !=
1848           ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
1849          pthread_mutex_unlock(&cache->mutex);
1850          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1851                           "The same BO was imported with and without buffer "
1852                           "device address");
1853       }
1854 
1855       if (client_address && client_address != intel_48b_address(bo->offset)) {
1856          pthread_mutex_unlock(&cache->mutex);
1857          return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1858                           "The same BO was imported at two different "
1859                           "addresses");
1860       }
1861 
1862       bo->flags = new_flags;
1863 
1864       __sync_fetch_and_add(&bo->refcount, 1);
1865    } else {
1866       off_t size = lseek(fd, 0, SEEK_END);
1867       if (size == (off_t)-1) {
1868          anv_gem_close(device, gem_handle);
1869          pthread_mutex_unlock(&cache->mutex);
1870          return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
1871       }
1872 
1873       struct anv_bo new_bo = {
1874          .name = "imported",
1875          .gem_handle = gem_handle,
1876          .refcount = 1,
1877          .offset = -1,
1878          .size = size,
1879          .flags = bo_flags,
1880          .is_external = true,
1881          .has_client_visible_address =
1882             (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
1883       };
1884 
1885       if (anv_bo_is_pinned(&new_bo)) {
1886          VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
1887                                                      alloc_flags,
1888                                                      client_address);
1889          if (result != VK_SUCCESS) {
1890             pthread_mutex_unlock(&cache->mutex);
1891             return result;
1892          }
1893       } else {
1894          assert(!new_bo.has_client_visible_address);
1895       }
1896 
1897       *bo = new_bo;
1898    }
1899 
1900    pthread_mutex_unlock(&cache->mutex);
1901    *bo_out = bo;
1902 
1903    return VK_SUCCESS;
1904 }
1905 
1906 VkResult
1907 anv_device_export_bo(struct anv_device *device,
1908                      struct anv_bo *bo, int *fd_out)
1909 {
1910    assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
1911 
1912    /* This BO must have been flagged external in order for us to be able
1913     * to export it.  This is done based on external options passed into
1914     * anv_AllocateMemory.
1915     */
1916    assert(bo->is_external);
1917 
1918    int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
1919    if (fd < 0)
1920       return vk_error(device, VK_ERROR_TOO_MANY_OBJECTS);
1921 
1922    *fd_out = fd;
1923 
1924    return VK_SUCCESS;
1925 }
1926 
1927 VkResult
1928 anv_device_get_bo_tiling(struct anv_device *device,
1929                          struct anv_bo *bo,
1930                          enum isl_tiling *tiling_out)
1931 {
1932    int i915_tiling = anv_gem_get_tiling(device, bo->gem_handle);
1933    if (i915_tiling < 0) {
1934       return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
1935                        "failed to get BO tiling: %m");
1936    }
1937 
1938    *tiling_out = isl_tiling_from_i915_tiling(i915_tiling);
1939 
1940    return VK_SUCCESS;
1941 }
1942 
1943 VkResult
1944 anv_device_set_bo_tiling(struct anv_device *device,
1945                          struct anv_bo *bo,
1946                          uint32_t row_pitch_B,
1947                          enum isl_tiling tiling)
1948 {
1949    int ret = anv_gem_set_tiling(device, bo->gem_handle, row_pitch_B,
1950                                 isl_tiling_to_i915_tiling(tiling));
1951    if (ret) {
1952       return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
1953                        "failed to set BO tiling: %m");
1954    }
1955 
1956    return VK_SUCCESS;
1957 }
1958 
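/* Atomically decrement *counter unless that would take it from 1 to 0.
 * Returns true if the decrement happened (there are other references left)
 * and false if the counter is 1, in which case the caller has to drop the
 * last reference under the cache mutex.
 */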
1959 static bool
1960 atomic_dec_not_one(uint32_t *counter)
1961 {
1962    uint32_t old, val;
1963 
1964    val = *counter;
1965    while (1) {
1966       if (val == 1)
1967          return false;
1968 
1969       old = __sync_val_compare_and_swap(counter, val, val - 1);
1970       if (old == val)
1971          return true;
1972 
1973       val = old;
1974    }
1975 }
1976 
1977 void
1978 anv_device_release_bo(struct anv_device *device,
1979                       struct anv_bo *bo)
1980 {
1981    struct anv_bo_cache *cache = &device->bo_cache;
1982    assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
1983 
1984    /* Try to decrement the counter but don't go below one.  If this succeeds
1985     * then the refcount has been decremented and we are not the last
1986     * reference.
1987     */
1988    if (atomic_dec_not_one(&bo->refcount))
1989       return;
1990 
1991    pthread_mutex_lock(&cache->mutex);
1992 
1993    /* We are probably the last reference since our attempt to decrement above
1994     * failed.  However, we can't actually know until we are inside the mutex.
1995     * Otherwise, someone could import the BO between the decrement and our
1996     * taking the mutex.
1997     */
1998    if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
1999       /* Turns out we're not the last reference.  Unlock and bail. */
2000       pthread_mutex_unlock(&cache->mutex);
2001       return;
2002    }
2003    assert(bo->refcount == 0);
2004 
2005    /* Memset the BO just in case.  The refcount being zero should be enough to
2006     * prevent someone from assuming the data is valid, but it's safer to stomp
2007     * it to zero anyway.  We explicitly do this *before* we actually
2008     * close the GEM handle to ensure that if anyone allocates something and
2009     * gets the same GEM handle, the memset has already happened and won't stomp
2010     * all over any data they may write in this BO.
2011     */
2012    struct anv_bo old_bo = *bo;
2013 
2014    memset(bo, 0, sizeof(*bo));
2015 
2016    anv_bo_finish(device, &old_bo);
2017 
2018    /* Don't unlock until we've actually closed the BO.  The whole point of
2019     * the BO cache is to ensure that we correctly handle races with creating
2020     * and releasing GEM handles and we don't want to let someone import the BO
2021     * again between mutex unlock and closing the GEM handle.
2022     */
2023    pthread_mutex_unlock(&cache->mutex);
2024 }
2025