/*
 * Copyright 2018 Alyssa Rosenzweig
 * Copyright 2019 Collabora, Ltd.
 * SPDX-License-Identifier: MIT
 *
 */

#include "pool.h"
#include "agx_bo.h"
#include "agx_device.h"

/* Transient command stream pooling: command stream uploads try to simply copy
 * into wherever we left off in the current backing BO. If there isn't enough
 * space, we allocate a new backing BO for the pool and copy there instead. */
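
/* Illustrative usage sketch (not part of this file; assumes an initialized
 * struct agx_device *dev, and uses create_flags of 0 as a placeholder):
 *
 *    struct agx_pool pool;
 *    agx_pool_init(&pool, dev, 0, true);
 *
 *    uint64_t va = agx_pool_upload(&pool, &data, sizeof(data));
 *    ... emit va into the command stream ...
 *
 *    agx_pool_cleanup(&pool);
 */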

#define POOL_SLAB_SIZE (256 * 1024)

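/* Allocate a fresh backing BO, append it to the pool's BO list, and make it
 * the current transient BO with the write offset reset to zero. */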
static struct agx_bo *
agx_pool_alloc_backing(struct agx_pool *pool, size_t bo_sz)
{
   struct agx_bo *bo =
      agx_bo_create(pool->dev, bo_sz, 0, pool->create_flags, "Pool");

   util_dynarray_append(&pool->bos, struct agx_bo *, bo);
   pool->transient_bo = bo;
   pool->transient_offset = 0;

   return bo;
}

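/* Initialize a pool against a device. If prealloc is set, the first backing
 * BO is allocated eagerly, so the first upload does not have to. */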
void
agx_pool_init(struct agx_pool *pool, struct agx_device *dev,
              unsigned create_flags, bool prealloc)
{
   memset(pool, 0, sizeof(*pool));
   pool->dev = dev;
   pool->create_flags = create_flags;
   util_dynarray_init(&pool->bos, NULL);

   if (prealloc)
      agx_pool_alloc_backing(pool, POOL_SLAB_SIZE);
}

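/* Release every BO the pool has allocated, then free the BO list itself. The
 * pool must be re-initialized before any further use. */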
void
agx_pool_cleanup(struct agx_pool *pool)
{
   util_dynarray_foreach(&pool->bos, struct agx_bo *, bo) {
      agx_bo_unreference(pool->dev, *bo);
   }

   util_dynarray_fini(&pool->bos);
}

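/* Write the handle of every BO owned by the pool into handles. The caller
 * must provide space for one handle per BO in the pool. */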
void
agx_pool_get_bo_handles(struct agx_pool *pool, uint32_t *handles)
{
   unsigned idx = 0;
   util_dynarray_foreach(&pool->bos, struct agx_bo *, bo) {
      handles[idx++] = (*bo)->handle;
   }
}

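/* Allocate sz bytes from the pool at the given power-of-two alignment,
 * returning both CPU and GPU pointers. If out_bo is non-NULL, it receives the
 * backing BO. Requests larger than POOL_SLAB_SIZE get a dedicated backing BO,
 * with its size rounded up to a 4 KiB multiple. */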
struct agx_ptr
agx_pool_alloc_aligned_with_bo(struct agx_pool *pool, size_t sz,
                               unsigned alignment, struct agx_bo **out_bo)
{
   assert(alignment == util_next_power_of_two(alignment));

   /* Find or create a suitable BO */
   struct agx_bo *bo = pool->transient_bo;
   unsigned offset = ALIGN_POT(pool->transient_offset, alignment);

   /* If we don't fit, allocate a new backing */
   if (unlikely(bo == NULL || (offset + sz) >= POOL_SLAB_SIZE)) {
      bo = agx_pool_alloc_backing(pool,
                                  ALIGN_POT(MAX2(POOL_SLAB_SIZE, sz), 4096));
      offset = 0;
   }

   pool->transient_offset = offset + sz;

   struct agx_ptr ret = {
      .cpu = bo->map + offset,
      .gpu = bo->va->addr + offset,
   };

   if (out_bo)
      *out_bo = bo;

   return ret;
}

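/* Upload data to the pool at its natural (next power-of-two) alignment and
 * return the GPU address of the copy. */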
uint64_t
agx_pool_upload(struct agx_pool *pool, const void *data, size_t sz)
{
   return agx_pool_upload_aligned(pool, data, sz, util_next_power_of_two(sz));
}

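/* Upload data to the pool at the given alignment. Returns the GPU address of
 * the copy; if bo is non-NULL, it receives the backing BO. */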
uint64_t
agx_pool_upload_aligned_with_bo(struct agx_pool *pool, const void *data,
                                size_t sz, unsigned alignment,
                                struct agx_bo **bo)
{
   struct agx_ptr transfer =
      agx_pool_alloc_aligned_with_bo(pool, sz, alignment, bo);
   memcpy(transfer.cpu, data, sz);
   return transfer.gpu;
}