/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};
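
/* For illustration: the groups form a flat array of
 * num_orders * num_heaps * (1 + allow_three_fourths_allocations) entries,
 * allocated in pb_slabs_init and indexed by the formula in
 * pb_slab_alloc_reclaimed. With assumed example values of min_order = 8,
 * max_order = 12, two heaps and 3/4-size allocations enabled, that is
 * 5 * 2 * 2 = 20 groups: for each heap, a power-of-two group and a 3/4-size
 * group for each entry size from 256 to 4096 bytes.
 */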


static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   list_del(&entry->head); /* remove from reclaim list */
   list_add(&entry->head, &slab->free);
   slab->num_free++;

   /* Add slab to the group's list if it isn't already linked. */
   if (!list_is_linked(&slab->head)) {
      struct pb_slab_group *group = &slabs->groups[entry->slab->group_index];
      list_addtail(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      list_del(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

#define MAX_FAILED_RECLAIMS 2

static unsigned
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   struct pb_slab_entry *entry, *next;
   unsigned num_failed_reclaims = 0;
   unsigned num_reclaims = 0;
   LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
      if (slabs->can_reclaim(slabs->priv, entry)) {
         pb_slab_reclaim(slabs, entry);
         num_reclaims++;
      /* there are typically three possible scenarios when reclaiming:
       * - all entries reclaimed
       * - no entries reclaimed
       * - all but one entry reclaimed
       * in the scenario where a slab contains many (10+) unused entries,
       * the driver should not walk the entire list, as this is likely to
       * result in zero reclaims if the first few entries fail to reclaim
       */
      } else if (++num_failed_reclaims >= MAX_FAILED_RECLAIMS) {
         break;
      }
   }
   return num_reclaims;
}

static unsigned
pb_slabs_reclaim_all_locked(struct pb_slabs *slabs)
{
   struct pb_slab_entry *entry, *next;
   unsigned num_reclaims = 0;
   LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
      if (slabs->can_reclaim(slabs->priv, entry)) {
         pb_slab_reclaim(slabs, entry);
         num_reclaims++;
      }
   }
   return num_reclaims;
}

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
struct pb_slab_entry *
pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;
   unsigned entry_size = 1 << order;
   bool three_fourths = false;

   /* If the size is <= 3/4 of the entry size, use a slab with entries using
    * 3/4 sizes to reduce overallocation.
    */
   if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) {
      entry_size = entry_size * 3 / 4;
      three_fourths = true;
   }
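
   /* Worked example (assumed values, with min_order <= 11): a request of
    * size 1500 rounds up to order 11, i.e. entry_size = 2048. Since
    * 1500 <= 2048 * 3 / 4 = 1536, the 3/4 variant is used and entry_size
    * becomes 1536, cutting the overallocation from 548 to 36 bytes.
    */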

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   group_index = (heap * slabs->num_orders + (order - slabs->min_order)) *
                 (1 + slabs->allow_three_fourths_allocations) + three_fourths;
   group = &slabs->groups[group_index];
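   /* Worked example, continuing the assumed values from the note above
    * (min_order = 8, num_orders = 5, two heaps, 3/4-size allocations
    * enabled): for heap = 1, order = 11 and three_fourths = true,
    * group_index = (1 * 5 + (11 - 8)) * 2 + 1 = 17.
    */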

   simple_mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (list_is_empty(&group->slabs) ||
       list_is_empty(&list_entry(group->slabs.next, struct pb_slab, head)->free)) {
      if (reclaim_all)
         pb_slabs_reclaim_all_locked(slabs);
      else
         pb_slabs_reclaim_locked(slabs);
   }

   /* Remove slabs without free entries. */
   while (!list_is_empty(&group->slabs)) {
      slab = list_entry(group->slabs.next, struct pb_slab, head);
      if (!list_is_empty(&slab->free))
         break;

      list_del(&slab->head);
   }

   if (list_is_empty(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      simple_mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index);
      if (!slab)
         return NULL;
      simple_mtx_lock(&slabs->mutex);

      list_add(&slab->head, &group->slabs);
   }

   entry = list_entry(slab->free.next, struct pb_slab_entry, head);
   list_del(&entry->head);
   slab->num_free--;

   simple_mtx_unlock(&slabs->mutex);

   return entry;
}
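
/* Illustrative usage sketch (the struct and field names here are assumed, not
 * defined in this file): drivers typically embed struct pb_slab_entry in
 * their own buffer struct and recover the buffer from the returned entry,
 * e.g.:
 *
 *    struct driver_buffer {
 *       ...
 *       struct pb_slab_entry entry;
 *    };
 *
 *    struct pb_slab_entry *e = pb_slab_alloc(slabs, size, heap);
 *    if (e) {
 *       struct driver_buffer *buf =
 *          container_of(e, struct driver_buffer, entry);
 *       ...
 *    }
 */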

struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   return pb_slab_alloc_reclaimed(slabs, size, heap, false);
}

/* Free the given slab entry.
 *
 * The entry may still be in use e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   simple_mtx_lock(&slabs->mutex);
   list_addtail(&entry->head, &slabs->reclaim);
   simple_mtx_unlock(&slabs->mutex);
}
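
/* Illustrative sketch of a can_reclaim callback (names are hypothetical): a
 * driver usually reports an entry as reclaimable once the GPU work that last
 * used it has completed, e.g. by checking a fence without waiting:
 *
 *    static bool driver_can_reclaim(void *priv, struct pb_slab_entry *entry)
 *    {
 *       struct driver_buffer *buf =
 *          container_of(entry, struct driver_buffer, entry);
 *       return driver_fence_is_signalled(buf->fence);
 *    }
 */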

/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 *
 * This may end up freeing some slabs and is therefore useful for reclaiming
 * memory that is no longer used. However, calling this function is not
 * strictly required, since pb_slab_alloc will eventually do the same thing.
 */
unsigned
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   unsigned num_reclaims;
   simple_mtx_lock(&slabs->mutex);
   num_reclaims = pb_slabs_reclaim_locked(slabs);
   simple_mtx_unlock(&slabs->mutex);
   return num_reclaims;
}

/* Initialize the slabs manager.
 *
 * The minimum and maximum size of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;
   slabs->allow_three_fourths_allocations = allow_three_fourth_allocations;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   list_inithead(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps *
                (1 + allow_three_fourth_allocations);
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      list_inithead(&group->slabs);
   }

   (void) simple_mtx_init(&slabs->mutex, mtx_plain);

   return true;
}
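
/* Illustrative initialization sketch (callback and variable names are
 * hypothetical): a driver wires up its slab backend with entry sizes from
 * 2^8 to 2^12 bytes across two heaps, 3/4-size allocations enabled, like so:
 *
 *    struct pb_slabs slabs;
 *
 *    if (!pb_slabs_init(&slabs, 8, 12, 2, true, winsys,
 *                       driver_can_reclaim,
 *                       driver_slab_alloc,
 *                       driver_slab_free))
 *       return false;
 *
 * The slab_alloc callback is expected to return a struct pb_slab whose free
 * list and num_free/num_entries fields describe the entries it created;
 * slab_free releases the slab again. pb_slabs_deinit below tears everything
 * down.
 */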

/* Shut down the slabs manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false).
 */
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         list_entry(slabs->reclaim.next, struct pb_slab_entry, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   simple_mtx_destroy(&slabs->mutex);
}
305