/*
 * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef OBJECT_POOL_H
#define OBJECT_POOL_H

#include <stdlib.h>

#include <common/debug.h>
#include <lib/utils_def.h>

/*
 * Pool of statically allocated objects.
 *
 * Objects can be reserved but not freed. This is by design, not a limitation.
 * We do not want to introduce the complexity that comes with freeing memory,
 * such as use-after-free bugs, memory fragmentation and so on.
 *
 * The object size and the capacity of the pool are fixed at build time, as is
 * the address of the objects' back store.
 */
struct object_pool {
	/* Size of one object in the pool, in bytes. */
	const size_t obj_size;

	/* Number of objects in the pool. */
	const size_t capacity;

	/* Back store holding the objects. */
	void *const objects;

	/* Number of objects currently allocated. */
	size_t used;
};

/* Create a static pool of objects. */
#define OBJECT_POOL(_pool_name, _obj_backstore, _obj_size, _obj_count) \
	struct object_pool _pool_name = { \
		.objects = (_obj_backstore), \
		.obj_size = (_obj_size), \
		.capacity = (_obj_count), \
		.used = 0U, \
	}

/* Create a static pool of objects out of an array of pre-allocated objects. */
#define OBJECT_POOL_ARRAY(_pool_name, _obj_array) \
	OBJECT_POOL(_pool_name, (_obj_array), \
		    sizeof((_obj_array)[0]), ARRAY_SIZE(_obj_array))
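
/*
 * Usage sketch (illustrative only; 'struct mailbox', 'mboxes' and 'mbox_pool'
 * are hypothetical names, not part of this interface):
 *
 *	static struct mailbox mboxes[32];
 *	static OBJECT_POOL_ARRAY(mbox_pool, mboxes);
 *
 * This declares 'mbox_pool' backed by the 'mboxes' array, with obj_size and
 * capacity derived from the array definition. The equivalent explicit form is
 * OBJECT_POOL(mbox_pool, mboxes, sizeof(struct mailbox), 32).
 */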

/*
 * Allocate 'count' objects from a pool.
 * Return the address of the first object. Panic on error.
 */
static inline void *pool_alloc_n(struct object_pool *pool, size_t count)
{
	if ((pool->used + count) > pool->capacity) {
		ERROR("Cannot allocate %zu objects out of pool (%zu objects left).\n",
		      count, pool->capacity - pool->used);
		panic();
	}

	void *obj = (char *)(pool->objects) + (pool->obj_size * pool->used);
	pool->used += count;
	return obj;
}
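
/*
 * For illustration, a minimal sketch of a multi-object allocation, assuming
 * the hypothetical 'mbox_pool' declared above:
 *
 *	struct mailbox *slots = pool_alloc_n(&mbox_pool, 4U);
 *
 * The four objects are adjacent in the back store, so 'slots[0]' to 'slots[3]'
 * may be used as a contiguous array. If fewer than four objects remain, the
 * call panics instead of returning NULL.
 */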

/*
 * Allocate 1 object from a pool.
 * Return the address of the object. Panic on error.
 */
static inline void *pool_alloc(struct object_pool *pool)
{
	return pool_alloc_n(pool, 1U);
}
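
/*
 * For illustration, single-object allocation with the hypothetical pool above:
 *
 *	struct mailbox *mbox = pool_alloc(&mbox_pool);
 *
 * This is shorthand for pool_alloc_n(&mbox_pool, 1U). The returned object
 * stays allocated for the lifetime of the pool; there is no free operation.
 */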

#endif /* OBJECT_POOL_H */