Lines Matching +full:slot +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
12 * objpool: ring-array based lockless MPMC/FIFO queues
20 struct objpool_slot *slot, in objpool_init_percpu_slot() argument
24 void *obj = (void *)&slot->entries[pool->capacity]; in objpool_init_percpu_slot()
28 slot->mask = pool->capacity - 1; in objpool_init_percpu_slot()
36 slot->entries[slot->tail & slot->mask] = obj; in objpool_init_percpu_slot()
37 obj = obj + pool->obj_size; in objpool_init_percpu_slot()
38 slot->tail++; in objpool_init_percpu_slot()
39 slot->last = slot->tail; in objpool_init_percpu_slot()
40 pool->nr_objs++; in objpool_init_percpu_slot()
55 struct objpool_slot *slot; in objpool_init_percpu_slots() local
56 int nodes, size, rc; in objpool_init_percpu_slots() local
62 /* compute how many objects to be allocated with this slot */ in objpool_init_percpu_slots()
63 nodes = nr_objs / pool->nr_possible_cpus; in objpool_init_percpu_slots()
64 if (cpu_count < (nr_objs % pool->nr_possible_cpus)) in objpool_init_percpu_slots()
68 size = struct_size(slot, entries, pool->capacity) + in objpool_init_percpu_slots()
69 pool->obj_size * nodes; in objpool_init_percpu_slots()
72 * here we allocate percpu-slot & objs together in a single in objpool_init_percpu_slots()
 76	 * minimal size of vmalloc is one page since vmalloc would in objpool_init_percpu_slots()
77 * always align the requested size to page size. in objpool_init_percpu_slots()
79 * allocate percpu slot with kmalloc. in objpool_init_percpu_slots()
81 slot = NULL; in objpool_init_percpu_slots()
83 if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC) in objpool_init_percpu_slots()
84 slot = __vmalloc_node(size, sizeof(void *), pool->gfp, in objpool_init_percpu_slots()
87 if (!slot) { in objpool_init_percpu_slots()
88 slot = kmalloc_node(size, pool->gfp, cpu_to_node(i)); in objpool_init_percpu_slots()
89 if (!slot) in objpool_init_percpu_slots()
90 return -ENOMEM; in objpool_init_percpu_slots()
92 memset(slot, 0, size); in objpool_init_percpu_slots()
93 pool->cpu_slots[i] = slot; in objpool_init_percpu_slots()
96 rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit); in objpool_init_percpu_slots()
109 if (!pool->cpu_slots) in objpool_fini_percpu_slots()
113 kvfree(pool->cpu_slots[i]); in objpool_fini_percpu_slots()
114 kfree(pool->cpu_slots); in objpool_fini_percpu_slots()
117 /* initialize object pool and pre-allocate objects */
127 return -EINVAL; in objpool_init()
129 /* align up to unsigned long size */ in objpool_init()
135 return -EINVAL; in objpool_init()
139 pool->nr_possible_cpus = num_possible_cpus(); in objpool_init()
140 pool->obj_size = object_size; in objpool_init()
141 pool->capacity = capacity; in objpool_init()
142 pool->gfp = gfp & ~__GFP_ZERO; in objpool_init()
143 pool->context = context; in objpool_init()
144 pool->release = release; in objpool_init()
146 pool->cpu_slots = kzalloc(slot_size, pool->gfp); in objpool_init()
147 if (!pool->cpu_slots) in objpool_init()
148 return -ENOMEM; in objpool_init()
150 /* initialize per-cpu slots */ in objpool_init()
155 refcount_set(&pool->ref, pool->nr_objs + 1); in objpool_init()
164 if (!pool->cpu_slots) in objpool_free()
171 if (pool->release) in objpool_free()
172 pool->release(pool, pool->context); in objpool_free()
180 return -EINVAL; in objpool_drop()
182 if (refcount_dec_and_test(&pool->ref)) { in objpool_drop()
187 return -EAGAIN; in objpool_drop()
200 if (refcount_sub_and_test(count, &pool->ref)) in objpool_fini()