Lines matching +full:wait +full:-queue (excerpts from include/linux/sbitmap.h)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/wait.h>

 * struct sbitmap_word - Word in a &struct sbitmap.

 * @swap_lock: serializes simultaneous updates of ->word and ->cleared

 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.

 * @round_robin: Allocate bits in strict round-robin order.

 * @alloc_hint: Cache of last successfully allocated or freed bit.
 *
 * This is per-cpu, which allows multiple users to stick to different
 * cachelines until the map is exhausted.

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
        /**
         * @wait: Wait queue.
         */
        wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.

 * @wake_index: Next wait queue in @ws to wake up.

 * @ws: Wait queues.

 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.

static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
        if (index == sb->map_nr - 1)
                return sb->depth - (index << sb->shift);
        return 1U << sb->shift;
}

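/*
 * Worked example (not part of the source): with sb->depth = 100 and
 * sb->shift = 6 (64 bits per word), map_nr is 2. __map_depth() returns
 * 64 for index 0 and 100 - (1 << 6) = 36 for the final, partial word.
 */
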
 * sbitmap_free() - Free memory used by a &struct sbitmap.

static inline void sbitmap_free(struct sbitmap *sb)
{
        free_percpu(sb->alloc_hint);
        kvfree(sb->map);
        sb->map = NULL;
}

 * sbitmap_resize() - Resize a &struct sbitmap.

 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.

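/*
 * Hedged usage sketch (not from the source; the function name is
 * hypothetical): pair sbitmap_get() with sbitmap_put() from later in this
 * header. Real users keep the bit for the lifetime of the object it tags;
 * this only shows the call pairing.
 */
static inline int example_sbitmap_cycle(struct sbitmap *sb)
{
        int nr = sbitmap_get(sb);       /* -1 when no bit is free */

        if (nr >= 0)
                sbitmap_put(sb, nr);    /* deferred-clearing free */
        return nr;
}
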
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.

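/*
 * Sketch of the two-class split described above (the function name is
 * hypothetical): high-priority callers may take any bit, low-priority
 * callers are limited to half of each word.
 */
static inline int example_get_class(struct sbitmap *sb, bool high_prio)
{
        if (high_prio)
                return sbitmap_get(sb);
        return sbitmap_get_shallow(sb, 1U << (sb->shift - 1));
}
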
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

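/*
 * Worked example (illustrative): with (sb)->shift == 6, bit number 70
 * lands in word 70 >> 6 == 1, at bit 70 & 63 == 6 within that word.
 */
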
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.

        if (start >= sb->depth)
                start = 0;

        while (scanned < sb->depth) {
                unsigned int depth = min_t(unsigned int,
                                           __map_depth(sb, index) - nr,
                                           sb->depth - scanned);

                scanned += depth;
                word = sb->map[index].word & ~sb->map[index].cleared;
                /* ... */
                        if (!fn(sb, (index << sb->shift) + nr, data))
                                return;
                /* ... */
                if (++index >= sb->map_nr)
                        index = 0;
        }

 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.

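/*
 * Hedged sketch (names are hypothetical): count set bits with the iterator
 * documented above. The callback returns true to keep scanning, false to
 * stop early.
 */
static bool example_count_fn(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        (*(unsigned int *)data)++;
        return true;
}

static unsigned int example_count(struct sbitmap *sb)
{
        unsigned int count = 0;

        sbitmap_for_each_set(sb, example_count_fn, &count);
        return count;
}
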
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
                                            unsigned int bitnr)
{
        return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
        unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

        set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

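/*
 * Illustrative note (not from the source): after
 * sbitmap_deferred_clear_bit(sb, 3), bit 3 may still be set in ->word, but
 * readers such as the iteration above treat word & ~cleared as the live
 * mask, so the bit is effectively free; the two masks are reconciled in
 * batch later.
 */
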
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
        sbitmap_deferred_clear_bit(sb, bitnr);

        if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
                *raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

        while ((4U << shift) > depth)
                shift--;

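/*
 * Worked example (assuming the shift starts at 6, i.e. 64 bits per word,
 * which is an assumption about SBITMAP_DEF_SHIFT): for depth = 100,
 * (4 << 6) = 256 > 100 and (4 << 5) = 128 > 100, but (4 << 4) = 64 is
 * not, so the loop settles on shift = 4: 16 bits per word spread across
 * 7 words, trading density for fewer cacheline collisions.
 */
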
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.

 * sbitmap_weight() - Return how many set and not cleared bits in a &struct
 * sbitmap.

 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.

 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.

 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.

static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
        kfree(sbq->ws);
        sbitmap_free(&sbq->sb);
}

 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
 * by depth. This interface is for HCTX shared tags or queue shared tags.

 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.

 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.

 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.

 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.

 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.

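/*
 * Hedged sketch (the function name is hypothetical): allocate a bit, then
 * free it with the CPU cookie reported by sbitmap_queue_get(), which
 * sbitmap_queue_clear() takes to update the right per-cpu hint.
 */
static inline int example_queue_cycle(struct sbitmap_queue *sbq)
{
        unsigned int cpu;
        int nr = sbitmap_queue_get(sbq, &cpu);

        if (nr >= 0)
                sbitmap_queue_clear(sbq, nr, cpu);
        return nr;
}
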
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.

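/*
 * Hedged sketch (names are hypothetical): a queue that caps one class of
 * users at 8 bits per word. The minimum shallow depth is announced once at
 * setup, on the assumption that wake batching must account for it before
 * any shallow allocation happens.
 */
static inline void example_setup_shallow(struct sbitmap_queue *sbq)
{
        sbitmap_queue_min_shallow_depth(sbq, 8);
}

static inline int example_get_shallow(struct sbitmap_queue *sbq)
{
        return sbitmap_queue_get_shallow(sbq, 8);
}
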
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.

 * sbitmap_queue_clear_batch() - Free a batch of allocated bits

static inline int sbq_index_inc(int index)
{
        return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

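/*
 * Illustrative note: SBQ_WAIT_QUEUES is a power of two (8 upstream, stated
 * here as an assumption), so the mask makes the index wrap: with 8 queues,
 * sbq_index_inc(7) == (7 + 1) & 7 == 0.
 */
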
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.

static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
                                                  atomic_t *wait_index)
{
        struct sbq_wait_state *ws;

        ws = &sbq->ws[atomic_read(wait_index)];
        sbq_index_atomic_inc(wait_index);
        return ws;
}

 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.

 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.

 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.

struct sbq_wait {
        struct sbitmap_queue *sbq;      /* if set, sbq_wait is accounted */
        struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)                                           \
        struct sbq_wait name = {                                        \
                .sbq = NULL,                                            \
                .wait = {                                               \
                        .private        = current,                      \
                        .func           = autoremove_wake_function,     \
                        .entry          = LIST_HEAD_INIT((name).wait.entry), \
                },                                                      \
        }
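
/*
 * Hedged sketch of the sleeping allocation pattern (modeled on how blk-mq
 * waits for tags; the function name is hypothetical and error handling is
 * elided). Assumes the sbitmap_prepare_to_wait() and sbitmap_finish_wait()
 * helpers declared elsewhere in this header.
 */
static inline int example_get_or_sleep(struct sbitmap_queue *sbq,
                                       atomic_t *wait_index,
                                       unsigned int *cpu)
{
        struct sbq_wait_state *ws = sbq_wait_ptr(sbq, wait_index);
        DEFINE_SBQ_WAIT(wait);
        int nr;

        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
                nr = sbitmap_queue_get(sbq, cpu);
                if (nr >= 0)
                        break;
                schedule();
                /* Spread sleepers over the wait queues on each retry. */
                ws = sbq_wait_ptr(sbq, wait_index);
        }
        sbitmap_finish_wait(sbq, ws, &wait);
        return nr;
}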