Matched excerpts from mm/swap_slots.c (Linux kernel swap slots cache), grouped by function:
// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot to
 * the local cache without acquiring the swap_info lock.
 * We do not reuse the returned slots directly but defer
 * that, returning them to the global pool in batches; this
 * allows the slots to coalesce and reduce fragmentation.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */
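The batching idea in this header is not swap-specific. Below is a minimal userspace sketch of the same design, with pthreads standing in for the kernel's per-CPU machinery; every name in it (pool_lock, pool_get_batch, slot_cache, get_slot, CACHE_SIZE) is invented for illustration. Each thread serves allocations from its private cache and takes the global pool lock only once per batch of CACHE_SIZE slots.

#include <pthread.h>
#include <stddef.h>

#define CACHE_SIZE 64

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_slot = 1;     /* stand-in for the global slot pool */

/* Grab a batch of n slots from the global pool under the global lock. */
static size_t pool_get_batch(unsigned long *out, size_t n)
{
        size_t i;

        pthread_mutex_lock(&pool_lock);
        for (i = 0; i < n; i++)
                out[i] = next_slot++;
        pthread_mutex_unlock(&pool_lock);
        return i;
}

struct slot_cache {
        unsigned long slots[CACHE_SIZE];
        size_t cur, nr;
};

static __thread struct slot_cache cache;        /* per-thread, like per-cpu */

/* Fast path: hand out a cached slot; take the global lock only to refill.
 * (This toy pool never runs dry, so the refill always succeeds.) */
static unsigned long get_slot(void)
{
        if (!cache.nr) {
                cache.cur = 0;
                cache.nr = pool_get_batch(cache.slots, CACHE_SIZE);
        }
        cache.nr--;
        return cache.slots[cache.cur++];
}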
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
static int alloc_swap_slot_cache(unsigned int cpu)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots;

        /*
         * Allocate the slots array outside swap_slots_cache_mutex:
         * the allocation may sleep and enter reclaim, which can in
         * turn take this mutex.
         */
        slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                         GFP_KERNEL);
        if (!slots)
                return -ENOMEM;

        mutex_lock(&swap_slots_cache_mutex);
        cache = &per_cpu(swp_slots, cpu);
        if (cache->slots) {             /* cache already allocated */
                mutex_unlock(&swap_slots_cache_mutex);
                kvfree(slots);
                return 0;
        }
        if (!cache->lock_initialized) {
                mutex_init(&cache->alloc_lock);
                cache->lock_initialized = true;
        }
        cache->nr = 0;
        cache->cur = 0;
        cache->n_ret = 0;
        /*
         * Readers use !cache->slots or !cache->slots_ret to know if it is
         * safe to acquire the corresponding lock and use the cache.
         */
        mb();                           /* publish only after init above */
        cache->slots = slots;
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}
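The barrier before publishing cache->slots is what makes the "!cache->slots means not ready" check safe: all initialization must be visible before the pointer is. A hedged standalone sketch of the same publish pattern using C11 atomics (names invented; the kernel uses its own smp barriers rather than stdatomic):

#include <stdatomic.h>
#include <pthread.h>
#include <stddef.h>

struct cache {
        pthread_mutex_t alloc_lock;
        size_t cur, nr;
        _Atomic(unsigned long *) slots; /* NULL until fully initialized */
};

/* Writer: initialize everything, then publish the pointer last. */
static void cache_publish(struct cache *c, unsigned long *slots)
{
        pthread_mutex_init(&c->alloc_lock, NULL);
        c->cur = 0;
        c->nr = 0;
        /* release: all stores above become visible before the pointer */
        atomic_store_explicit(&c->slots, slots, memory_order_release);
}

/* Reader: a NULL pointer means "not safe to take alloc_lock yet". */
static int cache_try_use(struct cache *c)
{
        unsigned long *slots =
                atomic_load_explicit(&c->slots, memory_order_acquire);

        if (!slots)
                return 0;
        pthread_mutex_lock(&c->alloc_lock);
        /* ... consume slots[c->cur] ... */
        pthread_mutex_unlock(&c->alloc_lock);
        return 1;
}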
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
                                  bool free_slots)
{
        struct swap_slots_cache *cache;

        cache = &per_cpu(swp_slots, cpu);
        if (cache->slots) {
                mutex_lock(&cache->alloc_lock);
                swapcache_free_entries(cache->slots + cache->cur, cache->nr);
                cache->cur = 0;
                cache->nr = 0;
                if (free_slots && cache->slots) {
                        /* free the backing array too, e.g. on CPU offline */
                        kvfree(cache->slots);
                        cache->slots = NULL;
                }
                mutex_unlock(&cache->alloc_lock);
        }
}
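Continuing the userspace sketch from the top of the file (same invented names, now with a lock and a heap-allocated array to mirror the kvfree() above): draining returns the unconsumed tail of the cache to the global pool in one batch and can optionally free the array itself.

#include <pthread.h>
#include <stdlib.h>

struct slot_cache {
        pthread_mutex_t lock;
        unsigned long *slots;   /* heap-allocated, like the kvcalloc'd array */
        size_t cur, nr;
};

/* Return a batch of slots to the global pool (body elided in this sketch). */
static void pool_put_batch(unsigned long *slots, size_t n)
{
        (void)slots;
        (void)n;
}

static void drain_cache(struct slot_cache *c, int free_array)
{
        if (!c->slots)          /* never initialized: nothing to drain */
                return;
        pthread_mutex_lock(&c->lock);
        pool_put_batch(c->slots + c->cur, c->nr);       /* unconsumed tail */
        c->cur = 0;
        c->nr = 0;
        if (free_array && c->slots) {
                free(c->slots);
                c->slots = NULL;
        }
        pthread_mutex_unlock(&c->lock);
}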
static void __drain_swap_slots_cache(unsigned int type)
{
        unsigned int cpu;

        /*
         * This function is called during
         *      1) swapoff, when we have to make sure no
         *         left over slots are in cache when we remove
         *         a swap device;
         *      2) disabling of swap slot cache, when we run low
         *         on swap slots when allocating memory and need
         *         to return swap slots to global pool.
         *
         * We cannot acquire the cpu hotplug lock here, as this
         * function can be invoked in the cpu hotplug path:
         * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
         *   -> memory allocation -> direct reclaim -> folio_alloc_swap
         *   -> drain_swap_slots_cache
         *
         * The loop over online cpus below may therefore miss a cpu
         * that is coming online, but that is okay: we never
         * fill any swap slots in slots cache of such cpu.
         * There are no slots on such cpu that need to be drained.
         */
        for_each_online_cpu(cpu)
                drain_slots_cache_cpu(cpu, type, false);
}
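The call chain in the comment above is a plain recursive-lock deadlock: the drain path would block on a lock its own caller already holds. A minimal sketch with a non-recursive pthread mutex (all names invented):

#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_caches(void)
{
        /*
         * If this function took hotplug_lock, the chain below would
         * block on a lock its own caller already holds:
         *
         *   cpu_up(): lock(hotplug_lock)
         *     -> hotplug callback -> allocation -> reclaim
         *       -> drain_caches(): lock(hotplug_lock)   [deadlock]
         *
         * So, like __drain_swap_slots_cache(), it must walk the
         * online CPUs without taking the hotplug lock.
         */
}

static void cpu_up(void)
{
        pthread_mutex_lock(&hotplug_lock);
        drain_caches();         /* reached via allocation -> reclaim */
        pthread_mutex_unlock(&hotplug_lock);
}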
222 "without swap slots cache.\n", __func__)) in enable_swap_slots_cache()
/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
        if (!use_swap_slot_cache)
                return 0;

        cache->cur = 0;
        if (swap_slot_cache_active)
                cache->nr = get_swap_pages(SWap_SLOTS_CACHE_SIZE,
                                           cache->slots, 0);

        return cache->nr;
}
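The lock-held comment above is a contract the compiler cannot check. A hedged sketch of how such a contract can be made checkable with the kernel's lockdep_assert_held(); the matched lines do not show this file doing so, so treat it purely as an illustration:

#include <linux/lockdep.h>

static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
        /* warn (under CONFIG_LOCKDEP) if called without alloc_lock held */
        lockdep_assert_held(&cache->alloc_lock);

        /* ... body as above ... */
        return cache->nr;
}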
swp_entry_t folio_alloc_swap(struct folio *folio)
{
        swp_entry_t entry;
        struct swap_slots_cache *cache;

        entry.val = 0;
        ...
        /*
         * Preemption is allowed here, because we may sleep
         * in refill_swap_slots_cache().  But it is safe, because
         * accesses to the per-CPU data structure are protected by the
         * mutex cache->alloc_lock.
         *
         * The alloc path here does not touch cache->slots_ret
         * so cache->free_lock is not taken.
         */
        cache = raw_cpu_ptr(&swp_slots);

        if (likely(check_cache_active() && cache->slots)) {
                mutex_lock(&cache->alloc_lock);
                if (cache->slots) {
repeat:
                        if (cache->nr) {
                                entry = cache->slots[cache->cur];
                                cache->slots[cache->cur++].val = 0;
                                cache->nr--;
                        } else if (refill_swap_slots_cache(cache)) {
                                goto repeat;
                        }
                }
                mutex_unlock(&cache->alloc_lock);
                ...
        }
        ...
}
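Closing the loop on the userspace sketches above (same invented names; pool_get_batch() as sketched at the top of the file): the fast path pops an entry under the cache mutex, refills in place when the cache runs dry, and falls back to a direct pool allocation when the cache yields nothing.

#include <pthread.h>
#include <stddef.h>

#define CACHE_SIZE 64

struct slot_cache {
        pthread_mutex_t alloc_lock;
        unsigned long slots[CACHE_SIZE];
        size_t cur, nr;
};

size_t pool_get_batch(unsigned long *out, size_t n);    /* sketched earlier */

static size_t refill_cache(struct slot_cache *c)
{
        c->cur = 0;
        c->nr = pool_get_batch(c->slots, CACHE_SIZE);
        return c->nr;
}

static unsigned long alloc_slot(struct slot_cache *c)
{
        unsigned long entry = 0;

        pthread_mutex_lock(&c->alloc_lock);
repeat:
        if (c->nr) {
                entry = c->slots[c->cur];
                c->slots[c->cur++] = 0; /* don't leave stale entries behind */
                c->nr--;
        } else if (refill_cache(c)) {
                goto repeat;
        }
        pthread_mutex_unlock(&c->alloc_lock);

        if (!entry)                             /* cache empty, pool was dry */
                pool_get_batch(&entry, 1);      /* fall back to the pool */
        return entry;
}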