Lines matching full:si — each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks the line where si is declared in that function.

56 static void swap_entry_range_free(struct swap_info_struct *si,
59 static void swap_range_alloc(struct swap_info_struct *si,
62 static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
147 static long swap_usage_in_pages(struct swap_info_struct *si) in swap_usage_in_pages() argument
149 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; in swap_usage_in_pages()
164 static bool swap_is_has_cache(struct swap_info_struct *si, in swap_is_has_cache() argument
167 unsigned char *map = si->swap_map + offset; in swap_is_has_cache()
179 static bool swap_is_last_map(struct swap_info_struct *si, in swap_is_last_map() argument
182 unsigned char *map = si->swap_map + offset; in swap_is_last_map()
203 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
206 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
245 ci = lock_cluster(si, offset); in __try_to_reclaim_swap()
246 need_reclaim = swap_is_has_cache(si, offset, nr_pages); in __try_to_reclaim_swap()
265 ci = lock_cluster(si, offset); in __try_to_reclaim_swap()
266 swap_entry_range_free(si, ci, entry, nr_pages); in __try_to_reclaim_swap()
292 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
300 se = first_se(si); in discard_swap()
304 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
315 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
362 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
365 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
379 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
421 static inline unsigned int cluster_index(struct swap_info_struct *si, in cluster_index() argument
424 return ci - si->cluster_info; in cluster_index()
427 static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si, in offset_to_cluster() argument
430 return &si->cluster_info[offset / SWAPFILE_CLUSTER]; in offset_to_cluster()
433 static inline unsigned int cluster_offset(struct swap_info_struct *si, in cluster_offset() argument
436 return cluster_index(si, ci) * SWAPFILE_CLUSTER; in cluster_offset()
439 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, in lock_cluster() argument
444 ci = offset_to_cluster(si, offset); in lock_cluster()
455 static void move_cluster(struct swap_info_struct *si, in move_cluster() argument
464 spin_lock(&si->lock); in move_cluster()
469 spin_unlock(&si->lock); in move_cluster()
472 atomic_long_dec(&si->frag_cluster_nr[ci->order]); in move_cluster()
474 atomic_long_inc(&si->frag_cluster_nr[ci->order]); in move_cluster()
479 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
482 unsigned int idx = cluster_index(si, ci); in swap_cluster_schedule_discard()
485 * si->swap_map directly. To make sure the discarding cluster isn't in swap_cluster_schedule_discard()
489 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
492 move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); in swap_cluster_schedule_discard()
493 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
496 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in __free_cluster() argument
499 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); in __free_cluster()
507 * list status without touching si lock.
513 struct swap_info_struct *si, struct list_head *list) in isolate_lock_cluster() argument
517 spin_lock(&si->lock); in isolate_lock_cluster()
519 if (unlikely(!(si->flags & SWP_WRITEOK))) in isolate_lock_cluster()
537 spin_unlock(&si->lock); in isolate_lock_cluster()
548 static bool swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
554 spin_lock(&si->lock); in swap_do_scheduled_discard()
555 while (!list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
556 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); in swap_do_scheduled_discard()
563 idx = cluster_index(si, ci); in swap_do_scheduled_discard()
564 spin_unlock(&si->lock); in swap_do_scheduled_discard()
565 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
574 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
576 __free_cluster(si, ci); in swap_do_scheduled_discard()
579 spin_lock(&si->lock); in swap_do_scheduled_discard()
581 spin_unlock(&si->lock); in swap_do_scheduled_discard()
587 struct swap_info_struct *si; in swap_discard_work() local
589 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
591 swap_do_scheduled_discard(si); in swap_discard_work()
596 struct swap_info_struct *si; in swap_users_ref_free() local
598 si = container_of(ref, struct swap_info_struct, users); in swap_users_ref_free()
599 complete(&si->comp); in swap_users_ref_free()
606 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in free_cluster() argument
617 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
619 swap_cluster_schedule_discard(si, ci); in free_cluster()
623 __free_cluster(si, ci); in free_cluster()
630 static void partial_free_cluster(struct swap_info_struct *si, in partial_free_cluster() argument
637 move_cluster(si, ci, &si->nonfull_clusters[ci->order], in partial_free_cluster()
643 * Note: allocation doesn't acquire si lock, and may drop the ci lock for
646 static void relocate_cluster(struct swap_info_struct *si, in relocate_cluster() argument
657 free_cluster(si, ci); in relocate_cluster()
660 move_cluster(si, ci, &si->frag_clusters[ci->order], in relocate_cluster()
664 move_cluster(si, ci, &si->full_clusters, in relocate_cluster()
674 static void inc_cluster_info_page(struct swap_info_struct *si, in inc_cluster_info_page() argument
687 static bool cluster_reclaim_range(struct swap_info_struct *si, in cluster_reclaim_range() argument
691 unsigned char *map = si->swap_map; in cluster_reclaim_range()
702 nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); in cluster_reclaim_range()
725 static bool cluster_scan_range(struct swap_info_struct *si, in cluster_scan_range() argument
731 unsigned char *map = si->swap_map; in cluster_scan_range()
750 static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, in cluster_alloc_range() argument
758 if (!(si->flags & SWP_WRITEOK)) in cluster_alloc_range()
768 memset(si->swap_map + start, usage, nr_pages); in cluster_alloc_range()
769 swap_range_alloc(si, nr_pages); in cluster_alloc_range()
776 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, in alloc_swap_scan_cluster() argument
784 unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); in alloc_swap_scan_cluster()
795 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) in alloc_swap_scan_cluster()
798 ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); in alloc_swap_scan_cluster()
813 if (!cluster_alloc_range(si, ci, offset, usage, order)) in alloc_swap_scan_cluster()
822 relocate_cluster(si, ci); in alloc_swap_scan_cluster()
824 if (si->flags & SWP_SOLIDSTATE) in alloc_swap_scan_cluster()
825 __this_cpu_write(si->percpu_cluster->next[order], next); in alloc_swap_scan_cluster()
827 si->global_cluster->next[order] = next; in alloc_swap_scan_cluster()
832 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) in swap_reclaim_full_clusters() argument
837 unsigned char *map = si->swap_map; in swap_reclaim_full_clusters()
841 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; in swap_reclaim_full_clusters()
843 while ((ci = isolate_lock_cluster(si, &si->full_clusters))) { in swap_reclaim_full_clusters()
844 offset = cluster_offset(si, ci); in swap_reclaim_full_clusters()
845 end = min(si->max, offset + SWAPFILE_CLUSTER); in swap_reclaim_full_clusters()
851 nr_reclaim = __try_to_reclaim_swap(si, offset, in swap_reclaim_full_clusters()
864 relocate_cluster(si, ci); in swap_reclaim_full_clusters()
874 struct swap_info_struct *si; in swap_reclaim_work() local
876 si = container_of(work, struct swap_info_struct, reclaim_work); in swap_reclaim_work()
878 swap_reclaim_full_clusters(si, true); in swap_reclaim_work()
886 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, in cluster_alloc_swap_entry() argument
892 if (si->flags & SWP_SOLIDSTATE) { in cluster_alloc_swap_entry()
894 local_lock(&si->percpu_cluster->lock); in cluster_alloc_swap_entry()
895 offset = __this_cpu_read(si->percpu_cluster->next[order]); in cluster_alloc_swap_entry()
898 spin_lock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
899 offset = si->global_cluster->next[order]; in cluster_alloc_swap_entry()
903 ci = lock_cluster(si, offset); in cluster_alloc_swap_entry()
907 offset = cluster_offset(si, ci); in cluster_alloc_swap_entry()
908 found = alloc_swap_scan_cluster(si, ci, offset, in cluster_alloc_swap_entry()
918 ci = isolate_lock_cluster(si, &si->free_clusters); in cluster_alloc_swap_entry()
920 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), in cluster_alloc_swap_entry()
928 swap_reclaim_full_clusters(si, false); in cluster_alloc_swap_entry()
933 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) { in cluster_alloc_swap_entry()
934 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), in cluster_alloc_swap_entry()
942 frags_existing = atomic_long_read(&si->frag_cluster_nr[order]); in cluster_alloc_swap_entry()
944 (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) { in cluster_alloc_swap_entry()
945 atomic_long_dec(&si->frag_cluster_nr[order]); in cluster_alloc_swap_entry()
952 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), in cluster_alloc_swap_entry()
963 * reread cluster_next_cpu since we dropped si->lock in cluster_alloc_swap_entry()
965 if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si)) in cluster_alloc_swap_entry()
975 * allocation, but reclaim may drop si->lock and race with another user. in cluster_alloc_swap_entry()
977 while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) { in cluster_alloc_swap_entry()
978 atomic_long_dec(&si->frag_cluster_nr[o]); in cluster_alloc_swap_entry()
979 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), in cluster_alloc_swap_entry()
985 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) { in cluster_alloc_swap_entry()
986 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), in cluster_alloc_swap_entry()
993 if (si->flags & SWP_SOLIDSTATE) in cluster_alloc_swap_entry()
994 local_unlock(&si->percpu_cluster->lock); in cluster_alloc_swap_entry()
996 spin_unlock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
1001 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) in del_from_avail_list() argument
1011 * swapoff here so it's synchronized by both si->lock and in del_from_avail_list()
1015 lockdep_assert_held(&si->lock); in del_from_avail_list()
1016 si->flags &= ~SWP_WRITEOK; in del_from_avail_list()
1017 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in del_from_avail_list()
1022 * si->inuse_pages == pages), any concurrent slot freeing, in del_from_avail_list()
1026 pages = si->pages; in del_from_avail_list()
1027 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in del_from_avail_list()
1033 plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]); in del_from_avail_list()
1040 static void add_to_avail_list(struct swap_info_struct *si, bool swapon) in add_to_avail_list() argument
1050 lockdep_assert_held(&si->lock); in add_to_avail_list()
1051 si->flags |= SWP_WRITEOK; in add_to_avail_list()
1053 if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) in add_to_avail_list()
1057 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) in add_to_avail_list()
1060 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in add_to_avail_list()
1064 * see (inuse_pages == si->pages) and will call del_from_avail_list. If in add_to_avail_list()
1067 pages = si->pages; in add_to_avail_list()
1070 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in add_to_avail_list()
1076 plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
1087 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_add() argument
1089 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_add()
1095 if (unlikely(val == si->pages)) { in swap_usage_add()
1096 del_from_avail_list(si, false); in swap_usage_add()
1103 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_sub() argument
1105 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_sub()
1112 add_to_avail_list(si, false); in swap_usage_sub()
1115 static void swap_range_alloc(struct swap_info_struct *si, in swap_range_alloc() argument
1118 if (swap_usage_add(si, nr_entries)) { in swap_range_alloc()
1120 schedule_work(&si->reclaim_work); in swap_range_alloc()
1124 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
1137 clear_bit(offset + i, si->zeromap); in swap_range_free()
1138 zswap_invalidate(swp_entry(si->type, offset + i)); in swap_range_free()
1141 if (si->flags & SWP_BLKDEV) in swap_range_free()
1143 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
1147 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
1149 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
1152 clear_shadow_from_swap_cache(si->type, begin, end); in swap_range_free()
1155 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0 in swap_range_free()
1160 swap_usage_sub(si, nr_entries); in swap_range_free()
1163 static int cluster_alloc_swap(struct swap_info_struct *si, in cluster_alloc_swap() argument
1170 unsigned long offset = cluster_alloc_swap_entry(si, order, usage); in cluster_alloc_swap()
1174 slots[n_ret++] = swp_entry(si->type, offset); in cluster_alloc_swap()
1180 static int scan_swap_map_slots(struct swap_info_struct *si, in scan_swap_map_slots() argument
1211 if (!(si->flags & SWP_BLKDEV)) in scan_swap_map_slots()
1215 return cluster_alloc_swap(si, usage, nr, slots, order); in scan_swap_map_slots()
1218 static bool get_swap_device_info(struct swap_info_struct *si) in get_swap_device_info() argument
1220 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device_info()
1223 * Guarantee the si->users are checked before accessing other in get_swap_device_info()
1224 * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is in get_swap_device_info()
1238 struct swap_info_struct *si, *next; in get_swap_pages() local
1257 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1258 /* requeue si to after same-priority siblings */ in get_swap_pages()
1259 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1261 if (get_swap_device_info(si)) { in get_swap_pages()
1262 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, in get_swap_pages()
1264 put_swap_device(si); in get_swap_pages()
1271 * if we got here, it's likely that si was almost full before, in get_swap_pages()
1272 * and since scan_swap_map_slots() can drop the si->lock, in get_swap_pages()
1274 * same si and it filled up before we could get one; or, the si in get_swap_pages()
1276 * si->lock. Since we dropped the swap_avail_lock, the in get_swap_pages()
1297 struct swap_info_struct *si; in _swap_info_get() local
1302 si = swp_swap_info(entry); in _swap_info_get()
1303 if (!si) in _swap_info_get()
1305 if (data_race(!(si->flags & SWP_USED))) in _swap_info_get()
1308 if (offset >= si->max) in _swap_info_get()
1310 if (data_race(!si->swap_map[swp_offset(entry)])) in _swap_info_get()
1312 return si; in _swap_info_get()
1329 static unsigned char __swap_entry_free_locked(struct swap_info_struct *si, in __swap_entry_free_locked() argument
1336 count = si->swap_map[offset]; in __swap_entry_free_locked()
1352 if (swap_count_continued(si, offset, count)) in __swap_entry_free_locked()
1362 WRITE_ONCE(si->swap_map[offset], usage); in __swap_entry_free_locked()
1364 WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE); in __swap_entry_free_locked()
1410 struct swap_info_struct *si; in get_swap_device() local
1415 si = swp_swap_info(entry); in get_swap_device()
1416 if (!si) in get_swap_device()
1418 if (!get_swap_device_info(si)) in get_swap_device()
1421 if (offset >= si->max) in get_swap_device()
1424 return si; in get_swap_device()
1431 percpu_ref_put(&si->users); in get_swap_device()
1435 static unsigned char __swap_entry_free(struct swap_info_struct *si, in __swap_entry_free() argument
1442 ci = lock_cluster(si, offset); in __swap_entry_free()
1443 usage = __swap_entry_free_locked(si, offset, 1); in __swap_entry_free()
1445 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1); in __swap_entry_free()
1451 static bool __swap_entries_free(struct swap_info_struct *si, in __swap_entries_free() argument
1461 if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1) in __swap_entries_free()
1467 ci = lock_cluster(si, offset); in __swap_entries_free()
1468 if (!swap_is_last_map(si, offset, nr, &has_cache)) { in __swap_entries_free()
1473 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); in __swap_entries_free()
1475 swap_entry_range_free(si, ci, entry, nr); in __swap_entries_free()
1482 if (data_race(si->swap_map[offset + i])) { in __swap_entries_free()
1483 count = __swap_entry_free(si, swp_entry(type, offset + i)); in __swap_entries_free()
1497 static void swap_entry_range_free(struct swap_info_struct *si, in swap_entry_range_free() argument
1502 unsigned char *map = si->swap_map + offset; in swap_entry_range_free()
1506 VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); in swap_entry_range_free()
1517 swap_range_free(si, offset, nr_pages); in swap_entry_range_free()
1520 free_cluster(si, ci); in swap_entry_range_free()
1522 partial_free_cluster(si, ci); in swap_entry_range_free()
1525 static void cluster_swap_free_nr(struct swap_info_struct *si, in cluster_swap_free_nr() argument
1532 ci = lock_cluster(si, offset); in cluster_swap_free_nr()
1534 if (!__swap_entry_free_locked(si, offset, usage)) in cluster_swap_free_nr()
1535 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1); in cluster_swap_free_nr()
1569 struct swap_info_struct *si; in put_swap_folio() local
1572 si = _swap_info_get(entry); in put_swap_folio()
1573 if (!si) in put_swap_folio()
1576 ci = lock_cluster(si, offset); in put_swap_folio()
1577 if (swap_is_has_cache(si, offset, size)) in put_swap_folio()
1578 swap_entry_range_free(si, ci, entry, size); in put_swap_folio()
1581 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) in put_swap_folio()
1582 swap_entry_range_free(si, ci, entry, 1); in put_swap_folio()
1592 struct swap_info_struct *si = NULL; in swapcache_free_entries() local
1598 si = _swap_info_get(entries[i]); in swapcache_free_entries()
1599 if (si) { in swapcache_free_entries()
1600 ci = lock_cluster(si, swp_offset(entries[i])); in swapcache_free_entries()
1601 swap_entry_range_free(si, ci, entries[i], 1); in swapcache_free_entries()
1609 struct swap_info_struct *si = swp_swap_info(entry); in __swap_count() local
1612 return swap_count(si->swap_map[offset]); in __swap_count()
1620 int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) in swap_swapcount() argument
1626 ci = lock_cluster(si, offset); in swap_swapcount()
1627 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1639 struct swap_info_struct *si; in swp_swapcount() local
1645 si = _swap_info_get(entry); in swp_swapcount()
1646 if (!si) in swp_swapcount()
1651 ci = lock_cluster(si, offset); in swp_swapcount()
1653 count = swap_count(si->swap_map[offset]); in swp_swapcount()
1660 page = vmalloc_to_page(si->swap_map + offset); in swp_swapcount()
1678 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1682 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1689 ci = lock_cluster(si, offset); in swap_page_trans_huge_swapped()
1709 struct swap_info_struct *si = _swap_info_get(entry); in folio_swapped() local
1711 if (!si) in folio_swapped()
1715 return swap_swapcount(si, entry) != 0; in folio_swapped()
1717 return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); in folio_swapped()
1785 struct swap_info_struct *si; in free_swap_and_cache_nr() local
1792 si = get_swap_device(entry); in free_swap_and_cache_nr()
1793 if (!si) in free_swap_and_cache_nr()
1796 if (WARN_ON(end_offset > si->max)) in free_swap_and_cache_nr()
1802 any_only_cache = __swap_entries_free(si, entry, nr); in free_swap_and_cache_nr()
1822 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in free_swap_and_cache_nr()
1831 nr = __try_to_reclaim_swap(si, offset, in free_swap_and_cache_nr()
1842 put_swap_device(si); in free_swap_and_cache_nr()
1849 struct swap_info_struct *si = swap_type_to_swap_info(type); in get_swap_page_of_type() local
1852 if (!si) in get_swap_page_of_type()
1856 if (get_swap_device_info(si)) { in get_swap_page_of_type()
1857 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) in get_swap_page_of_type()
1859 put_swap_device(si); in get_swap_page_of_type()
1924 struct swap_info_struct *si = swap_type_to_swap_info(type); in swapdev_block() local
1927 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1929 se = offset_to_swap_extent(si, offset); in swapdev_block()
2076 struct swap_info_struct *si; in unuse_pte_range() local
2078 si = swap_info[type]; in unuse_pte_range()
2119 swp_count = READ_ONCE(si->swap_map[offset]); in unuse_pte_range()
2249 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2261 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2262 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2269 if (i == si->max) in find_next_to_unuse()
2281 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2286 if (!swap_usage_in_pages(si)) in try_to_unuse()
2299 while (swap_usage_in_pages(si) && in try_to_unuse()
2327 while (swap_usage_in_pages(si) && in try_to_unuse()
2329 (i = find_next_to_unuse(si, i)) != 0) { in try_to_unuse()
2362 if (swap_usage_in_pages(si)) { in try_to_unuse()
2371 * after swap_range_free() reduces si->inuse_pages to 0. in try_to_unuse()
2523 static int swap_node(struct swap_info_struct *si) in swap_node() argument
2527 if (si->bdev) in swap_node()
2528 bdev = si->bdev; in swap_node()
2530 bdev = si->swap_file->f_inode->i_sb->s_bdev; in swap_node()
2535 static void setup_swap_info(struct swap_info_struct *si, int prio, in setup_swap_info() argument
2543 si->prio = prio; in setup_swap_info()
2545 si->prio = --least_priority; in setup_swap_info()
2550 si->list.prio = -si->prio; in setup_swap_info()
2552 if (si->prio >= 0) in setup_swap_info()
2553 si->avail_lists[i].prio = -si->prio; in setup_swap_info()
2555 if (swap_node(si) == i) in setup_swap_info()
2556 si->avail_lists[i].prio = 1; in setup_swap_info()
2558 si->avail_lists[i].prio = -si->prio; in setup_swap_info()
2561 si->swap_map = swap_map; in setup_swap_info()
2562 si->cluster_info = cluster_info; in setup_swap_info()
2563 si->zeromap = zeromap; in setup_swap_info()
2566 static void _enable_swap_info(struct swap_info_struct *si) in _enable_swap_info() argument
2568 atomic_long_add(si->pages, &nr_swap_pages); in _enable_swap_info()
2569 total_swap_pages += si->pages; in _enable_swap_info()
2582 plist_add(&si->list, &swap_active_head); in _enable_swap_info()
2585 add_to_avail_list(si, true); in _enable_swap_info()
2588 static void enable_swap_info(struct swap_info_struct *si, int prio, in enable_swap_info() argument
2594 spin_lock(&si->lock); in enable_swap_info()
2595 setup_swap_info(si, prio, swap_map, cluster_info, zeromap); in enable_swap_info()
2596 spin_unlock(&si->lock); in enable_swap_info()
2601 percpu_ref_resurrect(&si->users); in enable_swap_info()
2603 spin_lock(&si->lock); in enable_swap_info()
2604 _enable_swap_info(si); in enable_swap_info()
2605 spin_unlock(&si->lock); in enable_swap_info()
2609 static void reinsert_swap_info(struct swap_info_struct *si) in reinsert_swap_info() argument
2612 spin_lock(&si->lock); in reinsert_swap_info()
2613 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap); in reinsert_swap_info()
2614 _enable_swap_info(si); in reinsert_swap_info()
2615 spin_unlock(&si->lock); in reinsert_swap_info()
2638 static void wait_for_allocation(struct swap_info_struct *si) in wait_for_allocation() argument
2641 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); in wait_for_allocation()
2644 BUG_ON(si->flags & SWP_WRITEOK); in wait_for_allocation()
2647 ci = lock_cluster(si, offset); in wait_for_allocation()
2703 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2706 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2707 si->prio++; in SYSCALL_DEFINE1()
2708 si->list.prio--; in SYSCALL_DEFINE1()
2710 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2711 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
2836 struct swap_info_struct *si; in swap_start() local
2845 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { in swap_start()
2846 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2849 return si; in swap_start()
2857 struct swap_info_struct *si = v; in swap_next() local
2863 type = si->type + 1; in swap_next()
2866 for (; (si = swap_type_to_swap_info(type)); type++) { in swap_next()
2867 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2869 return si; in swap_next()
2882 struct swap_info_struct *si = v; in swap_show() local
2887 if (si == SEQ_START_TOKEN) { in swap_show()
2892 bytes = K(si->pages); in swap_show()
2893 inuse = K(swap_usage_in_pages(si)); in swap_show()
2895 file = si->swap_file; in swap_show()
2903 si->prio); in swap_show()
3016 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) in claim_swapfile() argument
3019 si->bdev = I_BDEV(inode); in claim_swapfile()
3025 if (bdev_is_zoned(si->bdev)) in claim_swapfile()
3027 si->flags |= SWP_BLKDEV; in claim_swapfile()
3029 si->bdev = inode->i_sb->s_bdev; in claim_swapfile()
3064 static unsigned long read_swap_header(struct swap_info_struct *si, in read_swap_header() argument
3134 static int setup_swap_map_and_extents(struct swap_info_struct *si, in setup_swap_map_and_extents() argument
3158 si->max = maxpages; in setup_swap_map_and_extents()
3159 si->pages = nr_good_pages; in setup_swap_map_and_extents()
3160 nr_extents = setup_swap_extents(si, span); in setup_swap_map_and_extents()
3163 nr_good_pages = si->pages; in setup_swap_map_and_extents()
3173 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, in setup_clusters() argument
3189 if (si->flags & SWP_SOLIDSTATE) { in setup_clusters()
3190 si->percpu_cluster = alloc_percpu(struct percpu_cluster); in setup_clusters()
3191 if (!si->percpu_cluster) in setup_clusters()
3197 cluster = per_cpu_ptr(si->percpu_cluster, cpu); in setup_clusters()
3203 si->global_cluster = kmalloc(sizeof(*si->global_cluster), in setup_clusters()
3205 if (!si->global_cluster) in setup_clusters()
3208 si->global_cluster->next[i] = SWAP_ENTRY_INVALID; in setup_clusters()
3209 spin_lock_init(&si->global_cluster_lock); in setup_clusters()
3219 inc_cluster_info_page(si, cluster_info, 0); in setup_clusters()
3221 inc_cluster_info_page(si, cluster_info, in setup_clusters()
3224 inc_cluster_info_page(si, cluster_info, i); in setup_clusters()
3226 INIT_LIST_HEAD(&si->free_clusters); in setup_clusters()
3227 INIT_LIST_HEAD(&si->full_clusters); in setup_clusters()
3228 INIT_LIST_HEAD(&si->discard_clusters); in setup_clusters()
3231 INIT_LIST_HEAD(&si->nonfull_clusters[i]); in setup_clusters()
3232 INIT_LIST_HEAD(&si->frag_clusters[i]); in setup_clusters()
3233 atomic_long_set(&si->frag_cluster_nr[i], 0); in setup_clusters()
3250 list_add_tail(&ci->list, &si->nonfull_clusters[0]); in setup_clusters()
3254 list_add_tail(&ci->list, &si->free_clusters); in setup_clusters()
3268 struct swap_info_struct *si; in SYSCALL_DEFINE2() local
3295 si = alloc_swap_info(); in SYSCALL_DEFINE2()
3296 if (IS_ERR(si)) in SYSCALL_DEFINE2()
3297 return PTR_ERR(si); in SYSCALL_DEFINE2()
3299 INIT_WORK(&si->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3300 INIT_WORK(&si->reclaim_work, swap_reclaim_work); in SYSCALL_DEFINE2()
3315 si->swap_file = swap_file; in SYSCALL_DEFINE2()
3320 error = claim_swapfile(si, inode); in SYSCALL_DEFINE2()
3348 maxpages = read_swap_header(si, swap_header, inode); in SYSCALL_DEFINE2()
3361 error = swap_cgroup_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3365 nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map, in SYSCALL_DEFINE2()
3383 if (si->bdev && bdev_stable_writes(si->bdev)) in SYSCALL_DEFINE2()
3384 si->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3386 if (si->bdev && bdev_synchronous(si->bdev)) in SYSCALL_DEFINE2()
3387 si->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3389 if (si->bdev && bdev_nonrot(si->bdev)) { in SYSCALL_DEFINE2()
3390 si->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3396 cluster_info = setup_clusters(si, swap_header, maxpages); in SYSCALL_DEFINE2()
3404 si->bdev && bdev_max_discard_sectors(si->bdev)) { in SYSCALL_DEFINE2()
3411 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3421 si->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3423 si->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3426 if (si->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3427 int err = discard_swap(si); in SYSCALL_DEFINE2()
3430 si, err); in SYSCALL_DEFINE2()
3434 error = init_swap_address_space(si->type, maxpages); in SYSCALL_DEFINE2()
3438 error = zswap_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3458 enable_swap_info(si, prio, swap_map, cluster_info, zeromap); in SYSCALL_DEFINE2()
3461 K(si->pages), name->name, si->prio, nr_extents, in SYSCALL_DEFINE2()
3463 (si->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3464 (si->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3465 (si->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3466 (si->flags & SWP_PAGE_DISCARD) ? "c" : ""); in SYSCALL_DEFINE2()
3475 zswap_swapoff(si->type); in SYSCALL_DEFINE2()
3477 exit_swap_address_space(si->type); in SYSCALL_DEFINE2()
3481 free_percpu(si->percpu_cluster); in SYSCALL_DEFINE2()
3482 si->percpu_cluster = NULL; in SYSCALL_DEFINE2()
3483 kfree(si->global_cluster); in SYSCALL_DEFINE2()
3484 si->global_cluster = NULL; in SYSCALL_DEFINE2()
3486 destroy_swap_extents(si); in SYSCALL_DEFINE2()
3487 swap_cgroup_swapoff(si->type); in SYSCALL_DEFINE2()
3489 si->swap_file = NULL; in SYSCALL_DEFINE2()
3490 si->flags = 0; in SYSCALL_DEFINE2()
3518 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3520 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3521 nr_to_be_unused += swap_usage_in_pages(si); in si_swapinfo()
3541 struct swap_info_struct *si; in __swap_duplicate() local
3548 si = swp_swap_info(entry); in __swap_duplicate()
3549 if (WARN_ON_ONCE(!si)) { in __swap_duplicate()
3557 ci = lock_cluster(si, offset); in __swap_duplicate()
3561 count = si->swap_map[offset + i]; in __swap_duplicate()
3589 count = si->swap_map[offset + i]; in __swap_duplicate()
3597 else if (swap_count_continued(si, offset + i, count)) in __swap_duplicate()
3608 WRITE_ONCE(si->swap_map[offset + i], count | has_cache); in __swap_duplicate()
3654 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) in swapcache_clear() argument
3658 cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE); in swapcache_clear()
3698 struct swap_info_struct *si; in add_swap_count_continuation() local
3713 si = get_swap_device(entry); in add_swap_count_continuation()
3714 if (!si) { in add_swap_count_continuation()
3724 ci = lock_cluster(si, offset); in add_swap_count_continuation()
3726 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3742 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3745 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3754 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3782 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3785 put_swap_device(si); in add_swap_count_continuation()
3801 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3809 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3815 spin_lock(&si->cont_lock); in swap_count_continued()
3877 spin_unlock(&si->cont_lock); in swap_count_continued()
3885 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
3889 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3891 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
3906 struct swap_info_struct *si, *next; in __folio_throttle_swaprate() local
3926 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], in __folio_throttle_swaprate()
3928 if (si->bdev) { in __folio_throttle_swaprate()
3929 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __folio_throttle_swaprate()