Lines Matching full:class

22  *	class->lock
124 * determined). NOTE: all those class sizes must be set as multiple of
174 * Size of objects stored in this class. Must be multiple
264 unsigned int class:CLASS_BITS + 1; member
365 /* class->lock(which owns the handle) synchronizes races */
460 /* Protected by class->lock */
510 return pool->size_class[zspage->class]; in zspage_class()
515 * class maintains a list of zspages where each zspage is divided
518 * size class which has chunk size big enough to hold the given size.
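The comment above describes rounding a requested size up to the nearest size class. A minimal user-space sketch of that rounding, assuming illustrative values for the minimum allocation size and the class-size step (the kernel derives the real values, named ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA, from PAGE_SIZE):

#include <stdio.h>

/* Illustrative values only; not the kernel's actual constants. */
#define MIN_ALLOC_SIZE  32
#define CLASS_DELTA     16
#define NR_CLASSES      255

/* Round a requested size up to its size-class index. */
static int size_to_class_index(int size)
{
        int idx = 0;

        if (size > MIN_ALLOC_SIZE)
                idx = (size - MIN_ALLOC_SIZE + CLASS_DELTA - 1) / CLASS_DELTA;

        return idx < NR_CLASSES - 1 ? idx : NR_CLASSES - 1;
}

int main(void)
{
        /* 100 bytes lands in the class with chunk size 32 + 5 * 16 = 112. */
        int idx = size_to_class_index(100);

        printf("class index %d, chunk size %d\n",
               idx, MIN_ALLOC_SIZE + idx * CLASS_DELTA);
        return 0;
}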
531 static inline void class_stat_add(struct size_class *class, int type, in class_stat_add() argument
534 class->stats.objs[type] += cnt; in class_stat_add()
537 static inline void class_stat_sub(struct size_class *class, int type, in class_stat_sub() argument
540 class->stats.objs[type] -= cnt; in class_stat_sub()
543 static inline unsigned long class_stat_read(struct size_class *class, int type) in class_stat_read() argument
545 return class->stats.objs[type]; in class_stat_read()
565 static unsigned long zs_can_compact(struct size_class *class);
571 struct size_class *class; in zs_stats_size_show() local
579 "class", "size", "10%", "20%", "30%", "40%", in zs_stats_size_show()
586 class = pool->size_class[i]; in zs_stats_size_show()
588 if (class->index != i) in zs_stats_size_show()
591 spin_lock(&class->lock); in zs_stats_size_show()
593 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
595 inuse_totals[fg] += class_stat_read(class, fg); in zs_stats_size_show()
596 seq_printf(s, "%9lu ", class_stat_read(class, fg)); in zs_stats_size_show()
599 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_stats_size_show()
600 obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_stats_size_show()
601 freeable = zs_can_compact(class); in zs_stats_size_show()
602 spin_unlock(&class->lock); in zs_stats_size_show()
604 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
606 class->pages_per_zspage; in zs_stats_size_show()
610 class->pages_per_zspage, freeable); in zs_stats_size_show()
670 * For each size class, zspages are divided into different groups
674 static int get_fullness_group(struct size_class *class, struct zspage *zspage) in get_fullness_group() argument
679 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
696 * Each size class maintains various freelists and zspages are assigned
699 * identified by <class, fullness_group>.
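get_fullness_group() above buckets a zspage by how many of its objects are in use, and the <class, fullness_group> pair selects a freelist. A rough sketch of such bucketing, assuming one group per 10% step of the in-use ratio (the group numbering and exact boundaries here are illustrative, not the kernel's):

#include <stdio.h>

/* Bucket a zspage into a fullness group: 0 when empty, then one
 * group per 10% of in-use objects (illustrative mapping).
 */
static int fullness_group(int inuse, int objs_per_zspage)
{
        int ratio;

        if (inuse == 0)
                return 0;               /* empty, candidate for freeing */

        ratio = 100 * inuse / objs_per_zspage;
        return ratio / 10 + 1;          /* groups 1..11 for 1%..100% */
}

int main(void)
{
        /* 3 of 8 objects used -> 37% -> group 4 in this sketch. */
        printf("group %d\n", fullness_group(3, 8));
        return 0;
}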
701 static void insert_zspage(struct size_class *class, in insert_zspage() argument
705 class_stat_add(class, fullness, 1); in insert_zspage()
706 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
712 * by <class, fullness_group>.
714 static void remove_zspage(struct size_class *class, struct zspage *zspage) in remove_zspage() argument
718 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
721 class_stat_sub(class, fullness, 1); in remove_zspage()
725 * Each size class maintains zspages in different fullness groups depending
733 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) in fix_fullness_group() argument
737 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
741 remove_zspage(class, zspage); in fix_fullness_group()
742 insert_zspage(class, zspage, newfg); in fix_fullness_group()
855 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
860 assert_spin_locked(&class->lock); in __free_zspage()
878 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
879 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
882 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
898 remove_zspage(class, zspage); in free_zspage()
899 __free_zspage(pool, class, zspage); in free_zspage()
903 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
919 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
921 link += class->size / sizeof(*link); in init_zspage()
947 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
953 int nr_zpdescs = class->pages_per_zspage; in create_page_chain()
970 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
971 class->pages_per_zspage == 1)) in create_page_chain()
981 * Allocate a zspage for the given size class
984 struct size_class *class, in alloc_zspage() argument
997 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1016 create_page_chain(class, zspage, zpdescs); in alloc_zspage()
1017 init_zspage(class, zspage); in alloc_zspage()
1019 zspage->class = class->index; in alloc_zspage()
1024 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1030 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1136 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1138 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1159 struct size_class *class; in zs_lookup_class_index() local
1161 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1163 return class->index; in zs_lookup_class_index()
1196 struct size_class *class; in zs_map_object() local
1215 * migration cannot move any zpages in this zspage. Here, class->lock in zs_map_object()
1217 * zs_unmap_object API so delegate the locking from class to zspage in zs_map_object()
1223 class = zspage_class(pool, zspage); in zs_map_object()
1224 off = offset_in_page(class->size * obj_idx); in zs_map_object()
1229 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1241 ret = __zs_map_object(area, zpdescs, off, class->size); in zs_map_object()
1257 struct size_class *class; in zs_unmap_object() local
1263 class = zspage_class(pool, zspage); in zs_unmap_object()
1264 off = offset_in_page(class->size * obj_idx); in zs_unmap_object()
1267 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1276 __zs_unmap_object(area, zpdescs, off, class->size); in zs_unmap_object()
1289 * The function returns the size of the first huge class - any object of equal
1309 struct size_class *class; in obj_malloc() local
1315 class = pool->size_class[zspage->class]; in obj_malloc()
1318 offset = obj * class->size; in obj_malloc()
1358 struct size_class *class; in zs_malloc() local
1374 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1376 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1377 spin_lock(&class->lock); in zs_malloc()
1378 zspage = find_get_zspage(class); in zs_malloc()
1382 fix_fullness_group(class, zspage); in zs_malloc()
1383 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1388 spin_unlock(&class->lock); in zs_malloc()
1390 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1396 spin_lock(&class->lock); in zs_malloc()
1398 newfg = get_fullness_group(class, zspage); in zs_malloc()
1399 insert_zspage(class, zspage, newfg); in zs_malloc()
1400 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1401 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1402 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1407 spin_unlock(&class->lock); in zs_malloc()
1446 struct size_class *class; in zs_free() local
1460 class = zspage_class(pool, zspage); in zs_free()
1461 spin_lock(&class->lock); in zs_free()
1464 class_stat_sub(class, ZS_OBJS_INUSE, 1); in zs_free()
1465 obj_free(class->size, obj); in zs_free()
1467 fullness = fix_fullness_group(class, zspage); in zs_free()
1469 free_zspage(pool, class, zspage); in zs_free()
1471 spin_unlock(&class->lock); in zs_free()
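The zs_malloc() and zs_free() paths above are reached through the pool API that this file exports. A minimal sketch of a caller's allocate/map/copy/unmap/free cycle, assuming a kernel-module context and the map-based access API shown in this listing (error conventions vary slightly between kernel versions):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

static int zs_roundtrip(const void *src, size_t len)
{
        struct zs_pool *pool;
        unsigned long handle;
        void *dst;

        pool = zs_create_pool("example");
        if (!pool)
                return -ENOMEM;

        /* Allocation returns an opaque handle, not a pointer. */
        handle = zs_malloc(pool, len, GFP_KERNEL);
        if (IS_ERR_VALUE(handle)) {
                zs_destroy_pool(pool);
                return -ENOMEM;
        }

        /* Objects must be mapped before they can be accessed. */
        dst = zs_map_object(pool, handle, ZS_MM_WO);
        memcpy(dst, src, len);
        zs_unmap_object(pool, handle);

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
}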
1476 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1486 s_size = d_size = class->size; in zs_object_copy()
1491 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1492 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1494 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1497 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1508 if (written == class->size) in zs_object_copy()
1529 s_size = class->size - written; in zs_object_copy()
1537 d_size = class->size - written; in zs_object_copy()
1550 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1559 offset += class->size * index; in find_alloced_obj()
1565 offset += class->size; in find_alloced_obj()
1583 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage() local
1586 handle = find_alloced_obj(class, s_zpdesc, &obj_idx); in migrate_zspage()
1597 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1599 obj_free(class->size, used_obj); in migrate_zspage()
1602 if (zspage_full(class, dst_zspage)) in migrate_zspage()
1611 static struct zspage *isolate_src_zspage(struct size_class *class) in isolate_src_zspage() argument
1617 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1620 remove_zspage(class, zspage); in isolate_src_zspage()
1628 static struct zspage *isolate_dst_zspage(struct size_class *class) in isolate_dst_zspage() argument
1634 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1637 remove_zspage(class, zspage); in isolate_dst_zspage()
1646 * putback_zspage - add @zspage into right class's fullness list
1647 * @class: destination class
1652 static int putback_zspage(struct size_class *class, struct zspage *zspage) in putback_zspage() argument
1656 fullness = get_fullness_group(class, zspage); in putback_zspage()
1657 insert_zspage(class, zspage, fullness); in putback_zspage()
1735 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1752 create_page_chain(class, zspage, zpdescs); in replace_sub_page()
1775 struct size_class *class; in zs_page_migrate() local
1800 class = zspage_class(pool, zspage); in zs_page_migrate()
1803 * the class lock protects zpage alloc/free in the zspage. in zs_page_migrate()
1805 spin_lock(&class->lock); in zs_page_migrate()
1820 addr += class->size) { in zs_page_migrate()
1831 replace_sub_page(class, zspage, newzpdesc, zpdesc); in zs_page_migrate()
1837 spin_unlock(&class->lock); in zs_page_migrate()
1870 struct size_class *class; in async_free_zspage() local
1877 class = pool->size_class[i]; in async_free_zspage()
1878 if (class->index != i) in async_free_zspage()
1881 spin_lock(&class->lock); in async_free_zspage()
1882 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1884 spin_unlock(&class->lock); in async_free_zspage()
1891 class = zspage_class(pool, zspage); in async_free_zspage()
1892 spin_lock(&class->lock); in async_free_zspage()
1893 class_stat_sub(class, ZS_INUSE_RATIO_0, 1); in async_free_zspage()
1894 __free_zspage(pool, class, zspage); in async_free_zspage()
1895 spin_unlock(&class->lock); in async_free_zspage()
1933 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
1936 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_can_compact()
1937 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_can_compact()
1943 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1945 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
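zs_can_compact() above estimates how many pages compaction could release: allocated-but-unused object slots are the waste, whole zspages' worth of waste can be emptied by migration, and each emptied zspage returns pages_per_zspage pages. A small worked sketch of that arithmetic (user-space, not kernel code; parameter names are mine):

#include <stdio.h>

/* Pages that compacting one size class could free, per the estimate above. */
static unsigned long can_compact(unsigned long allocated, unsigned long used,
                                 unsigned int objs_per_zspage,
                                 unsigned int pages_per_zspage)
{
        unsigned long wasted;

        if (allocated <= used)
                return 0;

        wasted = allocated - used;
        wasted /= objs_per_zspage;      /* whole zspages' worth of holes */

        return wasted * pages_per_zspage;
}

int main(void)
{
        /* 96 slots allocated, 60 in use, 8 objects and 3 pages per zspage:
         * 36 holes -> 4 full zspages -> 12 potentially freeable pages.
         */
        printf("%lu pages freeable\n", can_compact(96, 60, 8, 3));
        return 0;
}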
1949 struct size_class *class) in __zs_compact() argument
1960 spin_lock(&class->lock); in __zs_compact()
1961 while (zs_can_compact(class)) { in __zs_compact()
1965 dst_zspage = isolate_dst_zspage(class); in __zs_compact()
1970 src_zspage = isolate_src_zspage(class); in __zs_compact()
1978 fg = putback_zspage(class, src_zspage); in __zs_compact()
1980 free_zspage(pool, class, src_zspage); in __zs_compact()
1981 pages_freed += class->pages_per_zspage; in __zs_compact()
1985 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 in __zs_compact()
1987 putback_zspage(class, dst_zspage); in __zs_compact()
1990 spin_unlock(&class->lock); in __zs_compact()
1994 spin_lock(&class->lock); in __zs_compact()
1999 putback_zspage(class, src_zspage); in __zs_compact()
2002 putback_zspage(class, dst_zspage); in __zs_compact()
2004 spin_unlock(&class->lock); in __zs_compact()
2013 struct size_class *class; in zs_compact() local
2026 class = pool->size_class[i]; in zs_compact()
2027 if (class->index != i) in zs_compact()
2029 pages_freed += __zs_compact(pool, class); in zs_compact()
2064 struct size_class *class; in zs_shrinker_count() local
2069 class = pool->size_class[i]; in zs_shrinker_count()
2070 if (class->index != i) in zs_shrinker_count()
2073 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2160 struct size_class *class; in zs_create_pool() local
2172 * class. Any object bigger than or equal to that will in zs_create_pool()
2173 * endup in the huge class. in zs_create_pool()
2182 * size class search - so object may be smaller than in zs_create_pool()
2183 * huge class size, yet it still can end up in the huge in zs_create_pool()
2184 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2185 * right before class lookup. in zs_create_pool()
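The comment above notes that the requested size grows by ZS_HANDLE_SIZE before the class lookup, so a request just below the huge-class threshold can still land in the huge class. A sketch of that effect with illustrative numbers (the real threshold is what zs_huge_class_size() reports, and the real handle overhead is ZS_HANDLE_SIZE):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define HANDLE_SIZE     8       /* illustrative stand-in for ZS_HANDLE_SIZE */
#define HUGE_CLASS_SIZE 3000    /* illustrative threshold */

/* Does a request end up in a huge (single-object-per-zspage) class? */
static bool lands_in_huge_class(size_t size)
{
        return size + HANDLE_SIZE >= HUGE_CLASS_SIZE;
}

int main(void)
{
        /* 2996 < 3000, yet 2996 + 8 = 3004 crosses the threshold. */
        printf("%d\n", lands_in_huge_class(2996));
        return 0;
}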
2206 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2207 if (!class) in zs_create_pool()
2210 class->size = size; in zs_create_pool()
2211 class->index = i; in zs_create_pool()
2212 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2213 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2214 spin_lock_init(&class->lock); in zs_create_pool()
2215 pool->size_class[i] = class; in zs_create_pool()
2219 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2223 prev_class = class; in zs_create_pool()
2255 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2257 if (!class) in zs_destroy_pool()
2260 if (class->index != i) in zs_destroy_pool()
2264 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2267 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2268 class->size, fg); in zs_destroy_pool()
2270 kfree(class); in zs_destroy_pool()