Lines Matching defs:class

22  *	class->lock
124 * determined). NOTE: all those class sizes must be set as multiple of
174 * Size of objects stored in this class. Must be multiple
264 unsigned int class:CLASS_BITS + 1;
365 /* class->lock(which owns the handle) synchronizes races */
460 /* Protected by class->lock */
510 return pool->size_class[zspage->class];
515 * class maintains a list of zspages where each zspage is divided
518 * size class which has chunk size big enough to hold the given size.
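
The comment matched at lines 515-518 describes how an allocation is routed to the smallest size class whose chunk size can hold it. A minimal userspace sketch of that lookup follows, mirroring the get_size_class_index() helper used at lines 1161 and 1374; the constants ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA and ZS_SIZE_CLASS_NR are placeholders chosen for illustration, not the values the kernel derives from PAGE_SIZE and its configuration.

    #include <stdio.h>

    /* Illustrative stand-ins for the real, config-dependent constants. */
    #define ZS_MIN_ALLOC_SIZE   32
    #define ZS_SIZE_CLASS_DELTA 16
    #define ZS_SIZE_CLASS_NR    255

    /* Round the request up to the first class big enough to hold it. */
    static int get_size_class_index(int size)
    {
        int idx = 0;

        if (size > ZS_MIN_ALLOC_SIZE)
            idx = (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1) /
                  ZS_SIZE_CLASS_DELTA;

        return idx < ZS_SIZE_CLASS_NR - 1 ? idx : ZS_SIZE_CLASS_NR - 1;
    }

    int main(void)
    {
        printf("size 100 -> class index %d\n", get_size_class_index(100));
        return 0;
    }
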
531 static inline void class_stat_add(struct size_class *class, int type,
534 class->stats.objs[type] += cnt;
537 static inline void class_stat_sub(struct size_class *class, int type,
540 class->stats.objs[type] -= cnt;
543 static inline unsigned long class_stat_read(struct size_class *class, int type)
545 return class->stats.objs[type];
565 static unsigned long zs_can_compact(struct size_class *class);
571 struct size_class *class;
579 "class", "size", "10%", "20%", "30%", "40%",
586 class = pool->size_class[i];
588 if (class->index != i)
591 spin_lock(&class->lock);
593 seq_printf(s, " %5u %5u ", i, class->size);
595 inuse_totals[fg] += class_stat_read(class, fg);
596 seq_printf(s, "%9lu ", class_stat_read(class, fg));
599 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
600 obj_used = class_stat_read(class, ZS_OBJS_INUSE);
601 freeable = zs_can_compact(class);
602 spin_unlock(&class->lock);
604 objs_per_zspage = class->objs_per_zspage;
606 class->pages_per_zspage;
610 class->pages_per_zspage, freeable);
670 * For each size class, zspages are divided into different groups
674 static int get_fullness_group(struct size_class *class, struct zspage *zspage)
679 objs_per_zspage = class->objs_per_zspage;
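
Lines 670-679 introduce the fullness grouping that get_fullness_group() computes. A hedged userspace model of the bucketing: a zspage is filed by its in-use ratio in 10% steps, with a separate group for completely empty zspages. The group numbering here is illustrative and is not the kernel's ZS_INUSE_RATIO_* enum.

    #include <stdio.h>

    /* 0 = empty, 1..10 = (0%,10%] .. (90%,100%] of objects in use. */
    static int fullness_group(int inuse, int objs_per_zspage)
    {
        if (inuse == 0)
            return 0;

        return (100 * inuse / objs_per_zspage - 1) / 10 + 1;
    }

    int main(void)
    {
        /* 3 of 16 objects live -> 18% in use -> the 10-20% bucket */
        printf("group = %d\n", fullness_group(3, 16));
        return 0;
    }
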
696 * Each size class maintains various freelists and zspages are assigned
699 * identified by <class, fullness_group>.
701 static void insert_zspage(struct size_class *class,
705 class_stat_add(class, fullness, 1);
706 list_add(&zspage->list, &class->fullness_list[fullness]);
712 * by <class, fullness_group>.
714 static void remove_zspage(struct size_class *class, struct zspage *zspage)
718 VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
721 class_stat_sub(class, fullness, 1);
725 * Each size class maintains zspages in different fullness groups depending
733 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
737 newfg = get_fullness_group(class, zspage);
741 remove_zspage(class, zspage);
742 insert_zspage(class, zspage, newfg);
855 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
860 assert_spin_locked(&class->lock);
878 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
879 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
882 static void free_zspage(struct zs_pool *pool, struct size_class *class,
898 remove_zspage(class, zspage);
899 __free_zspage(pool, class, zspage);
903 static void init_zspage(struct size_class *class, struct zspage *zspage)
919 while ((off += class->size) < PAGE_SIZE) {
921 link += class->size / sizeof(*link);
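
The two init_zspage() fragments above (lines 919-921) are the core of how a fresh zspage gets its freelist: the code strides through the page in class->size steps and stores a link to the next free slot inside each slot itself, which is why line 124 insists that class sizes be multiples of the allocator's alignment. A simplified single-page model, with PAGE_SZ and the link encoding as assumptions:

    #include <stdio.h>

    #define PAGE_SZ 4096

    /* Store "index of the next free slot" in the first word of each slot.
     * The real code tags the final slot so the list terminates. */
    static void init_page_freelist(unsigned char *page, unsigned int obj_size)
    {
        unsigned int off = 0, idx = 0;

        while (off + obj_size <= PAGE_SZ) {
            *(unsigned long *)(page + off) = idx + 1;
            off += obj_size;
            idx++;
        }
    }

    int main(void)
    {
        static unsigned long backing[PAGE_SZ / sizeof(unsigned long)];
        unsigned char *page = (unsigned char *)backing;

        init_page_freelist(page, 176);  /* e.g. a 176-byte size class */
        printf("slot 0 links to slot %lu\n", backing[0]);
        return 0;
    }
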
947 static void create_page_chain(struct size_class *class, struct zspage *zspage,
953 int nr_zpdescs = class->pages_per_zspage;
970 if (unlikely(class->objs_per_zspage == 1 &&
971 class->pages_per_zspage == 1))
981 * Allocate a zspage for the given size class
984 struct size_class *class,
997 for (i = 0; i < class->pages_per_zspage; i++) {
1016 create_page_chain(class, zspage, zpdescs);
1017 init_zspage(class, zspage);
1019 zspage->class = class->index;
1024 static struct zspage *find_get_zspage(struct size_class *class)
1030 zspage = list_first_entry_or_null(&class->fullness_list[i],
1136 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1138 return get_zspage_inuse(zspage) == class->objs_per_zspage;
1159 struct size_class *class;
1161 class = pool->size_class[get_size_class_index(size)];
1163 return class->index;
1196 struct size_class *class;
1215 * migration cannot move any zpages in this zspage. Here, class->lock
1217 * zs_unmap_object API so delegate the locking from class to zspage
1223 class = zspage_class(pool, zspage);
1224 off = offset_in_page(class->size * obj_idx);
1229 if (off + class->size <= PAGE_SIZE) {
1241 ret = __zs_map_object(area, zpdescs, off, class->size);
1257 struct size_class *class;
1263 class = zspage_class(pool, zspage);
1264 off = offset_in_page(class->size * obj_idx);
1267 if (off + class->size <= PAGE_SIZE)
1276 __zs_unmap_object(area, zpdescs, off, class->size);
1289 * The function returns the size of the first huge class - any object of equal
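
The kernel-doc fragment at line 1289 belongs to zs_huge_class_size(), the hint that tells callers the threshold at which an object will occupy a whole page of its own. A small sketch of how a caller might act on that hint, loosely in the spirit of a compressed-storage user deciding when compression stops paying off; the numbers and the stored_len() helper are illustrative assumptions, not any driver's actual code:

    #include <stdio.h>

    #define PAGE_SZ 4096

    /* Once a buffer is at least huge_class_size bytes it costs a full
     * page anyway, so storing it uncompressed loses nothing. */
    static size_t stored_len(size_t comp_len, size_t huge_class_size)
    {
        return comp_len >= huge_class_size ? PAGE_SZ : comp_len;
    }

    int main(void)
    {
        size_t huge = 3264;     /* assumed zs_huge_class_size() result */

        printf("%zu\n", stored_len(3300, huge));    /* 4096: no savings */
        printf("%zu\n", stored_len(1200, huge));    /* 1200 */
        return 0;
    }
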
1309 struct size_class *class;
1315 class = pool->size_class[zspage->class];
1318 offset = obj * class->size;
1358 struct size_class *class;
1374 class = pool->size_class[get_size_class_index(size)];
1376 /* class->lock effectively protects the zpage migration */
1377 spin_lock(&class->lock);
1378 zspage = find_get_zspage(class);
1382 fix_fullness_group(class, zspage);
1383 class_stat_add(class, ZS_OBJS_INUSE, 1);
1388 spin_unlock(&class->lock);
1390 zspage = alloc_zspage(pool, class, gfp);
1396 spin_lock(&class->lock);
1398 newfg = get_fullness_group(class, zspage);
1399 insert_zspage(class, zspage, newfg);
1400 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1401 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
1402 class_stat_add(class, ZS_OBJS_INUSE, 1);
1407 spin_unlock(&class->lock);
1446 struct size_class *class;
1460 class = zspage_class(pool, zspage);
1461 spin_lock(&class->lock);
1464 class_stat_sub(class, ZS_OBJS_INUSE, 1);
1465 obj_free(class->size, obj);
1467 fullness = fix_fullness_group(class, zspage);
1469 free_zspage(pool, class, zspage);
1471 spin_unlock(&class->lock);
1476 static void zs_object_copy(struct size_class *class, unsigned long dst,
1486 s_size = d_size = class->size;
1491 s_off = offset_in_page(class->size * s_objidx);
1492 d_off = offset_in_page(class->size * d_objidx);
1494 if (s_off + class->size > PAGE_SIZE)
1497 if (d_off + class->size > PAGE_SIZE)
1508 if (written == class->size)
1529 s_size = class->size - written;
1537 d_size = class->size - written;
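
The zs_object_copy() fragments above (lines 1476-1537) deal with the case where an object straddles a page boundary on the source side, the destination side, or both, so the copy proceeds in chunks bounded by whichever side runs out of page first. A self-contained userspace model of that chunking, with pages represented as plain buffers:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 4096

    static void copy_obj(unsigned char *src[], size_t s_off,
                         unsigned char *dst[], size_t d_off, size_t size)
    {
        size_t s_pg = 0, d_pg = 0, written = 0;

        while (written < size) {
            size_t chunk = size - written;

            /* never copy past the end of the current source or
             * destination page */
            if (chunk > PAGE_SZ - s_off)
                chunk = PAGE_SZ - s_off;
            if (chunk > PAGE_SZ - d_off)
                chunk = PAGE_SZ - d_off;

            memcpy(dst[d_pg] + d_off, src[s_pg] + s_off, chunk);
            written += chunk;
            s_off += chunk;
            d_off += chunk;

            if (s_off == PAGE_SZ) {     /* source crossed into next page */
                s_pg++;
                s_off = 0;
            }
            if (d_off == PAGE_SZ) {     /* destination crossed as well */
                d_pg++;
                d_off = 0;
            }
        }
    }

    int main(void)
    {
        static unsigned char s0[PAGE_SZ], s1[PAGE_SZ], d0[PAGE_SZ], d1[PAGE_SZ];
        unsigned char *src[] = { s0, s1 }, *dst[] = { d0, d1 };

        memset(s0 + 4000, 0xaa, PAGE_SZ - 4000);
        memset(s1, 0xaa, 80);

        /* a 176-byte object starting 96 bytes before the end of s0 */
        copy_obj(src, 4000, dst, 4090, 176);
        printf("%#x\n", d1[80]);        /* 0xaa made it across */
        return 0;
    }
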
1550 static unsigned long find_alloced_obj(struct size_class *class,
1559 offset += class->size * index;
1565 offset += class->size;
1583 struct size_class *class = pool->size_class[src_zspage->class];
1586 handle = find_alloced_obj(class, s_zpdesc, &obj_idx);
1597 zs_object_copy(class, free_obj, used_obj);
1599 obj_free(class->size, used_obj);
1602 if (zspage_full(class, dst_zspage))
1611 static struct zspage *isolate_src_zspage(struct size_class *class)
1617 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1620 remove_zspage(class, zspage);
1628 static struct zspage *isolate_dst_zspage(struct size_class *class)
1634 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1637 remove_zspage(class, zspage);
1646 * putback_zspage - add @zspage into right class's fullness list
1647 * @class: destination class
1652 static int putback_zspage(struct size_class *class, struct zspage *zspage)
1656 fullness = get_fullness_group(class, zspage);
1657 insert_zspage(class, zspage, fullness);
1735 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1752 create_page_chain(class, zspage, zpdescs);
1775 struct size_class *class;
1800 class = zspage_class(pool, zspage);
1803 * the class lock protects zpage alloc/free in the zspage.
1805 spin_lock(&class->lock);
1820 addr += class->size) {
1831 replace_sub_page(class, zspage, newzpdesc, zpdesc);
1837 spin_unlock(&class->lock);
1870 struct size_class *class;
1877 class = pool->size_class[i];
1878 if (class->index != i)
1881 spin_lock(&class->lock);
1882 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
1884 spin_unlock(&class->lock);
1891 class = zspage_class(pool, zspage);
1892 spin_lock(&class->lock);
1893 class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
1894 __free_zspage(pool, class, zspage);
1895 spin_unlock(&class->lock);
1933 static unsigned long zs_can_compact(struct size_class *class)
1936 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
1937 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);
1943 obj_wasted /= class->objs_per_zspage;
1945 return obj_wasted * class->pages_per_zspage;
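
Lines 1936-1945 are the whole compaction heuristic in three steps: wasted capacity is the difference between allocated and in-use objects (both read via class_stat_read() above), that waste is converted into whole zspages, and each reclaimable zspage is worth pages_per_zspage physical pages. A worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long obj_allocated = 320;  /* total capacity of the class */
        unsigned long obj_used = 250;       /* objects actually live */
        unsigned long objs_per_zspage = 32;
        unsigned long pages_per_zspage = 3;
        unsigned long obj_wasted, pages;

        /* integer division: only fully reclaimable zspages count */
        obj_wasted = (obj_allocated - obj_used) / objs_per_zspage;
        pages = obj_wasted * pages_per_zspage;

        /* (320 - 250) / 32 = 2 zspages, i.e. at most 6 pages freed */
        printf("compaction can free at most %lu pages\n", pages);
        return 0;
    }
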
1949 struct size_class *class)
1960 spin_lock(&class->lock);
1961 while (zs_can_compact(class)) {
1965 dst_zspage = isolate_dst_zspage(class);
1970 src_zspage = isolate_src_zspage(class);
1978 fg = putback_zspage(class, src_zspage);
1980 free_zspage(pool, class, src_zspage);
1981 pages_freed += class->pages_per_zspage;
1985 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
1987 putback_zspage(class, dst_zspage);
1990 spin_unlock(&class->lock);
1994 spin_lock(&class->lock);
1999 putback_zspage(class, src_zspage);
2002 putback_zspage(class, dst_zspage);
2004 spin_unlock(&class->lock);
2013 struct size_class *class;
2026 class = pool->size_class[i];
2027 if (class->index != i)
2029 pages_freed += __zs_compact(pool, class);
2064 struct size_class *class;
2069 class = pool->size_class[i];
2070 if (class->index != i)
2073 pages_to_free += zs_can_compact(class);
2160 struct size_class *class;
2172 * class. Any object bigger than or equal to that will
2173 * endup in the huge class.
2182 * size class search - so object may be smaller than
2183 * huge class size, yet it still can end up in the huge
2184 * class because it grows by ZS_HANDLE_SIZE extra bytes
2185 * right before class lookup.
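
The comments matched at lines 2172-2185 explain why a request can land in the huge class even though it is nominally smaller than the huge class size: the handle is stored alongside the payload, so class lookup happens on size + ZS_HANDLE_SIZE. A tiny illustration of that effect; the huge-class threshold and the ZS_HANDLE_SIZE value below are assumptions made for the example only:

    #include <stdio.h>

    #define ZS_HANDLE_SIZE  sizeof(unsigned long)   /* assumed: 8 bytes */

    int main(void)
    {
        size_t huge_class_size = 3264;  /* assumed threshold */
        size_t req = 3260;              /* nominally below the threshold */

        /* the lookup sees the padded size, which crosses the threshold */
        printf("lookup size %zu -> huge class: %s\n",
               req + ZS_HANDLE_SIZE,
               req + ZS_HANDLE_SIZE >= huge_class_size ? "yes" : "no");
        return 0;
    }
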
2206 class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2207 if (!class)
2210 class->size = size;
2211 class->index = i;
2212 class->pages_per_zspage = pages_per_zspage;
2213 class->objs_per_zspage = objs_per_zspage;
2214 spin_lock_init(&class->lock);
2215 pool->size_class[i] = class;
2219 INIT_LIST_HEAD(&class->fullness_list[fullness]);
2223 prev_class = class;
2255 struct size_class *class = pool->size_class[i];
2257 if (!class)
2260 if (class->index != i)
2264 if (list_empty(&class->fullness_list[fg]))
2268 class->size, fg);
2270 kfree(class);