Selected lines from the zsmalloc pool allocator (mm/zsmalloc.c) matching "pool" and "long".
1 // SPDX-License-Identifier: GPL-2.0-or-later
12 * Released under the terms of 3-clause BSD License
21 * pool->lock
22 * class->lock
23 * zspage->lock
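/*
 * A minimal sketch of how the ordering above shows up in this excerpt
 * (compare zs_free() and zs_page_migrate() below): pool->lock is the
 * outermost rwlock, class->lock the per size-class spinlock, and the
 * per-zspage lock nests innermost.
 *
 *	read_lock(&pool->lock);		pins the zspage against migration
 *	spin_lock(&class->lock);	owns object/handle state
 *	read_unlock(&pool->lock);	class->lock is enough from here on
 *	...
 *	spin_unlock(&class->lock);
 */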
53 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
57 * a single (unsigned long) handle value.
76 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
82 * header keeps a handle, which is a 4 byte-aligned address, so we
90 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
91 #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
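/*
 * A sketch of the split implied by the macros above: the encoded object
 * value packs the page frame number into the high _PFN_BITS and the
 * object index into the low OBJ_INDEX_BITS. encode_obj()/decode_obj()
 * are hypothetical helpers for illustration only; the real conversions
 * are done by location_to_obj()/obj_to_location() further down.
 */
static unsigned long encode_obj(unsigned long pfn, unsigned int obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

static void decode_obj(unsigned long obj, unsigned long *pfn,
		       unsigned int *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}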
108 * trade-off here:
109 * - A large number of size classes is potentially wasteful, as free pages are
111 * - A small number of size classes causes large internal fragmentation
112 * - It is probably better to use specific size classes (empirically
120 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
125 * of ->inuse objects to all objects that page can store). For example,
130 * number of ->inuse objects) and the most busy page (maximum permitted
131 * number of ->inuse objects) at a reasonable value.
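/*
 * A worked example of the usage ratio above, assuming a class that
 * stores 10 objects per zspage: with 1 object in use the ratio is
 * 1 * 100 / 10 = 10%, so the zspage sits in the ZS_INUSE_RATIO_10 list;
 * with 0 objects in use it sits in ZS_INUSE_RATIO_0 and becomes a
 * candidate for freeing (see free_zspage() and async_free_zspage()).
 */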
150 unsigned long objs[NR_CLASS_STAT_TYPES];
177 * For every zspage, zspage->freeobj gives head of this list.
185 * It's valid for a non-allocated object
187 unsigned long next;
191 unsigned long handle;
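/*
 * A sketch of how the free list above is threaded through the unused
 * objects themselves (compare init_zspage() and obj_malloc() below):
 * each free slot stores the index of the next free slot shifted by
 * OBJ_TAG_BITS, and zspage->freeobj holds the index of the head.
 * Allocation pops the head roughly like this (slot() is a hypothetical
 * stand-in for the offset arithmetic done in obj_malloc()):
 *
 *	link = slot(zspage, get_freeobj(zspage));
 *	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 *	link->handle = handle | OBJ_ALLOCATED_TAG;
 */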
251 #define ZS_PAGE_WRLOCKED -1
270 struct zs_pool *pool; member
277 struct zspage_lock *zsl = &zspage->zsl; in zspage_lock_init()
279 lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0); in zspage_lock_init()
280 spin_lock_init(&zsl->lock); in zspage_lock_init()
281 zsl->cnt = ZS_PAGE_UNLOCKED; in zspage_lock_init()
291 * - Writers are blocked by both writers and readers, while readers are only
294 * - Writers are always atomic (to allow readers to spin waiting for them).
296 * - Writers always use trylock (as the lock may be held by sleeping readers).
298 * - Readers may spin on the lock (as they can only wait for atomic writers).
300 * - Readers may sleep while holding the lock (as writes only use trylock).
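/*
 * Taken together, the rules above give zsl->cnt three kinds of values
 * (assuming ZS_PAGE_UNLOCKED is zero): ZS_PAGE_UNLOCKED for no holders,
 * a positive count for readers (each zspage_read_lock() increments it,
 * each zspage_read_unlock() decrements it), and ZS_PAGE_WRLOCKED (-1)
 * for the single atomic writer. zspage_write_trylock() only succeeds
 * when it observes ZS_PAGE_UNLOCKED under zsl->lock.
 */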
304 struct zspage_lock *zsl = &zspage->zsl; in zspage_read_lock()
306 rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_); in zspage_read_lock()
308 spin_lock(&zsl->lock); in zspage_read_lock()
309 zsl->cnt++; in zspage_read_lock()
310 spin_unlock(&zsl->lock); in zspage_read_lock()
312 lock_acquired(&zsl->dep_map, _RET_IP_); in zspage_read_lock()
317 struct zspage_lock *zsl = &zspage->zsl; in zspage_read_unlock()
319 rwsem_release(&zsl->dep_map, _RET_IP_); in zspage_read_unlock()
321 spin_lock(&zsl->lock); in zspage_read_unlock()
322 zsl->cnt--; in zspage_read_unlock()
323 spin_unlock(&zsl->lock); in zspage_read_unlock()
328 struct zspage_lock *zsl = &zspage->zsl; in zspage_write_trylock()
330 spin_lock(&zsl->lock); in zspage_write_trylock()
331 if (zsl->cnt == ZS_PAGE_UNLOCKED) { in zspage_write_trylock()
332 zsl->cnt = ZS_PAGE_WRLOCKED; in zspage_write_trylock()
333 rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_); in zspage_write_trylock()
334 lock_acquired(&zsl->dep_map, _RET_IP_); in zspage_write_trylock()
338 spin_unlock(&zsl->lock); in zspage_write_trylock()
344 struct zspage_lock *zsl = &zspage->zsl; in zspage_write_unlock()
346 rwsem_release(&zsl->dep_map, _RET_IP_); in zspage_write_unlock()
348 zsl->cnt = ZS_PAGE_UNLOCKED; in zspage_write_unlock()
349 spin_unlock(&zsl->lock); in zspage_write_unlock()
355 zspage->huge = 1; in SetZsHugePage()
360 return zspage->huge; in ZsHugePage()
364 static void kick_deferred_free(struct zs_pool *pool);
365 static void init_deferred_free(struct zs_pool *pool);
366 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
368 static void kick_deferred_free(struct zs_pool *pool) {} in kick_deferred_free() argument
369 static void init_deferred_free(struct zs_pool *pool) {} in init_deferred_free() argument
370 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} in SetZsPageMovable() argument
373 static int create_cache(struct zs_pool *pool) in create_cache() argument
377 name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); in create_cache()
379 return -ENOMEM; in create_cache()
380 pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE, in create_cache()
383 if (!pool->handle_cachep) in create_cache()
384 return -EINVAL; in create_cache()
386 name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name); in create_cache()
388 return -ENOMEM; in create_cache()
389 pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage), in create_cache()
392 if (!pool->zspage_cachep) { in create_cache()
393 kmem_cache_destroy(pool->handle_cachep); in create_cache()
394 pool->handle_cachep = NULL; in create_cache()
395 return -EINVAL; in create_cache()
401 static void destroy_cache(struct zs_pool *pool) in destroy_cache() argument
403 kmem_cache_destroy(pool->handle_cachep); in destroy_cache()
404 kmem_cache_destroy(pool->zspage_cachep); in destroy_cache()
407 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) in cache_alloc_handle() argument
409 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, in cache_alloc_handle()
413 static void cache_free_handle(struct zs_pool *pool, unsigned long handle) in cache_free_handle() argument
415 kmem_cache_free(pool->handle_cachep, (void *)handle); in cache_free_handle()
418 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) in cache_alloc_zspage() argument
420 return kmem_cache_zalloc(pool->zspage_cachep, in cache_alloc_zspage()
424 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) in cache_free_zspage() argument
426 kmem_cache_free(pool->zspage_cachep, zspage); in cache_free_zspage()
429 /* class->lock (which owns the handle) synchronizes races */
430 static void record_obj(unsigned long handle, unsigned long obj) in record_obj()
432 *(unsigned long *)handle = obj; in record_obj()
440 /* Protected by class->lock */
443 return zspage->inuse; in get_zspage_inuse()
448 zspage->inuse += val; in mod_zspage_inuse()
453 struct zpdesc *first_zpdesc = zspage->first_zpdesc; in get_first_zpdesc()
464 return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK; in get_first_obj_offset()
473 zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK; in set_first_obj_offset()
474 zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK; in set_first_obj_offset()
479 return zspage->freeobj; in get_freeobj()
484 zspage->freeobj = obj; in set_freeobj()
487 static struct size_class *zspage_class(struct zs_pool *pool, in zspage_class() argument
490 return pool->size_class[zspage->class]; in zspage_class()
494 * zsmalloc divides the pool into various size classes where each
505 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
508 return min_t(int, ZS_SIZE_CLASSES - 1, idx); in get_size_class_index()
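/*
 * Worked example for get_size_class_index() above, assuming the usual
 * 4 KiB-page values of ZS_MIN_ALLOC_SIZE = 32 and a 16-byte class
 * delta: a 100-byte request yields DIV_ROUND_UP(100 - 32, 16) = 5,
 * i.e. it is served from the 32 + 5 * 16 = 112-byte size class. The
 * min_t() clamp only matters for requests close to ZS_MAX_ALLOC_SIZE.
 */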
512 unsigned long cnt) in class_stat_add()
514 class->stats.objs[type] += cnt; in class_stat_add()
518 unsigned long cnt) in class_stat_sub()
520 class->stats.objs[type] -= cnt; in class_stat_sub()
523 static inline unsigned long class_stat_read(struct size_class *class, int type) in class_stat_read()
525 return class->stats.objs[type]; in class_stat_read()
545 static unsigned long zs_can_compact(struct size_class *class);
550 struct zs_pool *pool = s->private; in zs_stats_size_show() local
553 unsigned long obj_allocated, obj_used, pages_used, freeable; in zs_stats_size_show()
554 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; in zs_stats_size_show()
555 unsigned long total_freeable = 0; in zs_stats_size_show()
556 unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, }; in zs_stats_size_show()
566 class = pool->size_class[i]; in zs_stats_size_show()
568 if (class->index != i) in zs_stats_size_show()
571 spin_lock(&class->lock); in zs_stats_size_show()
573 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
582 spin_unlock(&class->lock); in zs_stats_size_show()
584 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
586 class->pages_per_zspage; in zs_stats_size_show()
590 class->pages_per_zspage, freeable); in zs_stats_size_show()
612 static void zs_pool_stat_create(struct zs_pool *pool, const char *name) in zs_pool_stat_create() argument
619 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); in zs_pool_stat_create()
621 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, in zs_pool_stat_create()
625 static void zs_pool_stat_destroy(struct zs_pool *pool) in zs_pool_stat_destroy() argument
627 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_destroy()
639 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name) in zs_pool_stat_create() argument
643 static inline void zs_pool_stat_destroy(struct zs_pool *pool) in zs_pool_stat_destroy() argument
659 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
686 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
687 zspage->fullness = fullness; in insert_zspage()
696 int fullness = zspage->fullness; in remove_zspage()
698 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
700 list_del_init(&zspage->list); in remove_zspage()
718 if (newfg == zspage->fullness) in fix_fullness_group()
729 struct zspage *zspage = zpdesc->zspage; in get_zspage()
731 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage()
742 return zpdesc->next; in get_next_zpdesc()
746 * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
751 static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc, in obj_to_location()
758 static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc) in obj_to_zpdesc()
764 * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>)
768 static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx) in location_to_obj()
770 unsigned long obj; in location_to_obj()
778 static unsigned long handle_to_obj(unsigned long handle) in handle_to_obj()
780 return *(unsigned long *)handle; in handle_to_obj()
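/*
 * A sketch of the handle indirection implemented by record_obj() and
 * handle_to_obj() above: a handle is the address of a small slab slot
 * (ZS_HANDLE_SIZE bytes) holding the current encoded object value, so
 * migration can relocate the object and only has to rewrite the slot.
 *
 *	handle = cache_alloc_handle(pool, gfp);	(address of an unsigned long)
 *	record_obj(handle, obj);		(*(unsigned long *)handle = obj)
 *	obj = handle_to_obj(handle);		(read it back later)
 */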
784 unsigned long *phandle) in obj_allocated()
786 unsigned long handle; in obj_allocated()
791 handle = zpdesc->handle; in obj_allocated()
793 handle = *(unsigned long *)obj; in obj_allocated()
808 zpdesc->zspage = NULL; in reset_zpdesc()
809 zpdesc->next = NULL; in reset_zpdesc()
834 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
839 assert_spin_locked(&class->lock); in __free_zspage()
842 VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); in __free_zspage()
855 cache_free_zspage(pool, zspage); in __free_zspage()
857 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
858 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
861 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
865 VM_BUG_ON(list_empty(&zspage->list)); in free_zspage()
873 kick_deferred_free(pool); in free_zspage()
878 __free_zspage(pool, class, zspage); in free_zspage()
885 unsigned long off = 0; in init_zspage()
898 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
899 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
900 link += class->size / sizeof(*link); in init_zspage()
910 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
916 link->next = -1UL << OBJ_TAG_BITS; in init_zspage()
932 int nr_zpdescs = class->pages_per_zspage; in create_page_chain()
936 * 1. all pages are linked together using zpdesc->next in create_page_chain()
937 * 2. each sub-page points to zspage using zpdesc->zspage in create_page_chain()
944 zpdesc->zspage = zspage; in create_page_chain()
945 zpdesc->next = NULL; in create_page_chain()
947 zspage->first_zpdesc = zpdesc; in create_page_chain()
949 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
950 class->pages_per_zspage == 1)) in create_page_chain()
953 prev_zpdesc->next = zpdesc; in create_page_chain()
962 static struct zspage *alloc_zspage(struct zs_pool *pool, in alloc_zspage() argument
968 struct zspage *zspage = cache_alloc_zspage(pool, gfp); in alloc_zspage()
976 zspage->magic = ZSPAGE_MAGIC; in alloc_zspage()
977 zspage->pool = pool; in alloc_zspage()
978 zspage->class = class->index; in alloc_zspage()
981 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
986 while (--i >= 0) { in alloc_zspage()
990 cache_free_zspage(pool, zspage); in alloc_zspage()
1010 for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) { in find_get_zspage()
1011 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1023 if (prev->pages_per_zspage == pages_per_zspage && in can_merge()
1024 prev->objs_per_zspage == objs_per_zspage) in can_merge()
1032 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1041 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
1043 * @pool: zsmalloc pool to use
1051 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size) in zs_lookup_class_index() argument
1055 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1057 return class->index; in zs_lookup_class_index()
1061 unsigned long zs_get_total_pages(struct zs_pool *pool) in zs_get_total_pages() argument
1063 return atomic_long_read(&pool->pages_allocated); in zs_get_total_pages()
1067 void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, in zs_obj_read_begin() argument
1072 unsigned long obj, off; in zs_obj_read_begin()
1078 read_lock(&pool->lock); in zs_obj_read_begin()
1085 read_unlock(&pool->lock); in zs_obj_read_begin()
1087 class = zspage_class(pool, zspage); in zs_obj_read_begin()
1088 off = offset_in_page(class->size * obj_idx); in zs_obj_read_begin()
1090 if (off + class->size <= PAGE_SIZE) { in zs_obj_read_begin()
1098 sizes[0] = PAGE_SIZE - off; in zs_obj_read_begin()
1099 sizes[1] = class->size - sizes[0]; in zs_obj_read_begin()
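/*
 * Worked example for the split above, assuming 4 KiB pages and a
 * 720-byte size class: the object at index 5 starts at offset
 * 5 * 720 = 3600 within its page, so off + class->size = 4320 exceeds
 * PAGE_SIZE and the access is split into sizes[0] = 4096 - 3600 = 496
 * bytes from the first zpdesc and sizes[1] = 720 - 496 = 224 bytes
 * from the next one.
 */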
1117 void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, in zs_obj_read_end() argument
1122 unsigned long obj, off; in zs_obj_read_end()
1129 class = zspage_class(pool, zspage); in zs_obj_read_end()
1130 off = offset_in_page(class->size * obj_idx); in zs_obj_read_end()
1132 if (off + class->size <= PAGE_SIZE) { in zs_obj_read_end()
1135 handle_mem -= off; in zs_obj_read_end()
1143 void zs_obj_write(struct zs_pool *pool, unsigned long handle, in zs_obj_write() argument
1148 unsigned long obj, off; in zs_obj_write()
1153 read_lock(&pool->lock); in zs_obj_write()
1160 read_unlock(&pool->lock); in zs_obj_write()
1162 class = zspage_class(pool, zspage); in zs_obj_write()
1163 off = offset_in_page(class->size * obj_idx); in zs_obj_write()
1178 sizes[0] = PAGE_SIZE - off; in zs_obj_write()
1179 sizes[1] = mem_len - sizes[0]; in zs_obj_write()
1193 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1195 * @pool: zsmalloc pool to use
1197 * The function returns the size of the first huge class - any object of equal
1205 size_t zs_huge_class_size(struct zs_pool *pool) in zs_huge_class_size() argument
1211 static unsigned long obj_malloc(struct zs_pool *pool, in obj_malloc() argument
1212 struct zspage *zspage, unsigned long handle) in obj_malloc()
1215 unsigned long obj; in obj_malloc()
1220 unsigned long m_offset; in obj_malloc()
1223 class = pool->size_class[zspage->class]; in obj_malloc()
1226 offset = obj * class->size; in obj_malloc()
1236 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); in obj_malloc()
1239 link->handle = handle | OBJ_ALLOCATED_TAG; in obj_malloc()
1241 zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG; in obj_malloc()
1254 * zs_malloc - Allocate block of given size from pool.
1255 * @pool: pool to allocate from
1264 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp, in zs_malloc() argument
1267 unsigned long handle; in zs_malloc()
1273 return (unsigned long)ERR_PTR(-EINVAL); in zs_malloc()
1276 return (unsigned long)ERR_PTR(-ENOSPC); in zs_malloc()
1278 handle = cache_alloc_handle(pool, gfp); in zs_malloc()
1280 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1284 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1286 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1287 spin_lock(&class->lock); in zs_malloc()
1290 obj_malloc(pool, zspage, handle); in zs_malloc()
1298 spin_unlock(&class->lock); in zs_malloc()
1300 zspage = alloc_zspage(pool, class, gfp, nid); in zs_malloc()
1302 cache_free_handle(pool, handle); in zs_malloc()
1303 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1306 spin_lock(&class->lock); in zs_malloc()
1307 obj_malloc(pool, zspage, handle); in zs_malloc()
1310 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1311 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1315 SetZsPageMovable(pool, zspage); in zs_malloc()
1317 spin_unlock(&class->lock); in zs_malloc()
1323 static void obj_free(int class_size, unsigned long obj) in obj_free()
1328 unsigned long f_offset; in obj_free()
1342 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; in obj_free()
1344 f_zpdesc->handle = 0; in obj_free()
1348 mod_zspage_inuse(zspage, -1); in obj_free()
1351 void zs_free(struct zs_pool *pool, unsigned long handle) in zs_free() argument
1355 unsigned long obj; in zs_free()
1363 * The pool->lock protects the race with zpage's migration in zs_free()
1366 read_lock(&pool->lock); in zs_free()
1370 class = zspage_class(pool, zspage); in zs_free()
1371 spin_lock(&class->lock); in zs_free()
1372 read_unlock(&pool->lock); in zs_free()
1375 obj_free(class->size, obj); in zs_free()
1379 free_zspage(pool, class, zspage); in zs_free()
1381 spin_unlock(&class->lock); in zs_free()
1382 cache_free_handle(pool, handle); in zs_free()
1386 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy()
1387 unsigned long src) in zs_object_copy()
1391 unsigned long s_off, d_off; in zs_object_copy()
1396 s_size = d_size = class->size; in zs_object_copy()
1401 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1402 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1404 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1405 s_size = PAGE_SIZE - s_off; in zs_object_copy()
1407 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1408 d_size = PAGE_SIZE - d_off; in zs_object_copy()
1418 if (written == class->size) in zs_object_copy()
1422 s_size -= size; in zs_object_copy()
1424 d_size -= size; in zs_object_copy()
1439 s_size = class->size - written; in zs_object_copy()
1447 d_size = class->size - written; in zs_object_copy()
1460 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj()
1465 unsigned long handle = 0; in find_alloced_obj()
1469 offset += class->size * index; in find_alloced_obj()
1475 offset += class->size; in find_alloced_obj()
1486 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, in migrate_zspage() argument
1489 unsigned long used_obj, free_obj; in migrate_zspage()
1490 unsigned long handle; in migrate_zspage()
1493 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage()
1506 free_obj = obj_malloc(pool, dst_zspage, handle); in migrate_zspage()
1509 obj_free(class->size, used_obj); in migrate_zspage()
1527 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1543 for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) { in isolate_dst_zspage()
1544 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1556 * putback_zspage - add @zspage to the right class's fullness list
1639 newzpdesc->handle = oldzpdesc->handle; in replace_sub_page()
1650 return page_zpdesc(page)->zspage; in zs_page_isolate()
1656 struct zs_pool *pool; in zs_page_migrate() local
1664 unsigned long handle; in zs_page_migrate()
1665 unsigned long old_obj, new_obj; in zs_page_migrate()
1672 * and defer destroying such pages once they are un-isolated (putback) in zs_page_migrate()
1675 if (!zpdesc->zspage) in zs_page_migrate()
1680 pool = zspage->pool; in zs_page_migrate()
1683 * The pool->lock protects the race between zpage migration in zs_page_migrate()
1686 write_lock(&pool->lock); in zs_page_migrate()
1687 class = zspage_class(pool, zspage); in zs_page_migrate()
1692 spin_lock(&class->lock); in zs_page_migrate()
1695 spin_unlock(&class->lock); in zs_page_migrate()
1696 write_unlock(&pool->lock); in zs_page_migrate()
1697 return -EINVAL; in zs_page_migrate()
1714 addr += class->size) { in zs_page_migrate()
1719 new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx); in zs_page_migrate()
1730 write_unlock(&pool->lock); in zs_page_migrate()
1731 spin_unlock(&class->lock); in zs_page_migrate()
1766 struct zs_pool *pool = container_of(work, struct zs_pool, in async_free_zspage() local
1770 class = pool->size_class[i]; in async_free_zspage()
1771 if (class->index != i) in async_free_zspage()
1774 spin_lock(&class->lock); in async_free_zspage()
1775 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1777 spin_unlock(&class->lock); in async_free_zspage()
1781 list_del(&zspage->list); in async_free_zspage()
1784 class = zspage_class(pool, zspage); in async_free_zspage()
1785 spin_lock(&class->lock); in async_free_zspage()
1787 __free_zspage(pool, class, zspage); in async_free_zspage()
1788 spin_unlock(&class->lock); in async_free_zspage()
1792 static void kick_deferred_free(struct zs_pool *pool) in kick_deferred_free() argument
1794 schedule_work(&pool->free_work); in kick_deferred_free()
1797 static void zs_flush_migration(struct zs_pool *pool) in zs_flush_migration() argument
1799 flush_work(&pool->free_work); in zs_flush_migration()
1802 static void init_deferred_free(struct zs_pool *pool) in init_deferred_free() argument
1804 INIT_WORK(&pool->free_work, async_free_zspage); in init_deferred_free()
1807 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) in SetZsPageMovable() argument
1818 static inline void zs_flush_migration(struct zs_pool *pool) { } in zs_flush_migration() argument
1826 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact()
1828 unsigned long obj_wasted; in zs_can_compact()
1829 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_can_compact()
1830 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_can_compact()
1835 obj_wasted = obj_allocated - obj_used; in zs_can_compact()
1836 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1838 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
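/*
 * Worked example for zs_can_compact() above, with an assumed class
 * geometry of 8 objects and 2 pages per zspage: if 64 objects are
 * allocated but only 30 are in use, obj_wasted = (64 - 30) / 8 = 4
 * zspages worth of dead slots, so compaction can free at most
 * 4 * 2 = 8 pages from this class.
 */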
1841 static unsigned long __zs_compact(struct zs_pool *pool, in __zs_compact() argument
1846 unsigned long pages_freed = 0; in __zs_compact()
1852 write_lock(&pool->lock); in __zs_compact()
1853 spin_lock(&class->lock); in __zs_compact()
1870 migrate_zspage(pool, src_zspage, dst_zspage); in __zs_compact()
1875 free_zspage(pool, class, src_zspage); in __zs_compact()
1876 pages_freed += class->pages_per_zspage; in __zs_compact()
1881 || rwlock_is_contended(&pool->lock)) { in __zs_compact()
1885 spin_unlock(&class->lock); in __zs_compact()
1886 write_unlock(&pool->lock); in __zs_compact()
1888 write_lock(&pool->lock); in __zs_compact()
1889 spin_lock(&class->lock); in __zs_compact()
1899 spin_unlock(&class->lock); in __zs_compact()
1900 write_unlock(&pool->lock); in __zs_compact()
1905 unsigned long zs_compact(struct zs_pool *pool) in zs_compact() argument
1909 unsigned long pages_freed = 0; in zs_compact()
1912 * Pool compaction is performed under pool->lock so it is basically in zs_compact()
1913 * single-threaded. Having more than one thread in __zs_compact() in zs_compact()
1914 * will increase pool->lock contention, which will impact other in zs_compact()
1915 * zsmalloc operations that need pool->lock. in zs_compact()
1917 if (atomic_xchg(&pool->compaction_in_progress, 1)) in zs_compact()
1920 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_compact()
1921 class = pool->size_class[i]; in zs_compact()
1922 if (class->index != i) in zs_compact()
1924 pages_freed += __zs_compact(pool, class); in zs_compact()
1926 atomic_long_add(pages_freed, &pool->stats.pages_compacted); in zs_compact()
1927 atomic_set(&pool->compaction_in_progress, 0); in zs_compact()
1933 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) in zs_pool_stats() argument
1935 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); in zs_pool_stats()
1939 static unsigned long zs_shrinker_scan(struct shrinker *shrinker, in zs_shrinker_scan()
1942 unsigned long pages_freed; in zs_shrinker_scan()
1943 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_scan() local
1950 pages_freed = zs_compact(pool); in zs_shrinker_scan()
1955 static unsigned long zs_shrinker_count(struct shrinker *shrinker, in zs_shrinker_count()
1960 unsigned long pages_to_free = 0; in zs_shrinker_count()
1961 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_count() local
1963 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_shrinker_count()
1964 class = pool->size_class[i]; in zs_shrinker_count()
1965 if (class->index != i) in zs_shrinker_count()
1974 static void zs_unregister_shrinker(struct zs_pool *pool) in zs_unregister_shrinker() argument
1976 shrinker_free(pool->shrinker); in zs_unregister_shrinker()
1979 static int zs_register_shrinker(struct zs_pool *pool) in zs_register_shrinker() argument
1981 pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name); in zs_register_shrinker()
1982 if (!pool->shrinker) in zs_register_shrinker()
1983 return -ENOMEM; in zs_register_shrinker()
1985 pool->shrinker->scan_objects = zs_shrinker_scan; in zs_register_shrinker()
1986 pool->shrinker->count_objects = zs_shrinker_count; in zs_register_shrinker()
1987 pool->shrinker->batch = 0; in zs_register_shrinker()
1988 pool->shrinker->private_data = pool; in zs_register_shrinker()
1990 shrinker_register(pool->shrinker); in zs_register_shrinker()
2017 * zs_create_pool - Creates an allocation pool to work from.
2018 * @name: pool name to be created
2023 * On success, a pointer to the newly created pool is returned,
2029 struct zs_pool *pool; in zs_create_pool() local
2032 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in zs_create_pool()
2033 if (!pool) in zs_create_pool()
2036 init_deferred_free(pool); in zs_create_pool()
2037 rwlock_init(&pool->lock); in zs_create_pool()
2038 atomic_set(&pool->compaction_in_progress, 0); in zs_create_pool()
2040 pool->name = kstrdup(name, GFP_KERNEL); in zs_create_pool()
2041 if (!pool->name) in zs_create_pool()
2044 if (create_cache(pool)) in zs_create_pool()
2051 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_create_pool()
2077 * size class search - so object may be smaller than in zs_create_pool()
2082 huge_class_size -= (ZS_HANDLE_SIZE - 1); in zs_create_pool()
2096 pool->size_class[i] = prev_class; in zs_create_pool()
2105 class->size = size; in zs_create_pool()
2106 class->index = i; in zs_create_pool()
2107 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2108 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2109 spin_lock_init(&class->lock); in zs_create_pool()
2110 pool->size_class[i] = class; in zs_create_pool()
2114 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2122 zs_pool_stat_create(pool, name); in zs_create_pool()
2126 * defragmentation of the pool, which is a pretty optional thing. If in zs_create_pool()
2127 * registration fails we still can use the pool normally and user can in zs_create_pool()
2130 zs_register_shrinker(pool); in zs_create_pool()
2132 return pool; in zs_create_pool()
2135 zs_destroy_pool(pool); in zs_create_pool()
2140 void zs_destroy_pool(struct zs_pool *pool) in zs_destroy_pool() argument
2144 zs_unregister_shrinker(pool); in zs_destroy_pool()
2145 zs_flush_migration(pool); in zs_destroy_pool()
2146 zs_pool_stat_destroy(pool); in zs_destroy_pool()
2150 struct size_class *class = pool->size_class[i]; in zs_destroy_pool()
2155 if (class->index != i) in zs_destroy_pool()
2159 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2162 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2163 class->size, fg); in zs_destroy_pool()
2168 destroy_cache(pool); in zs_destroy_pool()
2169 kfree(pool->name); in zs_destroy_pool()
2170 kfree(pool); in zs_destroy_pool()
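/*
 * A minimal usage sketch of the pool API excerpted above, following the
 * functions shown here (zs_create_pool(), zs_malloc(), zs_obj_write(),
 * zs_obj_read_begin()/zs_obj_read_end(), zs_free(), zs_destroy_pool()).
 * Parameters not fully visible in the excerpt (the NUMA node argument
 * of zs_malloc() and the local-copy/length arguments of the read/write
 * helpers) are assumptions.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/zsmalloc.h>

static int zs_usage_sketch(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	char payload[64] = "compressed data";
	char local_copy[64];
	void *mem;

	pool = zs_create_pool("example");
	if (!pool)
		return -ENOMEM;

	/* Allocate an object big enough for the payload. */
	handle = zs_malloc(pool, sizeof(payload), GFP_KERNEL, NUMA_NO_NODE);
	if (IS_ERR_VALUE(handle)) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* Copy the payload into the object behind the handle. */
	zs_obj_write(pool, handle, payload, sizeof(payload));

	/* Read it back; the object may span two pages, hence local_copy. */
	mem = zs_obj_read_begin(pool, handle, local_copy);
	/* ... consume sizeof(payload) bytes at mem ... */
	zs_obj_read_end(pool, handle, mem);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}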