Lines matching "full:pool" in mm/zsmalloc.c
21 * pool->lock
270 struct zs_pool *pool; member
364 static void kick_deferred_free(struct zs_pool *pool);
365 static void init_deferred_free(struct zs_pool *pool);
366 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
368 static void kick_deferred_free(struct zs_pool *pool) {} in kick_deferred_free() argument
369 static void init_deferred_free(struct zs_pool *pool) {} in init_deferred_free() argument
370 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} in SetZsPageMovable() argument
373 static int create_cache(struct zs_pool *pool) in create_cache() argument
377 name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); in create_cache()
380 pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE, in create_cache()
383 if (!pool->handle_cachep) in create_cache()
386 name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name); in create_cache()
389 pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage), in create_cache()
392 if (!pool->zspage_cachep) { in create_cache()
393 kmem_cache_destroy(pool->handle_cachep); in create_cache()
394 pool->handle_cachep = NULL; in create_cache()
401 static void destroy_cache(struct zs_pool *pool) in destroy_cache() argument
403 kmem_cache_destroy(pool->handle_cachep); in destroy_cache()
404 kmem_cache_destroy(pool->zspage_cachep); in destroy_cache()
407 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) in cache_alloc_handle() argument
409 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, in cache_alloc_handle()
413 static void cache_free_handle(struct zs_pool *pool, unsigned long handle) in cache_free_handle() argument
415 kmem_cache_free(pool->handle_cachep, (void *)handle); in cache_free_handle()
418 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) in cache_alloc_zspage() argument
420 return kmem_cache_zalloc(pool->zspage_cachep, in cache_alloc_zspage()
424 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) in cache_free_zspage() argument
426 kmem_cache_free(pool->zspage_cachep, zspage); in cache_free_zspage()
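The create_cache()/destroy_cache() and cache_*() helpers above wrap two per-pool slab caches, one for object handles and one for struct zspage, each named after the pool. Below is a minimal sketch of that same pattern with the generic slab API; the my_pool/my_obj names are illustrative and not part of zsmalloc.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_obj {
	unsigned long payload;
};

struct my_pool {
	const char *name;
	struct kmem_cache *obj_cachep;	/* plays the role of pool->zspage_cachep */
};

static int my_pool_create_cache(struct my_pool *pool)
{
	char *name = kasprintf(GFP_KERNEL, "myobj-%s", pool->name);

	if (!name)
		return -ENOMEM;
	/* One dedicated cache per pool; kmem_cache_create() duplicates the name. */
	pool->obj_cachep = kmem_cache_create(name, sizeof(struct my_obj), 0, 0, NULL);
	kfree(name);
	return pool->obj_cachep ? 0 : -EINVAL;
}

static void my_pool_destroy_cache(struct my_pool *pool)
{
	kmem_cache_destroy(pool->obj_cachep);
}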
487 static struct size_class *zspage_class(struct zs_pool *pool, in zspage_class() argument
490 return pool->size_class[zspage->class]; in zspage_class()
494 * zsmalloc divides the pool into various size classes where each
550 struct zs_pool *pool = s->private; in zs_stats_size_show() local
566 class = pool->size_class[i]; in zs_stats_size_show()
612 static void zs_pool_stat_create(struct zs_pool *pool, const char *name) in zs_pool_stat_create() argument
619 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); in zs_pool_stat_create()
621 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, in zs_pool_stat_create()
625 static void zs_pool_stat_destroy(struct zs_pool *pool) in zs_pool_stat_destroy() argument
627 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_destroy()
639 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name) in zs_pool_stat_create() argument
643 static inline void zs_pool_stat_destroy(struct zs_pool *pool) in zs_pool_stat_destroy() argument
834 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
855 cache_free_zspage(pool, zspage); in __free_zspage()
858 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
861 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
873 kick_deferred_free(pool); in free_zspage()
878 __free_zspage(pool, class, zspage); in free_zspage()
962 static struct zspage *alloc_zspage(struct zs_pool *pool, in alloc_zspage() argument
968 struct zspage *zspage = cache_alloc_zspage(pool, gfp); in alloc_zspage()
977 zspage->pool = pool; in alloc_zspage()
990 cache_free_zspage(pool, zspage); in alloc_zspage()
1043 * @pool: zsmalloc pool to use
1051 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size) in zs_lookup_class_index() argument
1055 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1061 unsigned long zs_get_total_pages(struct zs_pool *pool) in zs_get_total_pages() argument
1063 return atomic_long_read(&pool->pages_allocated); in zs_get_total_pages()
1067 void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, in zs_obj_read_begin() argument
1078 read_lock(&pool->lock); in zs_obj_read_begin()
1085 read_unlock(&pool->lock); in zs_obj_read_begin()
1087 class = zspage_class(pool, zspage); in zs_obj_read_begin()
1117 void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, in zs_obj_read_end() argument
1129 class = zspage_class(pool, zspage); in zs_obj_read_end()
1143 void zs_obj_write(struct zs_pool *pool, unsigned long handle, in zs_obj_write() argument
1153 read_lock(&pool->lock); in zs_obj_write()
1160 read_unlock(&pool->lock); in zs_obj_write()
1162 class = zspage_class(pool, zspage); in zs_obj_write()
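A hedged usage sketch for the zs_obj_read_begin()/zs_obj_read_end()/zs_obj_write() trio matched above. Only the first prototype line of each appears in this listing; the scratch-buffer argument of the read side and the length argument of zs_obj_write() are assumptions based on how zram-style callers use this interface.

#include <linux/zsmalloc.h>
#include <linux/string.h>

/* Copy 'len' bytes of the object behind 'handle' into 'dst'.  'scratch' is a
 * caller-provided buffer of at least 'len' bytes, assumed to back the read
 * only when the object straddles a page boundary. */
static void copy_object_out(struct zs_pool *pool, unsigned long handle,
			    void *dst, size_t len, void *scratch)
{
	void *obj = zs_obj_read_begin(pool, handle, scratch);

	memcpy(dst, obj, len);
	zs_obj_read_end(pool, handle, obj);	/* hand back the pointer from read_begin */
}

/* Overwrite the object with 'len' bytes from 'src'. */
static void copy_object_in(struct zs_pool *pool, unsigned long handle,
			   void *src, size_t len)
{
	zs_obj_write(pool, handle, src, len);
}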
1195 * @pool: zsmalloc pool to use
1205 size_t zs_huge_class_size(struct zs_pool *pool) in zs_huge_class_size() argument
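zs_lookup_class_index() and zs_huge_class_size() expose the pool's size-class geometry to callers. A short, zram-style sketch of how they might be used; the threshold policy shown is illustrative, not taken from this file.

#include <linux/zsmalloc.h>

/* Sizes that fall into a huge class occupy a full page of their own, so a
 * zram-style caller may store data that compresses this poorly as-is. */
static bool worth_compressing(struct zs_pool *pool, size_t comp_len)
{
	return comp_len < zs_huge_class_size(pool);
}

/* Map an object size to its size-class index, e.g. for per-class stats. */
static unsigned int class_of(struct zs_pool *pool, size_t comp_len)
{
	return zs_lookup_class_index(pool, comp_len);
}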
1211 static unsigned long obj_malloc(struct zs_pool *pool, in obj_malloc() argument
1223 class = pool->size_class[zspage->class]; in obj_malloc()
1254 * zs_malloc - Allocate block of given size from pool.
1255 * @pool: pool to allocate from
1264 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp, in zs_malloc() argument
1278 handle = cache_alloc_handle(pool, gfp); in zs_malloc()
1284 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1290 obj_malloc(pool, zspage, handle); in zs_malloc()
1300 zspage = alloc_zspage(pool, class, gfp, nid); in zs_malloc()
1302 cache_free_handle(pool, handle); in zs_malloc()
1307 obj_malloc(pool, zspage, handle); in zs_malloc()
1310 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1315 SetZsPageMovable(pool, zspage); in zs_malloc()
1351 void zs_free(struct zs_pool *pool, unsigned long handle) in zs_free() argument
1363 * The pool->lock protects the race with zpage's migration in zs_free()
1366 read_lock(&pool->lock); in zs_free()
1370 class = zspage_class(pool, zspage); in zs_free()
1372 read_unlock(&pool->lock); in zs_free()
1379 free_zspage(pool, class, zspage); in zs_free()
1382 cache_free_handle(pool, handle); in zs_free()
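A hedged sketch of the allocate/write/free cycle around zs_malloc() and zs_free() above. The fourth zs_malloc() argument is assumed to be a NUMA node id, based on the nid forwarded to alloc_zspage() in this listing, and failure is assumed to be reported as an IS_ERR_VALUE()-encoded handle.

#include <linux/zsmalloc.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/numa.h>

static unsigned long store_buffer(struct zs_pool *pool, void *src, size_t len)
{
	unsigned long handle;

	/* NUMA_NO_NODE: let zsmalloc pick the node (assumed parameter). */
	handle = zs_malloc(pool, len, GFP_KERNEL, NUMA_NO_NODE);
	if (IS_ERR_VALUE(handle))
		return 0;

	/* The handle is opaque; the payload goes in via zs_obj_write(). */
	zs_obj_write(pool, handle, src, len);
	return handle;
}

static void drop_buffer(struct zs_pool *pool, unsigned long handle)
{
	if (handle)
		zs_free(pool, handle);
}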
1486 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, in migrate_zspage() argument
1493 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage()
1506 free_obj = obj_malloc(pool, dst_zspage, handle); in migrate_zspage()
1656 struct zs_pool *pool; in zs_page_migrate() local
1680 pool = zspage->pool; in zs_page_migrate()
1683 * The pool migrate_lock protects the race between zpage migration in zs_page_migrate()
1686 write_lock(&pool->lock); in zs_page_migrate()
1687 class = zspage_class(pool, zspage); in zs_page_migrate()
1696 write_unlock(&pool->lock); in zs_page_migrate()
1730 write_unlock(&pool->lock); in zs_page_migrate()
1766 struct zs_pool *pool = container_of(work, struct zs_pool, in async_free_zspage() local
1770 class = pool->size_class[i]; in async_free_zspage()
1784 class = zspage_class(pool, zspage); in async_free_zspage()
1787 __free_zspage(pool, class, zspage); in async_free_zspage()
1792 static void kick_deferred_free(struct zs_pool *pool) in kick_deferred_free() argument
1794 schedule_work(&pool->free_work); in kick_deferred_free()
1797 static void zs_flush_migration(struct zs_pool *pool) in zs_flush_migration() argument
1799 flush_work(&pool->free_work); in zs_flush_migration()
1802 static void init_deferred_free(struct zs_pool *pool) in init_deferred_free() argument
1804 INIT_WORK(&pool->free_work, async_free_zspage); in init_deferred_free()
1807 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) in SetZsPageMovable() argument
1818 static inline void zs_flush_migration(struct zs_pool *pool) { } in zs_flush_migration() argument
1841 static unsigned long __zs_compact(struct zs_pool *pool, in __zs_compact() argument
1852 write_lock(&pool->lock); in __zs_compact()
1870 migrate_zspage(pool, src_zspage, dst_zspage); in __zs_compact()
1875 free_zspage(pool, class, src_zspage); in __zs_compact()
1881 || rwlock_is_contended(&pool->lock)) { in __zs_compact()
1886 write_unlock(&pool->lock); in __zs_compact()
1888 write_lock(&pool->lock); in __zs_compact()
1900 write_unlock(&pool->lock); in __zs_compact()
1905 unsigned long zs_compact(struct zs_pool *pool) in zs_compact() argument
1912 * Pool compaction is performed under pool->lock so it is basically in zs_compact()
1914 * will increase pool->lock contention, which will impact other in zs_compact()
1915 * zsmalloc operations that need pool->lock. in zs_compact()
1917 if (atomic_xchg(&pool->compaction_in_progress, 1)) in zs_compact()
1921 class = pool->size_class[i]; in zs_compact()
1924 pages_freed += __zs_compact(pool, class); in zs_compact()
1926 atomic_long_add(pages_freed, &pool->stats.pages_compacted); in zs_compact()
1927 atomic_set(&pool->compaction_in_progress, 0); in zs_compact()
1933 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) in zs_pool_stats() argument
1935 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); in zs_pool_stats()
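A hedged sketch showing how zs_compact() and zs_pool_stats() above fit together: the former returns the pages freed by one compaction pass, the latter copies out the cumulative pages_compacted counter.

#include <linux/zsmalloc.h>
#include <linux/atomic.h>
#include <linux/printk.h>

static void report_compaction(struct zs_pool *pool)
{
	struct zs_pool_stats stats;
	unsigned long freed;

	freed = zs_compact(pool);	/* pages freed by this pass */
	zs_pool_stats(pool, &stats);	/* snapshot of cumulative stats */

	pr_info("compacted: %lu pages freed now, %lu in total\n",
		freed, (unsigned long)atomic_long_read(&stats.pages_compacted));
}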
1943 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_scan() local
1950 pages_freed = zs_compact(pool); in zs_shrinker_scan()
1961 struct zs_pool *pool = shrinker->private_data; in zs_shrinker_count() local
1964 class = pool->size_class[i]; in zs_shrinker_count()
1974 static void zs_unregister_shrinker(struct zs_pool *pool) in zs_unregister_shrinker() argument
1976 shrinker_free(pool->shrinker); in zs_unregister_shrinker()
1979 static int zs_register_shrinker(struct zs_pool *pool) in zs_register_shrinker() argument
1981 pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name); in zs_register_shrinker()
1982 if (!pool->shrinker) in zs_register_shrinker()
1985 pool->shrinker->scan_objects = zs_shrinker_scan; in zs_register_shrinker()
1986 pool->shrinker->count_objects = zs_shrinker_count; in zs_register_shrinker()
1987 pool->shrinker->batch = 0; in zs_register_shrinker()
1988 pool->shrinker->private_data = pool; in zs_register_shrinker()
1990 shrinker_register(pool->shrinker); in zs_register_shrinker()
2017 * zs_create_pool - Creates an allocation pool to work from.
2018 * @name: pool name to be created
2023 * On success, a pointer to the newly created pool is returned,
2029 struct zs_pool *pool; in zs_create_pool() local
2032 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in zs_create_pool()
2033 if (!pool) in zs_create_pool()
2036 init_deferred_free(pool); in zs_create_pool()
2037 rwlock_init(&pool->lock); in zs_create_pool()
2038 atomic_set(&pool->compaction_in_progress, 0); in zs_create_pool()
2040 pool->name = kstrdup(name, GFP_KERNEL); in zs_create_pool()
2041 if (!pool->name) in zs_create_pool()
2044 if (create_cache(pool)) in zs_create_pool()
2096 pool->size_class[i] = prev_class; in zs_create_pool()
2110 pool->size_class[i] = class; in zs_create_pool()
2122 zs_pool_stat_create(pool, name); in zs_create_pool()
2126 * defragmentation of the pool which is pretty optional thing. If in zs_create_pool()
2127 * registration fails we still can use the pool normally and user can in zs_create_pool()
2130 zs_register_shrinker(pool); in zs_create_pool()
2132 return pool; in zs_create_pool()
2135 zs_destroy_pool(pool); in zs_create_pool()
2140 void zs_destroy_pool(struct zs_pool *pool) in zs_destroy_pool() argument
2144 zs_unregister_shrinker(pool); in zs_destroy_pool()
2145 zs_flush_migration(pool); in zs_destroy_pool()
2146 zs_pool_stat_destroy(pool); in zs_destroy_pool()
2150 struct size_class *class = pool->size_class[i]; in zs_destroy_pool()
2168 destroy_cache(pool); in zs_destroy_pool()
2169 kfree(pool->name); in zs_destroy_pool()
2170 kfree(pool); in zs_destroy_pool()
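Finally, a hedged end-to-end sketch of the pool lifecycle provided by zs_create_pool() and zs_destroy_pool() above, wrapped in a trivial module; the "demo" naming is illustrative.

#include <linux/module.h>
#include <linux/zsmalloc.h>
#include <linux/errno.h>

static struct zs_pool *demo_pool;

static int __init demo_init(void)
{
	demo_pool = zs_create_pool("demo");	/* sets up caches, stats, shrinker */
	if (!demo_pool)
		return -ENOMEM;

	pr_info("demo: %lu pages allocated so far\n",
		zs_get_total_pages(demo_pool));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Unregisters the shrinker, flushes deferred frees, drops the caches. */
	zs_destroy_pool(demo_pool);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");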