// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * lock ordering:
 *	page_lock
 *	pool->lock
 *	class->lock
 *	zspage->lock
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
#include "zpdesc.h"

#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN	8

#define ZS_HANDLE_SIZE	(sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify the object was allocated or not.
 * It's okay to add the status bit in the least bit because
 * header keeps handle which is 4byte-aligned address so we
 * have room for two bits at least.
 */
#define OBJ_ALLOCATED_TAG	1

#define OBJ_TAG_BITS	1
#define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG

#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define HUGE_BITS	1
#define FULLNESS_BITS	4
#define CLASS_BITS	8
#define MAGIC_VAL_BITS	8

#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))

/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined).
 *    NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)

/*
 * Pages are distinguished by the ratio of used memory (that is the ratio
 * of ->inuse objects to all objects that page can store). For example,
 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
 *
 * The number of fullness groups is not random. It allows us to keep
 * difference between the least busy page in the group (minimum permitted
 * number of ->inuse objects) and the most busy page (maximum permitted
 * number of ->inuse objects) at a reasonable value.
 */
enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* NOTE: 8 more fullness groups here */
	ZS_INUSE_RATIO_99	= 10,
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};

enum class_stat_type {
	/* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
	ZS_OBJS_ALLOCATED	= NR_FULLNESS_GROUPS,
	ZS_OBJS_INUSE,
	NR_CLASS_STAT_TYPES,
};

struct zs_size_stat {
	unsigned long objs[NR_CLASS_STAT_TYPES];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

static size_t huge_class_size;

struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_FULLNESS_GROUPS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker *shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct work_struct free_work;
#endif
	/* protect zspage migration/compaction */
	rwlock_t lock;
	atomic_t compaction_in_progress;
};

static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
	SetPagePrivate(zpdesc_page(zpdesc));
}

static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
{
	inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}

static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
{
	dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}

static inline struct zpdesc *alloc_zpdesc(gfp_t gfp, const int nid)
{
	struct page *page = alloc_pages_node(nid, gfp, 0);

	return page_zpdesc(page);
}

static inline void free_zpdesc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	/* PageZsmalloc is sticky until the page is freed to the buddy. */
	__free_page(page);
}

#define ZS_PAGE_UNLOCKED	0
#define ZS_PAGE_WRLOCKED	-1

struct zspage_lock {
	spinlock_t lock;
	int cnt;
	struct lockdep_map dep_map;
};

struct zspage {
	struct {
		unsigned int huge:HUGE_BITS;
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct zpdesc *first_zpdesc;
	struct list_head list; /* fullness list */
	struct zs_pool *pool;
	struct zspage_lock zsl;
};

static void zspage_lock_init(struct zspage *zspage)
{
	static struct lock_class_key __key;
	struct zspage_lock *zsl = &zspage->zsl;

	lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0);
	spin_lock_init(&zsl->lock);
	zsl->cnt = ZS_PAGE_UNLOCKED;
}

/*
 * The zspage lock can be held from atomic contexts, but it needs to remain
 * preemptible when held for reading because it remains held outside of those
 * atomic contexts, otherwise we unnecessarily lose preemptibility.
 *
 * To achieve this, the following rules are enforced on readers and writers:
 *
 * - Writers are blocked by both writers and readers, while readers are only
 *   blocked by writers (i.e. normal rwlock semantics).
 *
 * - Writers are always atomic (to allow readers to spin waiting for them).
 *
 * - Writers always use trylock (as the lock may be held by sleeping readers).
 *
 * - Readers may spin on the lock (as they can only wait for atomic writers).
 *
 * - Readers may sleep while holding the lock (as writes only use trylock).
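 *
 * As an informal sketch of how the helpers below encode this in ->cnt:
 * ZS_PAGE_UNLOCKED (0) means nobody holds the lock, readers bump the count
 * up and down under the internal spinlock, and zspage_write_trylock() only
 * succeeds when it observes 0, setting ZS_PAGE_WRLOCKED (-1) until
 * zspage_write_unlock(). A typical writer therefore looks like:
 *
 *	if (zspage_write_trylock(zspage)) {
 *		... move objects around ...
 *		zspage_write_unlock(zspage);
 *	}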
302 */ 303 static void zspage_read_lock(struct zspage *zspage) 304 { 305 struct zspage_lock *zsl = &zspage->zsl; 306 307 rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_); 308 309 spin_lock(&zsl->lock); 310 zsl->cnt++; 311 spin_unlock(&zsl->lock); 312 313 lock_acquired(&zsl->dep_map, _RET_IP_); 314 } 315 316 static void zspage_read_unlock(struct zspage *zspage) 317 { 318 struct zspage_lock *zsl = &zspage->zsl; 319 320 rwsem_release(&zsl->dep_map, _RET_IP_); 321 322 spin_lock(&zsl->lock); 323 zsl->cnt--; 324 spin_unlock(&zsl->lock); 325 } 326 327 static __must_check bool zspage_write_trylock(struct zspage *zspage) 328 { 329 struct zspage_lock *zsl = &zspage->zsl; 330 331 spin_lock(&zsl->lock); 332 if (zsl->cnt == ZS_PAGE_UNLOCKED) { 333 zsl->cnt = ZS_PAGE_WRLOCKED; 334 rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_); 335 lock_acquired(&zsl->dep_map, _RET_IP_); 336 return true; 337 } 338 339 spin_unlock(&zsl->lock); 340 return false; 341 } 342 343 static void zspage_write_unlock(struct zspage *zspage) 344 { 345 struct zspage_lock *zsl = &zspage->zsl; 346 347 rwsem_release(&zsl->dep_map, _RET_IP_); 348 349 zsl->cnt = ZS_PAGE_UNLOCKED; 350 spin_unlock(&zsl->lock); 351 } 352 353 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ 354 static void SetZsHugePage(struct zspage *zspage) 355 { 356 zspage->huge = 1; 357 } 358 359 static bool ZsHugePage(struct zspage *zspage) 360 { 361 return zspage->huge; 362 } 363 364 #ifdef CONFIG_COMPACTION 365 static void kick_deferred_free(struct zs_pool *pool); 366 static void init_deferred_free(struct zs_pool *pool); 367 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage); 368 #else 369 static void kick_deferred_free(struct zs_pool *pool) {} 370 static void init_deferred_free(struct zs_pool *pool) {} 371 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} 372 #endif 373 374 static int create_cache(struct zs_pool *pool) 375 { 376 char *name; 377 378 name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name); 379 if (!name) 380 return -ENOMEM; 381 pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE, 382 0, 0, NULL); 383 kfree(name); 384 if (!pool->handle_cachep) 385 return -EINVAL; 386 387 name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name); 388 if (!name) 389 return -ENOMEM; 390 pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage), 391 0, 0, NULL); 392 kfree(name); 393 if (!pool->zspage_cachep) { 394 kmem_cache_destroy(pool->handle_cachep); 395 pool->handle_cachep = NULL; 396 return -EINVAL; 397 } 398 399 return 0; 400 } 401 402 static void destroy_cache(struct zs_pool *pool) 403 { 404 kmem_cache_destroy(pool->handle_cachep); 405 kmem_cache_destroy(pool->zspage_cachep); 406 } 407 408 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) 409 { 410 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, 411 gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); 412 } 413 414 static void cache_free_handle(struct zs_pool *pool, unsigned long handle) 415 { 416 kmem_cache_free(pool->handle_cachep, (void *)handle); 417 } 418 419 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) 420 { 421 return kmem_cache_zalloc(pool->zspage_cachep, 422 flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); 423 } 424 425 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) 426 { 427 kmem_cache_free(pool->zspage_cachep, zspage); 428 } 429 430 /* class->lock(which owns the handle) synchronizes races */ 431 static void record_obj(unsigned long handle, unsigned long 
obj) 432 { 433 *(unsigned long *)handle = obj; 434 } 435 436 /* zpool driver */ 437 438 #ifdef CONFIG_ZPOOL 439 440 static void *zs_zpool_create(const char *name, gfp_t gfp) 441 { 442 /* 443 * Ignore global gfp flags: zs_malloc() may be invoked from 444 * different contexts and its caller must provide a valid 445 * gfp mask. 446 */ 447 return zs_create_pool(name); 448 } 449 450 static void zs_zpool_destroy(void *pool) 451 { 452 zs_destroy_pool(pool); 453 } 454 455 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, 456 unsigned long *handle, const int nid) 457 { 458 *handle = zs_malloc(pool, size, gfp, nid); 459 460 if (IS_ERR_VALUE(*handle)) 461 return PTR_ERR((void *)*handle); 462 return 0; 463 } 464 static void zs_zpool_free(void *pool, unsigned long handle) 465 { 466 zs_free(pool, handle); 467 } 468 469 static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle, 470 void *local_copy) 471 { 472 return zs_obj_read_begin(pool, handle, local_copy); 473 } 474 475 static void zs_zpool_obj_read_end(void *pool, unsigned long handle, 476 void *handle_mem) 477 { 478 zs_obj_read_end(pool, handle, handle_mem); 479 } 480 481 static void zs_zpool_obj_write(void *pool, unsigned long handle, 482 void *handle_mem, size_t mem_len) 483 { 484 zs_obj_write(pool, handle, handle_mem, mem_len); 485 } 486 487 static u64 zs_zpool_total_pages(void *pool) 488 { 489 return zs_get_total_pages(pool); 490 } 491 492 static struct zpool_driver zs_zpool_driver = { 493 .type = "zsmalloc", 494 .owner = THIS_MODULE, 495 .create = zs_zpool_create, 496 .destroy = zs_zpool_destroy, 497 .malloc = zs_zpool_malloc, 498 .free = zs_zpool_free, 499 .obj_read_begin = zs_zpool_obj_read_begin, 500 .obj_read_end = zs_zpool_obj_read_end, 501 .obj_write = zs_zpool_obj_write, 502 .total_pages = zs_zpool_total_pages, 503 }; 504 505 MODULE_ALIAS("zpool-zsmalloc"); 506 #endif /* CONFIG_ZPOOL */ 507 508 static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc) 509 { 510 return PagePrivate(zpdesc_page(zpdesc)); 511 } 512 513 /* Protected by class->lock */ 514 static inline int get_zspage_inuse(struct zspage *zspage) 515 { 516 return zspage->inuse; 517 } 518 519 static inline void mod_zspage_inuse(struct zspage *zspage, int val) 520 { 521 zspage->inuse += val; 522 } 523 524 static struct zpdesc *get_first_zpdesc(struct zspage *zspage) 525 { 526 struct zpdesc *first_zpdesc = zspage->first_zpdesc; 527 528 VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc)); 529 return first_zpdesc; 530 } 531 532 #define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff 533 534 static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc) 535 { 536 VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); 537 return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK; 538 } 539 540 static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset) 541 { 542 /* With 24 bits available, we can support offsets into 16 MiB pages. 
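	 * Illustrative bound: with 4 KiB pages the offset stored here is
	 * always < 4096 and trivially fits in the 24-bit mask 0xffffff;
	 * the BUILD_BUG_ON below enforces that ceiling at compile time.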
*/ 543 BUILD_BUG_ON(PAGE_SIZE > SZ_16M); 544 VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); 545 VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK); 546 zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK; 547 zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK; 548 } 549 550 static inline unsigned int get_freeobj(struct zspage *zspage) 551 { 552 return zspage->freeobj; 553 } 554 555 static inline void set_freeobj(struct zspage *zspage, unsigned int obj) 556 { 557 zspage->freeobj = obj; 558 } 559 560 static struct size_class *zspage_class(struct zs_pool *pool, 561 struct zspage *zspage) 562 { 563 return pool->size_class[zspage->class]; 564 } 565 566 /* 567 * zsmalloc divides the pool into various size classes where each 568 * class maintains a list of zspages where each zspage is divided 569 * into equal sized chunks. Each allocation falls into one of these 570 * classes depending on its size. This function returns index of the 571 * size class which has chunk size big enough to hold the given size. 572 */ 573 static int get_size_class_index(int size) 574 { 575 int idx = 0; 576 577 if (likely(size > ZS_MIN_ALLOC_SIZE)) 578 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, 579 ZS_SIZE_CLASS_DELTA); 580 581 return min_t(int, ZS_SIZE_CLASSES - 1, idx); 582 } 583 584 static inline void class_stat_add(struct size_class *class, int type, 585 unsigned long cnt) 586 { 587 class->stats.objs[type] += cnt; 588 } 589 590 static inline void class_stat_sub(struct size_class *class, int type, 591 unsigned long cnt) 592 { 593 class->stats.objs[type] -= cnt; 594 } 595 596 static inline unsigned long class_stat_read(struct size_class *class, int type) 597 { 598 return class->stats.objs[type]; 599 } 600 601 #ifdef CONFIG_ZSMALLOC_STAT 602 603 static void __init zs_stat_init(void) 604 { 605 if (!debugfs_initialized()) { 606 pr_warn("debugfs not available, stat dir not created\n"); 607 return; 608 } 609 610 zs_stat_root = debugfs_create_dir("zsmalloc", NULL); 611 } 612 613 static void __exit zs_stat_exit(void) 614 { 615 debugfs_remove_recursive(zs_stat_root); 616 } 617 618 static unsigned long zs_can_compact(struct size_class *class); 619 620 static int zs_stats_size_show(struct seq_file *s, void *v) 621 { 622 int i, fg; 623 struct zs_pool *pool = s->private; 624 struct size_class *class; 625 int objs_per_zspage; 626 unsigned long obj_allocated, obj_used, pages_used, freeable; 627 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; 628 unsigned long total_freeable = 0; 629 unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, }; 630 631 seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n", 632 "class", "size", "10%", "20%", "30%", "40%", 633 "50%", "60%", "70%", "80%", "90%", "99%", "100%", 634 "obj_allocated", "obj_used", "pages_used", 635 "pages_per_zspage", "freeable"); 636 637 for (i = 0; i < ZS_SIZE_CLASSES; i++) { 638 639 class = pool->size_class[i]; 640 641 if (class->index != i) 642 continue; 643 644 spin_lock(&class->lock); 645 646 seq_printf(s, " %5u %5u ", i, class->size); 647 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) { 648 inuse_totals[fg] += class_stat_read(class, fg); 649 seq_printf(s, "%9lu ", class_stat_read(class, fg)); 650 } 651 652 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); 653 obj_used = class_stat_read(class, ZS_OBJS_INUSE); 654 freeable = zs_can_compact(class); 655 spin_unlock(&class->lock); 656 657 objs_per_zspage = class->objs_per_zspage; 658 pages_used = obj_allocated / 
objs_per_zspage * 659 class->pages_per_zspage; 660 661 seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n", 662 obj_allocated, obj_used, pages_used, 663 class->pages_per_zspage, freeable); 664 665 total_objs += obj_allocated; 666 total_used_objs += obj_used; 667 total_pages += pages_used; 668 total_freeable += freeable; 669 } 670 671 seq_puts(s, "\n"); 672 seq_printf(s, " %5s %5s ", "Total", ""); 673 674 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) 675 seq_printf(s, "%9lu ", inuse_totals[fg]); 676 677 seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n", 678 total_objs, total_used_objs, total_pages, "", 679 total_freeable); 680 681 return 0; 682 } 683 DEFINE_SHOW_ATTRIBUTE(zs_stats_size); 684 685 static void zs_pool_stat_create(struct zs_pool *pool, const char *name) 686 { 687 if (!zs_stat_root) { 688 pr_warn("no root stat dir, not creating <%s> stat dir\n", name); 689 return; 690 } 691 692 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); 693 694 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, 695 &zs_stats_size_fops); 696 } 697 698 static void zs_pool_stat_destroy(struct zs_pool *pool) 699 { 700 debugfs_remove_recursive(pool->stat_dentry); 701 } 702 703 #else /* CONFIG_ZSMALLOC_STAT */ 704 static void __init zs_stat_init(void) 705 { 706 } 707 708 static void __exit zs_stat_exit(void) 709 { 710 } 711 712 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name) 713 { 714 } 715 716 static inline void zs_pool_stat_destroy(struct zs_pool *pool) 717 { 718 } 719 #endif 720 721 722 /* 723 * For each size class, zspages are divided into different groups 724 * depending on their usage ratio. This function returns fullness 725 * status of the given page. 726 */ 727 static int get_fullness_group(struct size_class *class, struct zspage *zspage) 728 { 729 int inuse, objs_per_zspage, ratio; 730 731 inuse = get_zspage_inuse(zspage); 732 objs_per_zspage = class->objs_per_zspage; 733 734 if (inuse == 0) 735 return ZS_INUSE_RATIO_0; 736 if (inuse == objs_per_zspage) 737 return ZS_INUSE_RATIO_100; 738 739 ratio = 100 * inuse / objs_per_zspage; 740 /* 741 * Take integer division into consideration: a page with one inuse 742 * object out of 127 possible, will end up having 0 usage ratio, 743 * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group. 744 */ 745 return ratio / 10 + 1; 746 } 747 748 /* 749 * Each size class maintains various freelists and zspages are assigned 750 * to one of these freelists based on the number of live objects they 751 * have. This functions inserts the given zspage into the freelist 752 * identified by <class, fullness_group>. 753 */ 754 static void insert_zspage(struct size_class *class, 755 struct zspage *zspage, 756 int fullness) 757 { 758 class_stat_add(class, fullness, 1); 759 list_add(&zspage->list, &class->fullness_list[fullness]); 760 zspage->fullness = fullness; 761 } 762 763 /* 764 * This function removes the given zspage from the freelist identified 765 * by <class, fullness_group>. 766 */ 767 static void remove_zspage(struct size_class *class, struct zspage *zspage) 768 { 769 int fullness = zspage->fullness; 770 771 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); 772 773 list_del_init(&zspage->list); 774 class_stat_sub(class, fullness, 1); 775 } 776 777 /* 778 * Each size class maintains zspages in different fullness groups depending 779 * on the number of live objects they contain. 
When allocating or freeing 780 * objects, the fullness status of the page can change, for instance, from 781 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function 782 * checks if such a status change has occurred for the given page and 783 * accordingly moves the page from the list of the old fullness group to that 784 * of the new fullness group. 785 */ 786 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) 787 { 788 int newfg; 789 790 newfg = get_fullness_group(class, zspage); 791 if (newfg == zspage->fullness) 792 goto out; 793 794 remove_zspage(class, zspage); 795 insert_zspage(class, zspage, newfg); 796 out: 797 return newfg; 798 } 799 800 static struct zspage *get_zspage(struct zpdesc *zpdesc) 801 { 802 struct zspage *zspage = zpdesc->zspage; 803 804 BUG_ON(zspage->magic != ZSPAGE_MAGIC); 805 return zspage; 806 } 807 808 static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) 809 { 810 struct zspage *zspage = get_zspage(zpdesc); 811 812 if (unlikely(ZsHugePage(zspage))) 813 return NULL; 814 815 return zpdesc->next; 816 } 817 818 /** 819 * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value 820 * @obj: the encoded object value 821 * @zpdesc: zpdesc object resides in zspage 822 * @obj_idx: object index 823 */ 824 static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc, 825 unsigned int *obj_idx) 826 { 827 *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); 828 *obj_idx = (obj & OBJ_INDEX_MASK); 829 } 830 831 static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc) 832 { 833 *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); 834 } 835 836 /** 837 * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>) 838 * @zpdesc: zpdesc object resides in zspage 839 * @obj_idx: object index 840 */ 841 static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx) 842 { 843 unsigned long obj; 844 845 obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS; 846 obj |= obj_idx & OBJ_INDEX_MASK; 847 848 return obj; 849 } 850 851 static unsigned long handle_to_obj(unsigned long handle) 852 { 853 return *(unsigned long *)handle; 854 } 855 856 static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj, 857 unsigned long *phandle) 858 { 859 unsigned long handle; 860 struct zspage *zspage = get_zspage(zpdesc); 861 862 if (unlikely(ZsHugePage(zspage))) { 863 VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc)); 864 handle = zpdesc->handle; 865 } else 866 handle = *(unsigned long *)obj; 867 868 if (!(handle & OBJ_ALLOCATED_TAG)) 869 return false; 870 871 /* Clear all tags before returning the handle */ 872 *phandle = handle & ~OBJ_TAG_MASK; 873 return true; 874 } 875 876 static void reset_zpdesc(struct zpdesc *zpdesc) 877 { 878 struct page *page = zpdesc_page(zpdesc); 879 880 ClearPagePrivate(page); 881 zpdesc->zspage = NULL; 882 zpdesc->next = NULL; 883 /* PageZsmalloc is sticky until the page is freed to the buddy. 
*/ 884 } 885 886 static int trylock_zspage(struct zspage *zspage) 887 { 888 struct zpdesc *cursor, *fail; 889 890 for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor = 891 get_next_zpdesc(cursor)) { 892 if (!zpdesc_trylock(cursor)) { 893 fail = cursor; 894 goto unlock; 895 } 896 } 897 898 return 1; 899 unlock: 900 for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor = 901 get_next_zpdesc(cursor)) 902 zpdesc_unlock(cursor); 903 904 return 0; 905 } 906 907 static void __free_zspage(struct zs_pool *pool, struct size_class *class, 908 struct zspage *zspage) 909 { 910 struct zpdesc *zpdesc, *next; 911 912 assert_spin_locked(&class->lock); 913 914 VM_BUG_ON(get_zspage_inuse(zspage)); 915 VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); 916 917 next = zpdesc = get_first_zpdesc(zspage); 918 do { 919 VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc)); 920 next = get_next_zpdesc(zpdesc); 921 reset_zpdesc(zpdesc); 922 zpdesc_unlock(zpdesc); 923 zpdesc_dec_zone_page_state(zpdesc); 924 zpdesc_put(zpdesc); 925 zpdesc = next; 926 } while (zpdesc != NULL); 927 928 cache_free_zspage(pool, zspage); 929 930 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); 931 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); 932 } 933 934 static void free_zspage(struct zs_pool *pool, struct size_class *class, 935 struct zspage *zspage) 936 { 937 VM_BUG_ON(get_zspage_inuse(zspage)); 938 VM_BUG_ON(list_empty(&zspage->list)); 939 940 /* 941 * Since zs_free couldn't be sleepable, this function cannot call 942 * lock_page. The page locks trylock_zspage got will be released 943 * by __free_zspage. 944 */ 945 if (!trylock_zspage(zspage)) { 946 kick_deferred_free(pool); 947 return; 948 } 949 950 remove_zspage(class, zspage); 951 __free_zspage(pool, class, zspage); 952 } 953 954 /* Initialize a newly allocated zspage */ 955 static void init_zspage(struct size_class *class, struct zspage *zspage) 956 { 957 unsigned int freeobj = 1; 958 unsigned long off = 0; 959 struct zpdesc *zpdesc = get_first_zpdesc(zspage); 960 961 while (zpdesc) { 962 struct zpdesc *next_zpdesc; 963 struct link_free *link; 964 void *vaddr; 965 966 set_first_obj_offset(zpdesc, off); 967 968 vaddr = kmap_local_zpdesc(zpdesc); 969 link = (struct link_free *)vaddr + off / sizeof(*link); 970 971 while ((off += class->size) < PAGE_SIZE) { 972 link->next = freeobj++ << OBJ_TAG_BITS; 973 link += class->size / sizeof(*link); 974 } 975 976 /* 977 * We now come to the last (full or partial) object on this 978 * page, which must point to the first object on the next 979 * page (if present) 980 */ 981 next_zpdesc = get_next_zpdesc(zpdesc); 982 if (next_zpdesc) { 983 link->next = freeobj++ << OBJ_TAG_BITS; 984 } else { 985 /* 986 * Reset OBJ_TAG_BITS bit to last link to tell 987 * whether it's allocated object or not. 988 */ 989 link->next = -1UL << OBJ_TAG_BITS; 990 } 991 kunmap_local(vaddr); 992 zpdesc = next_zpdesc; 993 off %= PAGE_SIZE; 994 } 995 996 set_freeobj(zspage, 0); 997 } 998 999 static void create_page_chain(struct size_class *class, struct zspage *zspage, 1000 struct zpdesc *zpdescs[]) 1001 { 1002 int i; 1003 struct zpdesc *zpdesc; 1004 struct zpdesc *prev_zpdesc = NULL; 1005 int nr_zpdescs = class->pages_per_zspage; 1006 1007 /* 1008 * Allocate individual pages and link them together as: 1009 * 1. all pages are linked together using zpdesc->next 1010 * 2. each sub-page point to zspage using zpdesc->zspage 1011 * 1012 * we set PG_private to identify the first zpdesc (i.e. 
no other zpdesc 1013 * has this flag set). 1014 */ 1015 for (i = 0; i < nr_zpdescs; i++) { 1016 zpdesc = zpdescs[i]; 1017 zpdesc->zspage = zspage; 1018 zpdesc->next = NULL; 1019 if (i == 0) { 1020 zspage->first_zpdesc = zpdesc; 1021 zpdesc_set_first(zpdesc); 1022 if (unlikely(class->objs_per_zspage == 1 && 1023 class->pages_per_zspage == 1)) 1024 SetZsHugePage(zspage); 1025 } else { 1026 prev_zpdesc->next = zpdesc; 1027 } 1028 prev_zpdesc = zpdesc; 1029 } 1030 } 1031 1032 /* 1033 * Allocate a zspage for the given size class 1034 */ 1035 static struct zspage *alloc_zspage(struct zs_pool *pool, 1036 struct size_class *class, 1037 gfp_t gfp, const int nid) 1038 { 1039 int i; 1040 struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE]; 1041 struct zspage *zspage = cache_alloc_zspage(pool, gfp); 1042 1043 if (!zspage) 1044 return NULL; 1045 1046 zspage->magic = ZSPAGE_MAGIC; 1047 zspage->pool = pool; 1048 zspage->class = class->index; 1049 zspage_lock_init(zspage); 1050 1051 for (i = 0; i < class->pages_per_zspage; i++) { 1052 struct zpdesc *zpdesc; 1053 1054 zpdesc = alloc_zpdesc(gfp, nid); 1055 if (!zpdesc) { 1056 while (--i >= 0) { 1057 zpdesc_dec_zone_page_state(zpdescs[i]); 1058 free_zpdesc(zpdescs[i]); 1059 } 1060 cache_free_zspage(pool, zspage); 1061 return NULL; 1062 } 1063 __zpdesc_set_zsmalloc(zpdesc); 1064 1065 zpdesc_inc_zone_page_state(zpdesc); 1066 zpdescs[i] = zpdesc; 1067 } 1068 1069 create_page_chain(class, zspage, zpdescs); 1070 init_zspage(class, zspage); 1071 1072 return zspage; 1073 } 1074 1075 static struct zspage *find_get_zspage(struct size_class *class) 1076 { 1077 int i; 1078 struct zspage *zspage; 1079 1080 for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) { 1081 zspage = list_first_entry_or_null(&class->fullness_list[i], 1082 struct zspage, list); 1083 if (zspage) 1084 break; 1085 } 1086 1087 return zspage; 1088 } 1089 1090 static bool can_merge(struct size_class *prev, int pages_per_zspage, 1091 int objs_per_zspage) 1092 { 1093 if (prev->pages_per_zspage == pages_per_zspage && 1094 prev->objs_per_zspage == objs_per_zspage) 1095 return true; 1096 1097 return false; 1098 } 1099 1100 static bool zspage_full(struct size_class *class, struct zspage *zspage) 1101 { 1102 return get_zspage_inuse(zspage) == class->objs_per_zspage; 1103 } 1104 1105 static bool zspage_empty(struct zspage *zspage) 1106 { 1107 return get_zspage_inuse(zspage) == 0; 1108 } 1109 1110 /** 1111 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class 1112 * that hold objects of the provided size. 1113 * @pool: zsmalloc pool to use 1114 * @size: object size 1115 * 1116 * Context: Any context. 1117 * 1118 * Return: the index of the zsmalloc &size_class that hold objects of the 1119 * provided size. 
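 *
 * Hypothetical example, assuming 4 KiB pages (ZS_SIZE_CLASS_DELTA = 16,
 * ZS_MIN_ALLOC_SIZE = 32): a 1000-byte request computes a raw index of
 * DIV_ROUND_UP(1000 - 32, 16) = 61; the value actually returned is the
 * ->index of the (possibly merged) size_class installed at that slot.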
1120 */ 1121 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size) 1122 { 1123 struct size_class *class; 1124 1125 class = pool->size_class[get_size_class_index(size)]; 1126 1127 return class->index; 1128 } 1129 EXPORT_SYMBOL_GPL(zs_lookup_class_index); 1130 1131 unsigned long zs_get_total_pages(struct zs_pool *pool) 1132 { 1133 return atomic_long_read(&pool->pages_allocated); 1134 } 1135 EXPORT_SYMBOL_GPL(zs_get_total_pages); 1136 1137 void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, 1138 void *local_copy) 1139 { 1140 struct zspage *zspage; 1141 struct zpdesc *zpdesc; 1142 unsigned long obj, off; 1143 unsigned int obj_idx; 1144 struct size_class *class; 1145 void *addr; 1146 1147 /* Guarantee we can get zspage from handle safely */ 1148 read_lock(&pool->lock); 1149 obj = handle_to_obj(handle); 1150 obj_to_location(obj, &zpdesc, &obj_idx); 1151 zspage = get_zspage(zpdesc); 1152 1153 /* Make sure migration doesn't move any pages in this zspage */ 1154 zspage_read_lock(zspage); 1155 read_unlock(&pool->lock); 1156 1157 class = zspage_class(pool, zspage); 1158 off = offset_in_page(class->size * obj_idx); 1159 1160 if (off + class->size <= PAGE_SIZE) { 1161 /* this object is contained entirely within a page */ 1162 addr = kmap_local_zpdesc(zpdesc); 1163 addr += off; 1164 } else { 1165 size_t sizes[2]; 1166 1167 /* this object spans two pages */ 1168 sizes[0] = PAGE_SIZE - off; 1169 sizes[1] = class->size - sizes[0]; 1170 addr = local_copy; 1171 1172 memcpy_from_page(addr, zpdesc_page(zpdesc), 1173 off, sizes[0]); 1174 zpdesc = get_next_zpdesc(zpdesc); 1175 memcpy_from_page(addr + sizes[0], 1176 zpdesc_page(zpdesc), 1177 0, sizes[1]); 1178 } 1179 1180 if (!ZsHugePage(zspage)) 1181 addr += ZS_HANDLE_SIZE; 1182 1183 return addr; 1184 } 1185 EXPORT_SYMBOL_GPL(zs_obj_read_begin); 1186 1187 void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, 1188 void *handle_mem) 1189 { 1190 struct zspage *zspage; 1191 struct zpdesc *zpdesc; 1192 unsigned long obj, off; 1193 unsigned int obj_idx; 1194 struct size_class *class; 1195 1196 obj = handle_to_obj(handle); 1197 obj_to_location(obj, &zpdesc, &obj_idx); 1198 zspage = get_zspage(zpdesc); 1199 class = zspage_class(pool, zspage); 1200 off = offset_in_page(class->size * obj_idx); 1201 1202 if (off + class->size <= PAGE_SIZE) { 1203 if (!ZsHugePage(zspage)) 1204 off += ZS_HANDLE_SIZE; 1205 handle_mem -= off; 1206 kunmap_local(handle_mem); 1207 } 1208 1209 zspage_read_unlock(zspage); 1210 } 1211 EXPORT_SYMBOL_GPL(zs_obj_read_end); 1212 1213 void zs_obj_write(struct zs_pool *pool, unsigned long handle, 1214 void *handle_mem, size_t mem_len) 1215 { 1216 struct zspage *zspage; 1217 struct zpdesc *zpdesc; 1218 unsigned long obj, off; 1219 unsigned int obj_idx; 1220 struct size_class *class; 1221 1222 /* Guarantee we can get zspage from handle safely */ 1223 read_lock(&pool->lock); 1224 obj = handle_to_obj(handle); 1225 obj_to_location(obj, &zpdesc, &obj_idx); 1226 zspage = get_zspage(zpdesc); 1227 1228 /* Make sure migration doesn't move any pages in this zspage */ 1229 zspage_read_lock(zspage); 1230 read_unlock(&pool->lock); 1231 1232 class = zspage_class(pool, zspage); 1233 off = offset_in_page(class->size * obj_idx); 1234 1235 if (!ZsHugePage(zspage)) 1236 off += ZS_HANDLE_SIZE; 1237 1238 if (off + mem_len <= PAGE_SIZE) { 1239 /* this object is contained entirely within a page */ 1240 void *dst = kmap_local_zpdesc(zpdesc); 1241 1242 memcpy(dst + off, handle_mem, mem_len); 1243 kunmap_local(dst); 1244 } else { 
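		/*
		 * Hypothetical numbers for the split below: with a 4 KiB page,
		 * off = 4000 and mem_len = 300, sizes[0] = 4096 - 4000 = 96
		 * bytes are copied to the tail of this zpdesc and the
		 * remaining sizes[1] = 204 bytes to the start of the next one.
		 */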
1245 /* this object spans two pages */ 1246 size_t sizes[2]; 1247 1248 sizes[0] = PAGE_SIZE - off; 1249 sizes[1] = mem_len - sizes[0]; 1250 1251 memcpy_to_page(zpdesc_page(zpdesc), off, 1252 handle_mem, sizes[0]); 1253 zpdesc = get_next_zpdesc(zpdesc); 1254 memcpy_to_page(zpdesc_page(zpdesc), 0, 1255 handle_mem + sizes[0], sizes[1]); 1256 } 1257 1258 zspage_read_unlock(zspage); 1259 } 1260 EXPORT_SYMBOL_GPL(zs_obj_write); 1261 1262 /** 1263 * zs_huge_class_size() - Returns the size (in bytes) of the first huge 1264 * zsmalloc &size_class. 1265 * @pool: zsmalloc pool to use 1266 * 1267 * The function returns the size of the first huge class - any object of equal 1268 * or bigger size will be stored in zspage consisting of a single physical 1269 * page. 1270 * 1271 * Context: Any context. 1272 * 1273 * Return: the size (in bytes) of the first huge zsmalloc &size_class. 1274 */ 1275 size_t zs_huge_class_size(struct zs_pool *pool) 1276 { 1277 return huge_class_size; 1278 } 1279 EXPORT_SYMBOL_GPL(zs_huge_class_size); 1280 1281 static unsigned long obj_malloc(struct zs_pool *pool, 1282 struct zspage *zspage, unsigned long handle) 1283 { 1284 int i, nr_zpdesc, offset; 1285 unsigned long obj; 1286 struct link_free *link; 1287 struct size_class *class; 1288 1289 struct zpdesc *m_zpdesc; 1290 unsigned long m_offset; 1291 void *vaddr; 1292 1293 class = pool->size_class[zspage->class]; 1294 obj = get_freeobj(zspage); 1295 1296 offset = obj * class->size; 1297 nr_zpdesc = offset >> PAGE_SHIFT; 1298 m_offset = offset_in_page(offset); 1299 m_zpdesc = get_first_zpdesc(zspage); 1300 1301 for (i = 0; i < nr_zpdesc; i++) 1302 m_zpdesc = get_next_zpdesc(m_zpdesc); 1303 1304 vaddr = kmap_local_zpdesc(m_zpdesc); 1305 link = (struct link_free *)vaddr + m_offset / sizeof(*link); 1306 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); 1307 if (likely(!ZsHugePage(zspage))) 1308 /* record handle in the header of allocated chunk */ 1309 link->handle = handle | OBJ_ALLOCATED_TAG; 1310 else 1311 zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG; 1312 1313 kunmap_local(vaddr); 1314 mod_zspage_inuse(zspage, 1); 1315 1316 obj = location_to_obj(m_zpdesc, obj); 1317 record_obj(handle, obj); 1318 1319 return obj; 1320 } 1321 1322 1323 /** 1324 * zs_malloc - Allocate block of given size from pool. 1325 * @pool: pool to allocate from 1326 * @size: size of block to allocate 1327 * @gfp: gfp flags when allocating object 1328 * @nid: The preferred node id to allocate new zspage (if needed) 1329 * 1330 * On success, handle to the allocated object is returned, 1331 * otherwise an ERR_PTR(). 1332 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
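 *
 * Minimal usage sketch (error handling condensed; "my_pool" is a
 * hypothetical pool previously created with zs_create_pool() and "len"
 * the payload size):
 *
 *	handle = zs_malloc(my_pool, len, GFP_KERNEL, NUMA_NO_NODE);
 *	if (IS_ERR_VALUE(handle))
 *		return PTR_ERR((void *)handle);
 *	...
 *	zs_free(my_pool, handle);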
1333 */ 1334 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp, 1335 const int nid) 1336 { 1337 unsigned long handle; 1338 struct size_class *class; 1339 int newfg; 1340 struct zspage *zspage; 1341 1342 if (unlikely(!size)) 1343 return (unsigned long)ERR_PTR(-EINVAL); 1344 1345 if (unlikely(size > ZS_MAX_ALLOC_SIZE)) 1346 return (unsigned long)ERR_PTR(-ENOSPC); 1347 1348 handle = cache_alloc_handle(pool, gfp); 1349 if (!handle) 1350 return (unsigned long)ERR_PTR(-ENOMEM); 1351 1352 /* extra space in chunk to keep the handle */ 1353 size += ZS_HANDLE_SIZE; 1354 class = pool->size_class[get_size_class_index(size)]; 1355 1356 /* class->lock effectively protects the zpage migration */ 1357 spin_lock(&class->lock); 1358 zspage = find_get_zspage(class); 1359 if (likely(zspage)) { 1360 obj_malloc(pool, zspage, handle); 1361 /* Now move the zspage to another fullness group, if required */ 1362 fix_fullness_group(class, zspage); 1363 class_stat_add(class, ZS_OBJS_INUSE, 1); 1364 1365 goto out; 1366 } 1367 1368 spin_unlock(&class->lock); 1369 1370 zspage = alloc_zspage(pool, class, gfp, nid); 1371 if (!zspage) { 1372 cache_free_handle(pool, handle); 1373 return (unsigned long)ERR_PTR(-ENOMEM); 1374 } 1375 1376 spin_lock(&class->lock); 1377 obj_malloc(pool, zspage, handle); 1378 newfg = get_fullness_group(class, zspage); 1379 insert_zspage(class, zspage, newfg); 1380 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); 1381 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); 1382 class_stat_add(class, ZS_OBJS_INUSE, 1); 1383 1384 /* We completely set up zspage so mark them as movable */ 1385 SetZsPageMovable(pool, zspage); 1386 out: 1387 spin_unlock(&class->lock); 1388 1389 return handle; 1390 } 1391 EXPORT_SYMBOL_GPL(zs_malloc); 1392 1393 static void obj_free(int class_size, unsigned long obj) 1394 { 1395 struct link_free *link; 1396 struct zspage *zspage; 1397 struct zpdesc *f_zpdesc; 1398 unsigned long f_offset; 1399 unsigned int f_objidx; 1400 void *vaddr; 1401 1402 1403 obj_to_location(obj, &f_zpdesc, &f_objidx); 1404 f_offset = offset_in_page(class_size * f_objidx); 1405 zspage = get_zspage(f_zpdesc); 1406 1407 vaddr = kmap_local_zpdesc(f_zpdesc); 1408 link = (struct link_free *)(vaddr + f_offset); 1409 1410 /* Insert this object in containing zspage's freelist */ 1411 if (likely(!ZsHugePage(zspage))) 1412 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; 1413 else 1414 f_zpdesc->handle = 0; 1415 set_freeobj(zspage, f_objidx); 1416 1417 kunmap_local(vaddr); 1418 mod_zspage_inuse(zspage, -1); 1419 } 1420 1421 void zs_free(struct zs_pool *pool, unsigned long handle) 1422 { 1423 struct zspage *zspage; 1424 struct zpdesc *f_zpdesc; 1425 unsigned long obj; 1426 struct size_class *class; 1427 int fullness; 1428 1429 if (IS_ERR_OR_NULL((void *)handle)) 1430 return; 1431 1432 /* 1433 * The pool->lock protects the race with zpage's migration 1434 * so it's safe to get the page from handle. 
1435 */ 1436 read_lock(&pool->lock); 1437 obj = handle_to_obj(handle); 1438 obj_to_zpdesc(obj, &f_zpdesc); 1439 zspage = get_zspage(f_zpdesc); 1440 class = zspage_class(pool, zspage); 1441 spin_lock(&class->lock); 1442 read_unlock(&pool->lock); 1443 1444 class_stat_sub(class, ZS_OBJS_INUSE, 1); 1445 obj_free(class->size, obj); 1446 1447 fullness = fix_fullness_group(class, zspage); 1448 if (fullness == ZS_INUSE_RATIO_0) 1449 free_zspage(pool, class, zspage); 1450 1451 spin_unlock(&class->lock); 1452 cache_free_handle(pool, handle); 1453 } 1454 EXPORT_SYMBOL_GPL(zs_free); 1455 1456 static void zs_object_copy(struct size_class *class, unsigned long dst, 1457 unsigned long src) 1458 { 1459 struct zpdesc *s_zpdesc, *d_zpdesc; 1460 unsigned int s_objidx, d_objidx; 1461 unsigned long s_off, d_off; 1462 void *s_addr, *d_addr; 1463 int s_size, d_size, size; 1464 int written = 0; 1465 1466 s_size = d_size = class->size; 1467 1468 obj_to_location(src, &s_zpdesc, &s_objidx); 1469 obj_to_location(dst, &d_zpdesc, &d_objidx); 1470 1471 s_off = offset_in_page(class->size * s_objidx); 1472 d_off = offset_in_page(class->size * d_objidx); 1473 1474 if (s_off + class->size > PAGE_SIZE) 1475 s_size = PAGE_SIZE - s_off; 1476 1477 if (d_off + class->size > PAGE_SIZE) 1478 d_size = PAGE_SIZE - d_off; 1479 1480 s_addr = kmap_local_zpdesc(s_zpdesc); 1481 d_addr = kmap_local_zpdesc(d_zpdesc); 1482 1483 while (1) { 1484 size = min(s_size, d_size); 1485 memcpy(d_addr + d_off, s_addr + s_off, size); 1486 written += size; 1487 1488 if (written == class->size) 1489 break; 1490 1491 s_off += size; 1492 s_size -= size; 1493 d_off += size; 1494 d_size -= size; 1495 1496 /* 1497 * Calling kunmap_local(d_addr) is necessary. kunmap_local() 1498 * calls must occurs in reverse order of calls to kmap_local_page(). 1499 * So, to call kunmap_local(s_addr) we should first call 1500 * kunmap_local(d_addr). For more details see 1501 * Documentation/mm/highmem.rst. 1502 */ 1503 if (s_off >= PAGE_SIZE) { 1504 kunmap_local(d_addr); 1505 kunmap_local(s_addr); 1506 s_zpdesc = get_next_zpdesc(s_zpdesc); 1507 s_addr = kmap_local_zpdesc(s_zpdesc); 1508 d_addr = kmap_local_zpdesc(d_zpdesc); 1509 s_size = class->size - written; 1510 s_off = 0; 1511 } 1512 1513 if (d_off >= PAGE_SIZE) { 1514 kunmap_local(d_addr); 1515 d_zpdesc = get_next_zpdesc(d_zpdesc); 1516 d_addr = kmap_local_zpdesc(d_zpdesc); 1517 d_size = class->size - written; 1518 d_off = 0; 1519 } 1520 } 1521 1522 kunmap_local(d_addr); 1523 kunmap_local(s_addr); 1524 } 1525 1526 /* 1527 * Find alloced object in zspage from index object and 1528 * return handle. 
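 *
 * For instance (hypothetical class with 176-byte objects and *obj_idx == 3),
 * the scan starts at first_obj_offset + 3 * 176 within this page and keeps
 * stepping by 176 bytes until a header carrying OBJ_ALLOCATED_TAG is found
 * or the page ends.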
1529 */ 1530 static unsigned long find_alloced_obj(struct size_class *class, 1531 struct zpdesc *zpdesc, int *obj_idx) 1532 { 1533 unsigned int offset; 1534 int index = *obj_idx; 1535 unsigned long handle = 0; 1536 void *addr = kmap_local_zpdesc(zpdesc); 1537 1538 offset = get_first_obj_offset(zpdesc); 1539 offset += class->size * index; 1540 1541 while (offset < PAGE_SIZE) { 1542 if (obj_allocated(zpdesc, addr + offset, &handle)) 1543 break; 1544 1545 offset += class->size; 1546 index++; 1547 } 1548 1549 kunmap_local(addr); 1550 1551 *obj_idx = index; 1552 1553 return handle; 1554 } 1555 1556 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, 1557 struct zspage *dst_zspage) 1558 { 1559 unsigned long used_obj, free_obj; 1560 unsigned long handle; 1561 int obj_idx = 0; 1562 struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage); 1563 struct size_class *class = pool->size_class[src_zspage->class]; 1564 1565 while (1) { 1566 handle = find_alloced_obj(class, s_zpdesc, &obj_idx); 1567 if (!handle) { 1568 s_zpdesc = get_next_zpdesc(s_zpdesc); 1569 if (!s_zpdesc) 1570 break; 1571 obj_idx = 0; 1572 continue; 1573 } 1574 1575 used_obj = handle_to_obj(handle); 1576 free_obj = obj_malloc(pool, dst_zspage, handle); 1577 zs_object_copy(class, free_obj, used_obj); 1578 obj_idx++; 1579 obj_free(class->size, used_obj); 1580 1581 /* Stop if there is no more space */ 1582 if (zspage_full(class, dst_zspage)) 1583 break; 1584 1585 /* Stop if there are no more objects to migrate */ 1586 if (zspage_empty(src_zspage)) 1587 break; 1588 } 1589 } 1590 1591 static struct zspage *isolate_src_zspage(struct size_class *class) 1592 { 1593 struct zspage *zspage; 1594 int fg; 1595 1596 for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) { 1597 zspage = list_first_entry_or_null(&class->fullness_list[fg], 1598 struct zspage, list); 1599 if (zspage) { 1600 remove_zspage(class, zspage); 1601 return zspage; 1602 } 1603 } 1604 1605 return zspage; 1606 } 1607 1608 static struct zspage *isolate_dst_zspage(struct size_class *class) 1609 { 1610 struct zspage *zspage; 1611 int fg; 1612 1613 for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) { 1614 zspage = list_first_entry_or_null(&class->fullness_list[fg], 1615 struct zspage, list); 1616 if (zspage) { 1617 remove_zspage(class, zspage); 1618 return zspage; 1619 } 1620 } 1621 1622 return zspage; 1623 } 1624 1625 /* 1626 * putback_zspage - add @zspage into right class's fullness list 1627 * @class: destination class 1628 * @zspage: target page 1629 * 1630 * Return @zspage's fullness status 1631 */ 1632 static int putback_zspage(struct size_class *class, struct zspage *zspage) 1633 { 1634 int fullness; 1635 1636 fullness = get_fullness_group(class, zspage); 1637 insert_zspage(class, zspage, fullness); 1638 1639 return fullness; 1640 } 1641 1642 #ifdef CONFIG_COMPACTION 1643 /* 1644 * To prevent zspage destroy during migration, zspage freeing should 1645 * hold locks of all pages in the zspage. 1646 */ 1647 static void lock_zspage(struct zspage *zspage) 1648 { 1649 struct zpdesc *curr_zpdesc, *zpdesc; 1650 1651 /* 1652 * Pages we haven't locked yet can be migrated off the list while we're 1653 * trying to lock them, so we need to be careful and only attempt to 1654 * lock each page under zspage_read_lock(). Otherwise, the page we lock 1655 * may no longer belong to the zspage. 
This means that we may wait for 1656 * the wrong page to unlock, so we must take a reference to the page 1657 * prior to waiting for it to unlock outside zspage_read_lock(). 1658 */ 1659 while (1) { 1660 zspage_read_lock(zspage); 1661 zpdesc = get_first_zpdesc(zspage); 1662 if (zpdesc_trylock(zpdesc)) 1663 break; 1664 zpdesc_get(zpdesc); 1665 zspage_read_unlock(zspage); 1666 zpdesc_wait_locked(zpdesc); 1667 zpdesc_put(zpdesc); 1668 } 1669 1670 curr_zpdesc = zpdesc; 1671 while ((zpdesc = get_next_zpdesc(curr_zpdesc))) { 1672 if (zpdesc_trylock(zpdesc)) { 1673 curr_zpdesc = zpdesc; 1674 } else { 1675 zpdesc_get(zpdesc); 1676 zspage_read_unlock(zspage); 1677 zpdesc_wait_locked(zpdesc); 1678 zpdesc_put(zpdesc); 1679 zspage_read_lock(zspage); 1680 } 1681 } 1682 zspage_read_unlock(zspage); 1683 } 1684 #endif /* CONFIG_COMPACTION */ 1685 1686 #ifdef CONFIG_COMPACTION 1687 1688 static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1689 struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc) 1690 { 1691 struct zpdesc *zpdesc; 1692 struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; 1693 unsigned int first_obj_offset; 1694 int idx = 0; 1695 1696 zpdesc = get_first_zpdesc(zspage); 1697 do { 1698 if (zpdesc == oldzpdesc) 1699 zpdescs[idx] = newzpdesc; 1700 else 1701 zpdescs[idx] = zpdesc; 1702 idx++; 1703 } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL); 1704 1705 create_page_chain(class, zspage, zpdescs); 1706 first_obj_offset = get_first_obj_offset(oldzpdesc); 1707 set_first_obj_offset(newzpdesc, first_obj_offset); 1708 if (unlikely(ZsHugePage(zspage))) 1709 newzpdesc->handle = oldzpdesc->handle; 1710 __zpdesc_set_movable(newzpdesc); 1711 } 1712 1713 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) 1714 { 1715 /* 1716 * Page is locked so zspage can't be destroyed concurrently 1717 * (see free_zspage()). But if the page was already destroyed 1718 * (see reset_zpdesc()), refuse isolation here. 1719 */ 1720 return page_zpdesc(page)->zspage; 1721 } 1722 1723 static int zs_page_migrate(struct page *newpage, struct page *page, 1724 enum migrate_mode mode) 1725 { 1726 struct zs_pool *pool; 1727 struct size_class *class; 1728 struct zspage *zspage; 1729 struct zpdesc *dummy; 1730 struct zpdesc *newzpdesc = page_zpdesc(newpage); 1731 struct zpdesc *zpdesc = page_zpdesc(page); 1732 void *s_addr, *d_addr, *addr; 1733 unsigned int offset; 1734 unsigned long handle; 1735 unsigned long old_obj, new_obj; 1736 unsigned int obj_idx; 1737 1738 /* 1739 * TODO: nothing prevents a zspage from getting destroyed while 1740 * it is isolated for migration, as the page lock is temporarily 1741 * dropped after zs_page_isolate() succeeded: we should rework that 1742 * and defer destroying such pages once they are un-isolated (putback) 1743 * instead. 1744 */ 1745 if (!zpdesc->zspage) 1746 return MIGRATEPAGE_SUCCESS; 1747 1748 /* The page is locked, so this pointer must remain valid */ 1749 zspage = get_zspage(zpdesc); 1750 pool = zspage->pool; 1751 1752 /* 1753 * The pool migrate_lock protects the race between zpage migration 1754 * and zs_free. 1755 */ 1756 write_lock(&pool->lock); 1757 class = zspage_class(pool, zspage); 1758 1759 /* 1760 * the class lock protects zpage alloc/free in the zspage. 
1761 */ 1762 spin_lock(&class->lock); 1763 /* the zspage write_lock protects zpage access via zs_obj_read/write() */ 1764 if (!zspage_write_trylock(zspage)) { 1765 spin_unlock(&class->lock); 1766 write_unlock(&pool->lock); 1767 return -EINVAL; 1768 } 1769 1770 /* We're committed, tell the world that this is a Zsmalloc page. */ 1771 __zpdesc_set_zsmalloc(newzpdesc); 1772 1773 offset = get_first_obj_offset(zpdesc); 1774 s_addr = kmap_local_zpdesc(zpdesc); 1775 1776 /* 1777 * Here, any user cannot access all objects in the zspage so let's move. 1778 */ 1779 d_addr = kmap_local_zpdesc(newzpdesc); 1780 copy_page(d_addr, s_addr); 1781 kunmap_local(d_addr); 1782 1783 for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE; 1784 addr += class->size) { 1785 if (obj_allocated(zpdesc, addr, &handle)) { 1786 1787 old_obj = handle_to_obj(handle); 1788 obj_to_location(old_obj, &dummy, &obj_idx); 1789 new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx); 1790 record_obj(handle, new_obj); 1791 } 1792 } 1793 kunmap_local(s_addr); 1794 1795 replace_sub_page(class, zspage, newzpdesc, zpdesc); 1796 /* 1797 * Since we complete the data copy and set up new zspage structure, 1798 * it's okay to release migration_lock. 1799 */ 1800 write_unlock(&pool->lock); 1801 spin_unlock(&class->lock); 1802 zspage_write_unlock(zspage); 1803 1804 zpdesc_get(newzpdesc); 1805 if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) { 1806 zpdesc_dec_zone_page_state(zpdesc); 1807 zpdesc_inc_zone_page_state(newzpdesc); 1808 } 1809 1810 reset_zpdesc(zpdesc); 1811 zpdesc_put(zpdesc); 1812 1813 return MIGRATEPAGE_SUCCESS; 1814 } 1815 1816 static void zs_page_putback(struct page *page) 1817 { 1818 } 1819 1820 const struct movable_operations zsmalloc_mops = { 1821 .isolate_page = zs_page_isolate, 1822 .migrate_page = zs_page_migrate, 1823 .putback_page = zs_page_putback, 1824 }; 1825 1826 /* 1827 * Caller should hold page_lock of all pages in the zspage 1828 * In here, we cannot use zspage meta data. 
1829 */ 1830 static void async_free_zspage(struct work_struct *work) 1831 { 1832 int i; 1833 struct size_class *class; 1834 struct zspage *zspage, *tmp; 1835 LIST_HEAD(free_pages); 1836 struct zs_pool *pool = container_of(work, struct zs_pool, 1837 free_work); 1838 1839 for (i = 0; i < ZS_SIZE_CLASSES; i++) { 1840 class = pool->size_class[i]; 1841 if (class->index != i) 1842 continue; 1843 1844 spin_lock(&class->lock); 1845 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], 1846 &free_pages); 1847 spin_unlock(&class->lock); 1848 } 1849 1850 list_for_each_entry_safe(zspage, tmp, &free_pages, list) { 1851 list_del(&zspage->list); 1852 lock_zspage(zspage); 1853 1854 class = zspage_class(pool, zspage); 1855 spin_lock(&class->lock); 1856 class_stat_sub(class, ZS_INUSE_RATIO_0, 1); 1857 __free_zspage(pool, class, zspage); 1858 spin_unlock(&class->lock); 1859 } 1860 }; 1861 1862 static void kick_deferred_free(struct zs_pool *pool) 1863 { 1864 schedule_work(&pool->free_work); 1865 } 1866 1867 static void zs_flush_migration(struct zs_pool *pool) 1868 { 1869 flush_work(&pool->free_work); 1870 } 1871 1872 static void init_deferred_free(struct zs_pool *pool) 1873 { 1874 INIT_WORK(&pool->free_work, async_free_zspage); 1875 } 1876 1877 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) 1878 { 1879 struct zpdesc *zpdesc = get_first_zpdesc(zspage); 1880 1881 do { 1882 WARN_ON(!zpdesc_trylock(zpdesc)); 1883 __zpdesc_set_movable(zpdesc); 1884 zpdesc_unlock(zpdesc); 1885 } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL); 1886 } 1887 #else 1888 static inline void zs_flush_migration(struct zs_pool *pool) { } 1889 #endif 1890 1891 /* 1892 * 1893 * Based on the number of unused allocated objects calculate 1894 * and return the number of pages that we can free. 
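 *
 * Worked example with made-up numbers: if a class has 310 objects allocated
 * but only 70 in use, with objs_per_zspage == 32 and pages_per_zspage == 1,
 * then obj_wasted = (310 - 70) / 32 = 7, so compaction could free up to
 * 7 pages.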
1895 */ 1896 static unsigned long zs_can_compact(struct size_class *class) 1897 { 1898 unsigned long obj_wasted; 1899 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); 1900 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); 1901 1902 if (obj_allocated <= obj_used) 1903 return 0; 1904 1905 obj_wasted = obj_allocated - obj_used; 1906 obj_wasted /= class->objs_per_zspage; 1907 1908 return obj_wasted * class->pages_per_zspage; 1909 } 1910 1911 static unsigned long __zs_compact(struct zs_pool *pool, 1912 struct size_class *class) 1913 { 1914 struct zspage *src_zspage = NULL; 1915 struct zspage *dst_zspage = NULL; 1916 unsigned long pages_freed = 0; 1917 1918 /* 1919 * protect the race between zpage migration and zs_free 1920 * as well as zpage allocation/free 1921 */ 1922 write_lock(&pool->lock); 1923 spin_lock(&class->lock); 1924 while (zs_can_compact(class)) { 1925 int fg; 1926 1927 if (!dst_zspage) { 1928 dst_zspage = isolate_dst_zspage(class); 1929 if (!dst_zspage) 1930 break; 1931 } 1932 1933 src_zspage = isolate_src_zspage(class); 1934 if (!src_zspage) 1935 break; 1936 1937 if (!zspage_write_trylock(src_zspage)) 1938 break; 1939 1940 migrate_zspage(pool, src_zspage, dst_zspage); 1941 zspage_write_unlock(src_zspage); 1942 1943 fg = putback_zspage(class, src_zspage); 1944 if (fg == ZS_INUSE_RATIO_0) { 1945 free_zspage(pool, class, src_zspage); 1946 pages_freed += class->pages_per_zspage; 1947 } 1948 src_zspage = NULL; 1949 1950 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 1951 || rwlock_is_contended(&pool->lock)) { 1952 putback_zspage(class, dst_zspage); 1953 dst_zspage = NULL; 1954 1955 spin_unlock(&class->lock); 1956 write_unlock(&pool->lock); 1957 cond_resched(); 1958 write_lock(&pool->lock); 1959 spin_lock(&class->lock); 1960 } 1961 } 1962 1963 if (src_zspage) 1964 putback_zspage(class, src_zspage); 1965 1966 if (dst_zspage) 1967 putback_zspage(class, dst_zspage); 1968 1969 spin_unlock(&class->lock); 1970 write_unlock(&pool->lock); 1971 1972 return pages_freed; 1973 } 1974 1975 unsigned long zs_compact(struct zs_pool *pool) 1976 { 1977 int i; 1978 struct size_class *class; 1979 unsigned long pages_freed = 0; 1980 1981 /* 1982 * Pool compaction is performed under pool->lock so it is basically 1983 * single-threaded. Having more than one thread in __zs_compact() 1984 * will increase pool->lock contention, which will impact other 1985 * zsmalloc operations that need pool->lock. 1986 */ 1987 if (atomic_xchg(&pool->compaction_in_progress, 1)) 1988 return 0; 1989 1990 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { 1991 class = pool->size_class[i]; 1992 if (class->index != i) 1993 continue; 1994 pages_freed += __zs_compact(pool, class); 1995 } 1996 atomic_long_add(pages_freed, &pool->stats.pages_compacted); 1997 atomic_set(&pool->compaction_in_progress, 0); 1998 1999 return pages_freed; 2000 } 2001 EXPORT_SYMBOL_GPL(zs_compact); 2002 2003 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) 2004 { 2005 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); 2006 } 2007 EXPORT_SYMBOL_GPL(zs_pool_stats); 2008 2009 static unsigned long zs_shrinker_scan(struct shrinker *shrinker, 2010 struct shrink_control *sc) 2011 { 2012 unsigned long pages_freed; 2013 struct zs_pool *pool = shrinker->private_data; 2014 2015 /* 2016 * Compact classes and calculate compaction delta. 2017 * Can run concurrently with a manually triggered 2018 * (by user) compaction. 2019 */ 2020 pages_freed = zs_compact(pool); 2021 2022 return pages_freed ? 

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->scan_objects = zs_shrinker_scan;
	pool->shrinker->count_objects = zs_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->private_data = pool;

	shrinker_register(pool->shrinker);

	return 0;
}

static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste;

		waste = (i * PAGE_SIZE) % class_size;
		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}

	return chain_size;
}
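
/*
 * Worked example for the chain size calculation above (hypothetical
 * numbers, assuming 4 KiB pages and a chain size limit of at least 4):
 * a 176-byte class wastes 4096 % 176 = 48 bytes with 1 page, 96 bytes
 * with 2 pages and 144 bytes with 3 pages, but only 16384 % 176 = 16
 * bytes with 4 pages, so 4 pages per zspage is the chain size chosen.
 */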

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);
	rwlock_init(&pool->lock);
	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of the size_class that we
	 * want to use for merging should be larger than or equal to the
	 * current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from the biggest down to the smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
		    !huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds the handle size before it
			 * performs the size class search - so an object may
			 * be smaller than the huge class size, yet still end
			 * up in the huge class because it grows by
			 * ZS_HANDLE_SIZE extra bytes right before class
			 * lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operations such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that
		 * we can get better memory utilization if we use one
		 * size_class for many different sizes whose size_classes have
		 * the same characteristics. So we make size_class point to
		 * the previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		fullness = ZS_INUSE_RATIO_0;
		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
			fullness++;
		}

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is a fairly optional thing. If
	 * registration fails we can still use the pool normally and the user
	 * can trigger compaction manually. Thus, ignore the return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_flush_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
			if (list_empty(&class->fullness_list[fg]))
				continue;

			pr_err("Class-%d fullness group %d is not empty\n",
			       class->size, fg);
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif
	zs_stat_init();
	return 0;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("zsmalloc memory allocator");
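
/*
 * Minimal usage sketch (illustrative only; error handling elided and the
 * object-level zs_malloc()/zs_free() API is not shown here):
 *
 *	struct zs_pool_stats stats;
 *	struct zs_pool *pool = zs_create_pool("example");
 *
 *	if (pool) {
 *		// ... allocate and free objects via zs_malloc()/zs_free() ...
 *		zs_compact(pool);		// defragment the pool
 *		zs_pool_stats(pool, &stats);	// read stats.pages_compacted
 *		zs_destroy_pool(pool);
 *	}
 */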