/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->private: points to zspage
 *	page->index: links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store the handle.
 *	page->page_type: PG_zsmalloc, lower 16 bits locate the first object
 *		offset in a subpage of a zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_owner_priv_1: identifies the huge component page
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * lock ordering:
 *	page_lock
 *	pool->migrate_lock
 *	class->lock
 *	zspage->lock
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/migrate.h>
#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/local_lock.h>

#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN	8

#define ZS_HANDLE_SIZE	(sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG so that we can
 * tell whether the object is allocated or not.
 * It's okay to keep this status bit in the least significant bit because the
 * header stores the handle, which is a 4-byte-aligned address, so we have
 * room for at least two bits.
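 *
 * For example, obj_malloc() below stores the tagged handle in the chunk
 * header of a non-huge object:
 *
 *	link->handle = handle | OBJ_ALLOCATED_TAG;
 *
 * and obj_allocated() later tests that low bit to tell a live object's
 * header apart from a free slot's 'next' index.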
 */
#define OBJ_ALLOCATED_TAG	1

#define OBJ_TAG_BITS	1
#define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG

#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define HUGE_BITS	1
#define FULLNESS_BITS	4
#define CLASS_BITS	8
#define MAGIC_VAL_BITS	8

#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))

/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)

/*
 * Pages are distinguished by the ratio of used memory (that is the ratio
 * of ->inuse objects to all objects that the page can store). For example,
 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
 *
 * The number of fullness groups is not random. It allows us to keep the
 * difference between the least busy page in the group (minimum permitted
 * number of ->inuse objects) and the most busy page (maximum permitted
 * number of ->inuse objects) at a reasonable value.
 */
enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* NOTE: 8 more fullness groups here */
	ZS_INUSE_RATIO_99	= 10,
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};

enum class_stat_type {
	/* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
	ZS_OBJS_ALLOCATED	= NR_FULLNESS_GROUPS,
	ZS_OBJS_INUSE,
	NR_CLASS_STAT_TYPES,
};

struct zs_size_stat {
	unsigned long objs[NR_CLASS_STAT_TYPES];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

static size_t huge_class_size;

struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_FULLNESS_GROUPS];
	/*
	 * Size of objects stored in this class. Must be a multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives the head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * valid for a non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
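		 * This is the handle with OBJ_ALLOCATED_TAG set in its low
		 * bit, which is what distinguishes this union member from a
		 * free slot's 'next' index (see obj_allocated()).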
215 */ 216 unsigned long handle; 217 }; 218 }; 219 220 struct zs_pool { 221 const char *name; 222 223 struct size_class *size_class[ZS_SIZE_CLASSES]; 224 struct kmem_cache *handle_cachep; 225 struct kmem_cache *zspage_cachep; 226 227 atomic_long_t pages_allocated; 228 229 struct zs_pool_stats stats; 230 231 /* Compact classes */ 232 struct shrinker *shrinker; 233 234 #ifdef CONFIG_ZSMALLOC_STAT 235 struct dentry *stat_dentry; 236 #endif 237 #ifdef CONFIG_COMPACTION 238 struct work_struct free_work; 239 #endif 240 /* protect page/zspage migration */ 241 rwlock_t migrate_lock; 242 atomic_t compaction_in_progress; 243 }; 244 245 struct zspage { 246 struct { 247 unsigned int huge:HUGE_BITS; 248 unsigned int fullness:FULLNESS_BITS; 249 unsigned int class:CLASS_BITS + 1; 250 unsigned int magic:MAGIC_VAL_BITS; 251 }; 252 unsigned int inuse; 253 unsigned int freeobj; 254 struct page *first_page; 255 struct list_head list; /* fullness list */ 256 struct zs_pool *pool; 257 rwlock_t lock; 258 }; 259 260 struct mapping_area { 261 local_lock_t lock; 262 char *vm_buf; /* copy buffer for objects that span pages */ 263 char *vm_addr; /* address of kmap_atomic()'ed pages */ 264 enum zs_mapmode vm_mm; /* mapping mode */ 265 }; 266 267 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ 268 static void SetZsHugePage(struct zspage *zspage) 269 { 270 zspage->huge = 1; 271 } 272 273 static bool ZsHugePage(struct zspage *zspage) 274 { 275 return zspage->huge; 276 } 277 278 static void migrate_lock_init(struct zspage *zspage); 279 static void migrate_read_lock(struct zspage *zspage); 280 static void migrate_read_unlock(struct zspage *zspage); 281 static void migrate_write_lock(struct zspage *zspage); 282 static void migrate_write_unlock(struct zspage *zspage); 283 284 #ifdef CONFIG_COMPACTION 285 static void kick_deferred_free(struct zs_pool *pool); 286 static void init_deferred_free(struct zs_pool *pool); 287 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage); 288 #else 289 static void kick_deferred_free(struct zs_pool *pool) {} 290 static void init_deferred_free(struct zs_pool *pool) {} 291 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} 292 #endif 293 294 static int create_cache(struct zs_pool *pool) 295 { 296 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, 297 0, 0, NULL); 298 if (!pool->handle_cachep) 299 return 1; 300 301 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), 302 0, 0, NULL); 303 if (!pool->zspage_cachep) { 304 kmem_cache_destroy(pool->handle_cachep); 305 pool->handle_cachep = NULL; 306 return 1; 307 } 308 309 return 0; 310 } 311 312 static void destroy_cache(struct zs_pool *pool) 313 { 314 kmem_cache_destroy(pool->handle_cachep); 315 kmem_cache_destroy(pool->zspage_cachep); 316 } 317 318 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) 319 { 320 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, 321 gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); 322 } 323 324 static void cache_free_handle(struct zs_pool *pool, unsigned long handle) 325 { 326 kmem_cache_free(pool->handle_cachep, (void *)handle); 327 } 328 329 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) 330 { 331 return kmem_cache_zalloc(pool->zspage_cachep, 332 flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); 333 } 334 335 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) 336 { 337 kmem_cache_free(pool->zspage_cachep, zspage); 338 } 339 340 /* class->lock(which 
owns the handle) synchronizes races */ 341 static void record_obj(unsigned long handle, unsigned long obj) 342 { 343 *(unsigned long *)handle = obj; 344 } 345 346 /* zpool driver */ 347 348 #ifdef CONFIG_ZPOOL 349 350 static void *zs_zpool_create(const char *name, gfp_t gfp) 351 { 352 /* 353 * Ignore global gfp flags: zs_malloc() may be invoked from 354 * different contexts and its caller must provide a valid 355 * gfp mask. 356 */ 357 return zs_create_pool(name); 358 } 359 360 static void zs_zpool_destroy(void *pool) 361 { 362 zs_destroy_pool(pool); 363 } 364 365 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, 366 unsigned long *handle) 367 { 368 *handle = zs_malloc(pool, size, gfp); 369 370 if (IS_ERR_VALUE(*handle)) 371 return PTR_ERR((void *)*handle); 372 return 0; 373 } 374 static void zs_zpool_free(void *pool, unsigned long handle) 375 { 376 zs_free(pool, handle); 377 } 378 379 static void *zs_zpool_map(void *pool, unsigned long handle, 380 enum zpool_mapmode mm) 381 { 382 enum zs_mapmode zs_mm; 383 384 switch (mm) { 385 case ZPOOL_MM_RO: 386 zs_mm = ZS_MM_RO; 387 break; 388 case ZPOOL_MM_WO: 389 zs_mm = ZS_MM_WO; 390 break; 391 case ZPOOL_MM_RW: 392 default: 393 zs_mm = ZS_MM_RW; 394 break; 395 } 396 397 return zs_map_object(pool, handle, zs_mm); 398 } 399 static void zs_zpool_unmap(void *pool, unsigned long handle) 400 { 401 zs_unmap_object(pool, handle); 402 } 403 404 static u64 zs_zpool_total_pages(void *pool) 405 { 406 return zs_get_total_pages(pool); 407 } 408 409 static struct zpool_driver zs_zpool_driver = { 410 .type = "zsmalloc", 411 .owner = THIS_MODULE, 412 .create = zs_zpool_create, 413 .destroy = zs_zpool_destroy, 414 .malloc_support_movable = true, 415 .malloc = zs_zpool_malloc, 416 .free = zs_zpool_free, 417 .map = zs_zpool_map, 418 .unmap = zs_zpool_unmap, 419 .total_pages = zs_zpool_total_pages, 420 }; 421 422 MODULE_ALIAS("zpool-zsmalloc"); 423 #endif /* CONFIG_ZPOOL */ 424 425 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ 426 static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = { 427 .lock = INIT_LOCAL_LOCK(lock), 428 }; 429 430 static __maybe_unused int is_first_page(struct page *page) 431 { 432 return PagePrivate(page); 433 } 434 435 /* Protected by class->lock */ 436 static inline int get_zspage_inuse(struct zspage *zspage) 437 { 438 return zspage->inuse; 439 } 440 441 442 static inline void mod_zspage_inuse(struct zspage *zspage, int val) 443 { 444 zspage->inuse += val; 445 } 446 447 static inline struct page *get_first_page(struct zspage *zspage) 448 { 449 struct page *first_page = zspage->first_page; 450 451 VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); 452 return first_page; 453 } 454 455 #define FIRST_OBJ_PAGE_TYPE_MASK 0xffff 456 457 static inline void reset_first_obj_offset(struct page *page) 458 { 459 VM_WARN_ON_ONCE(!PageZsmalloc(page)); 460 page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK; 461 } 462 463 static inline unsigned int get_first_obj_offset(struct page *page) 464 { 465 VM_WARN_ON_ONCE(!PageZsmalloc(page)); 466 return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK; 467 } 468 469 static inline void set_first_obj_offset(struct page *page, unsigned int offset) 470 { 471 /* With 16 bit available, we can support offsets into 64 KiB pages. 
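	 * Offsets are page-internal, so with 4 KiB pages the stored value
	 * never exceeds PAGE_SIZE - 1 = 4095 and easily fits the 16-bit
	 * field; the BUILD_BUG_ON below rejects page sizes above 64 KiB.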
*/ 472 BUILD_BUG_ON(PAGE_SIZE > SZ_64K); 473 VM_WARN_ON_ONCE(!PageZsmalloc(page)); 474 VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK); 475 page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK; 476 page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK; 477 } 478 479 static inline unsigned int get_freeobj(struct zspage *zspage) 480 { 481 return zspage->freeobj; 482 } 483 484 static inline void set_freeobj(struct zspage *zspage, unsigned int obj) 485 { 486 zspage->freeobj = obj; 487 } 488 489 static struct size_class *zspage_class(struct zs_pool *pool, 490 struct zspage *zspage) 491 { 492 return pool->size_class[zspage->class]; 493 } 494 495 /* 496 * zsmalloc divides the pool into various size classes where each 497 * class maintains a list of zspages where each zspage is divided 498 * into equal sized chunks. Each allocation falls into one of these 499 * classes depending on its size. This function returns index of the 500 * size class which has chunk size big enough to hold the given size. 501 */ 502 static int get_size_class_index(int size) 503 { 504 int idx = 0; 505 506 if (likely(size > ZS_MIN_ALLOC_SIZE)) 507 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, 508 ZS_SIZE_CLASS_DELTA); 509 510 return min_t(int, ZS_SIZE_CLASSES - 1, idx); 511 } 512 513 static inline void class_stat_add(struct size_class *class, int type, 514 unsigned long cnt) 515 { 516 class->stats.objs[type] += cnt; 517 } 518 519 static inline void class_stat_sub(struct size_class *class, int type, 520 unsigned long cnt) 521 { 522 class->stats.objs[type] -= cnt; 523 } 524 525 static inline unsigned long class_stat_read(struct size_class *class, int type) 526 { 527 return class->stats.objs[type]; 528 } 529 530 #ifdef CONFIG_ZSMALLOC_STAT 531 532 static void __init zs_stat_init(void) 533 { 534 if (!debugfs_initialized()) { 535 pr_warn("debugfs not available, stat dir not created\n"); 536 return; 537 } 538 539 zs_stat_root = debugfs_create_dir("zsmalloc", NULL); 540 } 541 542 static void __exit zs_stat_exit(void) 543 { 544 debugfs_remove_recursive(zs_stat_root); 545 } 546 547 static unsigned long zs_can_compact(struct size_class *class); 548 549 static int zs_stats_size_show(struct seq_file *s, void *v) 550 { 551 int i, fg; 552 struct zs_pool *pool = s->private; 553 struct size_class *class; 554 int objs_per_zspage; 555 unsigned long obj_allocated, obj_used, pages_used, freeable; 556 unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; 557 unsigned long total_freeable = 0; 558 unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, }; 559 560 seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n", 561 "class", "size", "10%", "20%", "30%", "40%", 562 "50%", "60%", "70%", "80%", "90%", "99%", "100%", 563 "obj_allocated", "obj_used", "pages_used", 564 "pages_per_zspage", "freeable"); 565 566 for (i = 0; i < ZS_SIZE_CLASSES; i++) { 567 568 class = pool->size_class[i]; 569 570 if (class->index != i) 571 continue; 572 573 spin_lock(&class->lock); 574 575 seq_printf(s, " %5u %5u ", i, class->size); 576 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) { 577 inuse_totals[fg] += class_stat_read(class, fg); 578 seq_printf(s, "%9lu ", class_stat_read(class, fg)); 579 } 580 581 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); 582 obj_used = class_stat_read(class, ZS_OBJS_INUSE); 583 freeable = zs_can_compact(class); 584 spin_unlock(&class->lock); 585 586 objs_per_zspage = class->objs_per_zspage; 587 pages_used = obj_allocated / objs_per_zspage * 588 class->pages_per_zspage; 
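		/*
		 * ZS_OBJS_ALLOCATED only ever changes in whole-zspage steps
		 * (see zs_malloc() and __free_zspage()), so the division
		 * above is exact: pages_used counts complete zspages.
		 */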
589 590 seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n", 591 obj_allocated, obj_used, pages_used, 592 class->pages_per_zspage, freeable); 593 594 total_objs += obj_allocated; 595 total_used_objs += obj_used; 596 total_pages += pages_used; 597 total_freeable += freeable; 598 } 599 600 seq_puts(s, "\n"); 601 seq_printf(s, " %5s %5s ", "Total", ""); 602 603 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) 604 seq_printf(s, "%9lu ", inuse_totals[fg]); 605 606 seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n", 607 total_objs, total_used_objs, total_pages, "", 608 total_freeable); 609 610 return 0; 611 } 612 DEFINE_SHOW_ATTRIBUTE(zs_stats_size); 613 614 static void zs_pool_stat_create(struct zs_pool *pool, const char *name) 615 { 616 if (!zs_stat_root) { 617 pr_warn("no root stat dir, not creating <%s> stat dir\n", name); 618 return; 619 } 620 621 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); 622 623 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, 624 &zs_stats_size_fops); 625 } 626 627 static void zs_pool_stat_destroy(struct zs_pool *pool) 628 { 629 debugfs_remove_recursive(pool->stat_dentry); 630 } 631 632 #else /* CONFIG_ZSMALLOC_STAT */ 633 static void __init zs_stat_init(void) 634 { 635 } 636 637 static void __exit zs_stat_exit(void) 638 { 639 } 640 641 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name) 642 { 643 } 644 645 static inline void zs_pool_stat_destroy(struct zs_pool *pool) 646 { 647 } 648 #endif 649 650 651 /* 652 * For each size class, zspages are divided into different groups 653 * depending on their usage ratio. This function returns fullness 654 * status of the given page. 655 */ 656 static int get_fullness_group(struct size_class *class, struct zspage *zspage) 657 { 658 int inuse, objs_per_zspage, ratio; 659 660 inuse = get_zspage_inuse(zspage); 661 objs_per_zspage = class->objs_per_zspage; 662 663 if (inuse == 0) 664 return ZS_INUSE_RATIO_0; 665 if (inuse == objs_per_zspage) 666 return ZS_INUSE_RATIO_100; 667 668 ratio = 100 * inuse / objs_per_zspage; 669 /* 670 * Take integer division into consideration: a page with one inuse 671 * object out of 127 possible, will end up having 0 usage ratio, 672 * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group. 673 */ 674 return ratio / 10 + 1; 675 } 676 677 /* 678 * Each size class maintains various freelists and zspages are assigned 679 * to one of these freelists based on the number of live objects they 680 * have. This functions inserts the given zspage into the freelist 681 * identified by <class, fullness_group>. 682 */ 683 static void insert_zspage(struct size_class *class, 684 struct zspage *zspage, 685 int fullness) 686 { 687 class_stat_add(class, fullness, 1); 688 list_add(&zspage->list, &class->fullness_list[fullness]); 689 zspage->fullness = fullness; 690 } 691 692 /* 693 * This function removes the given zspage from the freelist identified 694 * by <class, fullness_group>. 695 */ 696 static void remove_zspage(struct size_class *class, struct zspage *zspage) 697 { 698 int fullness = zspage->fullness; 699 700 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); 701 702 list_del_init(&zspage->list); 703 class_stat_sub(class, fullness, 1); 704 } 705 706 /* 707 * Each size class maintains zspages in different fullness groups depending 708 * on the number of live objects they contain. 
When allocating or freeing 709 * objects, the fullness status of the page can change, for instance, from 710 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function 711 * checks if such a status change has occurred for the given page and 712 * accordingly moves the page from the list of the old fullness group to that 713 * of the new fullness group. 714 */ 715 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) 716 { 717 int newfg; 718 719 newfg = get_fullness_group(class, zspage); 720 if (newfg == zspage->fullness) 721 goto out; 722 723 remove_zspage(class, zspage); 724 insert_zspage(class, zspage, newfg); 725 out: 726 return newfg; 727 } 728 729 static struct zspage *get_zspage(struct page *page) 730 { 731 struct zspage *zspage = (struct zspage *)page_private(page); 732 733 BUG_ON(zspage->magic != ZSPAGE_MAGIC); 734 return zspage; 735 } 736 737 static struct page *get_next_page(struct page *page) 738 { 739 struct zspage *zspage = get_zspage(page); 740 741 if (unlikely(ZsHugePage(zspage))) 742 return NULL; 743 744 return (struct page *)page->index; 745 } 746 747 /** 748 * obj_to_location - get (<page>, <obj_idx>) from encoded object value 749 * @obj: the encoded object value 750 * @page: page object resides in zspage 751 * @obj_idx: object index 752 */ 753 static void obj_to_location(unsigned long obj, struct page **page, 754 unsigned int *obj_idx) 755 { 756 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); 757 *obj_idx = (obj & OBJ_INDEX_MASK); 758 } 759 760 static void obj_to_page(unsigned long obj, struct page **page) 761 { 762 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); 763 } 764 765 /** 766 * location_to_obj - get obj value encoded from (<page>, <obj_idx>) 767 * @page: page object resides in zspage 768 * @obj_idx: object index 769 */ 770 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) 771 { 772 unsigned long obj; 773 774 obj = page_to_pfn(page) << OBJ_INDEX_BITS; 775 obj |= obj_idx & OBJ_INDEX_MASK; 776 777 return obj; 778 } 779 780 static unsigned long handle_to_obj(unsigned long handle) 781 { 782 return *(unsigned long *)handle; 783 } 784 785 static inline bool obj_allocated(struct page *page, void *obj, 786 unsigned long *phandle) 787 { 788 unsigned long handle; 789 struct zspage *zspage = get_zspage(page); 790 791 if (unlikely(ZsHugePage(zspage))) { 792 VM_BUG_ON_PAGE(!is_first_page(page), page); 793 handle = page->index; 794 } else 795 handle = *(unsigned long *)obj; 796 797 if (!(handle & OBJ_ALLOCATED_TAG)) 798 return false; 799 800 /* Clear all tags before returning the handle */ 801 *phandle = handle & ~OBJ_TAG_MASK; 802 return true; 803 } 804 805 static void reset_page(struct page *page) 806 { 807 __ClearPageMovable(page); 808 ClearPagePrivate(page); 809 set_page_private(page, 0); 810 page->index = 0; 811 reset_first_obj_offset(page); 812 __ClearPageZsmalloc(page); 813 } 814 815 static int trylock_zspage(struct zspage *zspage) 816 { 817 struct page *cursor, *fail; 818 819 for (cursor = get_first_page(zspage); cursor != NULL; cursor = 820 get_next_page(cursor)) { 821 if (!trylock_page(cursor)) { 822 fail = cursor; 823 goto unlock; 824 } 825 } 826 827 return 1; 828 unlock: 829 for (cursor = get_first_page(zspage); cursor != fail; cursor = 830 get_next_page(cursor)) 831 unlock_page(cursor); 832 833 return 0; 834 } 835 836 static void __free_zspage(struct zs_pool *pool, struct size_class *class, 837 struct zspage *zspage) 838 { 839 struct page *page, *next; 840 841 assert_spin_locked(&class->lock); 842 843 
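	/*
	 * The zspage must be empty here; the loop below resets and unlocks
	 * each component page and drops its reference, then the zspage
	 * struct itself is returned to its cache.
	 */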
VM_BUG_ON(get_zspage_inuse(zspage)); 844 VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); 845 846 next = page = get_first_page(zspage); 847 do { 848 VM_BUG_ON_PAGE(!PageLocked(page), page); 849 next = get_next_page(page); 850 reset_page(page); 851 unlock_page(page); 852 dec_zone_page_state(page, NR_ZSPAGES); 853 put_page(page); 854 page = next; 855 } while (page != NULL); 856 857 cache_free_zspage(pool, zspage); 858 859 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); 860 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); 861 } 862 863 static void free_zspage(struct zs_pool *pool, struct size_class *class, 864 struct zspage *zspage) 865 { 866 VM_BUG_ON(get_zspage_inuse(zspage)); 867 VM_BUG_ON(list_empty(&zspage->list)); 868 869 /* 870 * Since zs_free couldn't be sleepable, this function cannot call 871 * lock_page. The page locks trylock_zspage got will be released 872 * by __free_zspage. 873 */ 874 if (!trylock_zspage(zspage)) { 875 kick_deferred_free(pool); 876 return; 877 } 878 879 remove_zspage(class, zspage); 880 __free_zspage(pool, class, zspage); 881 } 882 883 /* Initialize a newly allocated zspage */ 884 static void init_zspage(struct size_class *class, struct zspage *zspage) 885 { 886 unsigned int freeobj = 1; 887 unsigned long off = 0; 888 struct page *page = get_first_page(zspage); 889 890 while (page) { 891 struct page *next_page; 892 struct link_free *link; 893 void *vaddr; 894 895 set_first_obj_offset(page, off); 896 897 vaddr = kmap_atomic(page); 898 link = (struct link_free *)vaddr + off / sizeof(*link); 899 900 while ((off += class->size) < PAGE_SIZE) { 901 link->next = freeobj++ << OBJ_TAG_BITS; 902 link += class->size / sizeof(*link); 903 } 904 905 /* 906 * We now come to the last (full or partial) object on this 907 * page, which must point to the first object on the next 908 * page (if present) 909 */ 910 next_page = get_next_page(page); 911 if (next_page) { 912 link->next = freeobj++ << OBJ_TAG_BITS; 913 } else { 914 /* 915 * Reset OBJ_TAG_BITS bit to last link to tell 916 * whether it's allocated object or not. 917 */ 918 link->next = -1UL << OBJ_TAG_BITS; 919 } 920 kunmap_atomic(vaddr); 921 page = next_page; 922 off %= PAGE_SIZE; 923 } 924 925 set_freeobj(zspage, 0); 926 } 927 928 static void create_page_chain(struct size_class *class, struct zspage *zspage, 929 struct page *pages[]) 930 { 931 int i; 932 struct page *page; 933 struct page *prev_page = NULL; 934 int nr_pages = class->pages_per_zspage; 935 936 /* 937 * Allocate individual pages and link them together as: 938 * 1. all pages are linked together using page->index 939 * 2. each sub-page point to zspage using page->private 940 * 941 * we set PG_private to identify the first page (i.e. no other sub-page 942 * has this flag set). 
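	 *
	 * For a 3-page zspage the chain ends up looking roughly like this
	 * (illustrative):
	 *
	 *	first_page (PG_private) --index--> page1 --index--> page2 --index--> 0
	 *
	 * with page->private of every component page pointing back to the
	 * zspage.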
943 */ 944 for (i = 0; i < nr_pages; i++) { 945 page = pages[i]; 946 set_page_private(page, (unsigned long)zspage); 947 page->index = 0; 948 if (i == 0) { 949 zspage->first_page = page; 950 SetPagePrivate(page); 951 if (unlikely(class->objs_per_zspage == 1 && 952 class->pages_per_zspage == 1)) 953 SetZsHugePage(zspage); 954 } else { 955 prev_page->index = (unsigned long)page; 956 } 957 prev_page = page; 958 } 959 } 960 961 /* 962 * Allocate a zspage for the given size class 963 */ 964 static struct zspage *alloc_zspage(struct zs_pool *pool, 965 struct size_class *class, 966 gfp_t gfp) 967 { 968 int i; 969 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; 970 struct zspage *zspage = cache_alloc_zspage(pool, gfp); 971 972 if (!zspage) 973 return NULL; 974 975 zspage->magic = ZSPAGE_MAGIC; 976 migrate_lock_init(zspage); 977 978 for (i = 0; i < class->pages_per_zspage; i++) { 979 struct page *page; 980 981 page = alloc_page(gfp); 982 if (!page) { 983 while (--i >= 0) { 984 dec_zone_page_state(pages[i], NR_ZSPAGES); 985 __ClearPageZsmalloc(pages[i]); 986 __free_page(pages[i]); 987 } 988 cache_free_zspage(pool, zspage); 989 return NULL; 990 } 991 __SetPageZsmalloc(page); 992 993 inc_zone_page_state(page, NR_ZSPAGES); 994 pages[i] = page; 995 } 996 997 create_page_chain(class, zspage, pages); 998 init_zspage(class, zspage); 999 zspage->pool = pool; 1000 zspage->class = class->index; 1001 1002 return zspage; 1003 } 1004 1005 static struct zspage *find_get_zspage(struct size_class *class) 1006 { 1007 int i; 1008 struct zspage *zspage; 1009 1010 for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) { 1011 zspage = list_first_entry_or_null(&class->fullness_list[i], 1012 struct zspage, list); 1013 if (zspage) 1014 break; 1015 } 1016 1017 return zspage; 1018 } 1019 1020 static inline int __zs_cpu_up(struct mapping_area *area) 1021 { 1022 /* 1023 * Make sure we don't leak memory if a cpu UP notification 1024 * and zs_init() race and both call zs_cpu_up() on the same cpu 1025 */ 1026 if (area->vm_buf) 1027 return 0; 1028 area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); 1029 if (!area->vm_buf) 1030 return -ENOMEM; 1031 return 0; 1032 } 1033 1034 static inline void __zs_cpu_down(struct mapping_area *area) 1035 { 1036 kfree(area->vm_buf); 1037 area->vm_buf = NULL; 1038 } 1039 1040 static void *__zs_map_object(struct mapping_area *area, 1041 struct page *pages[2], int off, int size) 1042 { 1043 int sizes[2]; 1044 void *addr; 1045 char *buf = area->vm_buf; 1046 1047 /* disable page faults to match kmap_atomic() return conditions */ 1048 pagefault_disable(); 1049 1050 /* no read fastpath */ 1051 if (area->vm_mm == ZS_MM_WO) 1052 goto out; 1053 1054 sizes[0] = PAGE_SIZE - off; 1055 sizes[1] = size - sizes[0]; 1056 1057 /* copy object to per-cpu buffer */ 1058 addr = kmap_atomic(pages[0]); 1059 memcpy(buf, addr + off, sizes[0]); 1060 kunmap_atomic(addr); 1061 addr = kmap_atomic(pages[1]); 1062 memcpy(buf + sizes[0], addr, sizes[1]); 1063 kunmap_atomic(addr); 1064 out: 1065 return area->vm_buf; 1066 } 1067 1068 static void __zs_unmap_object(struct mapping_area *area, 1069 struct page *pages[2], int off, int size) 1070 { 1071 int sizes[2]; 1072 void *addr; 1073 char *buf; 1074 1075 /* no write fastpath */ 1076 if (area->vm_mm == ZS_MM_RO) 1077 goto out; 1078 1079 buf = area->vm_buf; 1080 buf = buf + ZS_HANDLE_SIZE; 1081 size -= ZS_HANDLE_SIZE; 1082 off += ZS_HANDLE_SIZE; 1083 1084 sizes[0] = PAGE_SIZE - off; 1085 sizes[1] = size - sizes[0]; 1086 1087 /* copy per-cpu buffer to object */ 1088 addr = 
kmap_atomic(pages[0]); 1089 memcpy(addr + off, buf, sizes[0]); 1090 kunmap_atomic(addr); 1091 addr = kmap_atomic(pages[1]); 1092 memcpy(addr, buf + sizes[0], sizes[1]); 1093 kunmap_atomic(addr); 1094 1095 out: 1096 /* enable page faults to match kunmap_atomic() return conditions */ 1097 pagefault_enable(); 1098 } 1099 1100 static int zs_cpu_prepare(unsigned int cpu) 1101 { 1102 struct mapping_area *area; 1103 1104 area = &per_cpu(zs_map_area, cpu); 1105 return __zs_cpu_up(area); 1106 } 1107 1108 static int zs_cpu_dead(unsigned int cpu) 1109 { 1110 struct mapping_area *area; 1111 1112 area = &per_cpu(zs_map_area, cpu); 1113 __zs_cpu_down(area); 1114 return 0; 1115 } 1116 1117 static bool can_merge(struct size_class *prev, int pages_per_zspage, 1118 int objs_per_zspage) 1119 { 1120 if (prev->pages_per_zspage == pages_per_zspage && 1121 prev->objs_per_zspage == objs_per_zspage) 1122 return true; 1123 1124 return false; 1125 } 1126 1127 static bool zspage_full(struct size_class *class, struct zspage *zspage) 1128 { 1129 return get_zspage_inuse(zspage) == class->objs_per_zspage; 1130 } 1131 1132 static bool zspage_empty(struct zspage *zspage) 1133 { 1134 return get_zspage_inuse(zspage) == 0; 1135 } 1136 1137 /** 1138 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class 1139 * that hold objects of the provided size. 1140 * @pool: zsmalloc pool to use 1141 * @size: object size 1142 * 1143 * Context: Any context. 1144 * 1145 * Return: the index of the zsmalloc &size_class that hold objects of the 1146 * provided size. 1147 */ 1148 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size) 1149 { 1150 struct size_class *class; 1151 1152 class = pool->size_class[get_size_class_index(size)]; 1153 1154 return class->index; 1155 } 1156 EXPORT_SYMBOL_GPL(zs_lookup_class_index); 1157 1158 unsigned long zs_get_total_pages(struct zs_pool *pool) 1159 { 1160 return atomic_long_read(&pool->pages_allocated); 1161 } 1162 EXPORT_SYMBOL_GPL(zs_get_total_pages); 1163 1164 /** 1165 * zs_map_object - get address of allocated object from handle. 1166 * @pool: pool from which the object was allocated 1167 * @handle: handle returned from zs_malloc 1168 * @mm: mapping mode to use 1169 * 1170 * Before using an object allocated from zs_malloc, it must be mapped using 1171 * this function. When done with the object, it must be unmapped using 1172 * zs_unmap_object. 1173 * 1174 * Only one object can be mapped per cpu at a time. There is no protection 1175 * against nested mappings. 1176 * 1177 * This function returns with preemption and page faults disabled. 1178 */ 1179 void *zs_map_object(struct zs_pool *pool, unsigned long handle, 1180 enum zs_mapmode mm) 1181 { 1182 struct zspage *zspage; 1183 struct page *page; 1184 unsigned long obj, off; 1185 unsigned int obj_idx; 1186 1187 struct size_class *class; 1188 struct mapping_area *area; 1189 struct page *pages[2]; 1190 void *ret; 1191 1192 /* 1193 * Because we use per-cpu mapping areas shared among the 1194 * pools/users, we can't allow mapping in interrupt context 1195 * because it can corrupt another users mappings. 1196 */ 1197 BUG_ON(in_interrupt()); 1198 1199 /* It guarantees it can get zspage from handle safely */ 1200 read_lock(&pool->migrate_lock); 1201 obj = handle_to_obj(handle); 1202 obj_to_location(obj, &page, &obj_idx); 1203 zspage = get_zspage(page); 1204 1205 /* 1206 * migration cannot move any zpages in this zspage. 
Here, class->lock 1207 * is too heavy since callers would take some time until they calls 1208 * zs_unmap_object API so delegate the locking from class to zspage 1209 * which is smaller granularity. 1210 */ 1211 migrate_read_lock(zspage); 1212 read_unlock(&pool->migrate_lock); 1213 1214 class = zspage_class(pool, zspage); 1215 off = offset_in_page(class->size * obj_idx); 1216 1217 local_lock(&zs_map_area.lock); 1218 area = this_cpu_ptr(&zs_map_area); 1219 area->vm_mm = mm; 1220 if (off + class->size <= PAGE_SIZE) { 1221 /* this object is contained entirely within a page */ 1222 area->vm_addr = kmap_atomic(page); 1223 ret = area->vm_addr + off; 1224 goto out; 1225 } 1226 1227 /* this object spans two pages */ 1228 pages[0] = page; 1229 pages[1] = get_next_page(page); 1230 BUG_ON(!pages[1]); 1231 1232 ret = __zs_map_object(area, pages, off, class->size); 1233 out: 1234 if (likely(!ZsHugePage(zspage))) 1235 ret += ZS_HANDLE_SIZE; 1236 1237 return ret; 1238 } 1239 EXPORT_SYMBOL_GPL(zs_map_object); 1240 1241 void zs_unmap_object(struct zs_pool *pool, unsigned long handle) 1242 { 1243 struct zspage *zspage; 1244 struct page *page; 1245 unsigned long obj, off; 1246 unsigned int obj_idx; 1247 1248 struct size_class *class; 1249 struct mapping_area *area; 1250 1251 obj = handle_to_obj(handle); 1252 obj_to_location(obj, &page, &obj_idx); 1253 zspage = get_zspage(page); 1254 class = zspage_class(pool, zspage); 1255 off = offset_in_page(class->size * obj_idx); 1256 1257 area = this_cpu_ptr(&zs_map_area); 1258 if (off + class->size <= PAGE_SIZE) 1259 kunmap_atomic(area->vm_addr); 1260 else { 1261 struct page *pages[2]; 1262 1263 pages[0] = page; 1264 pages[1] = get_next_page(page); 1265 BUG_ON(!pages[1]); 1266 1267 __zs_unmap_object(area, pages, off, class->size); 1268 } 1269 local_unlock(&zs_map_area.lock); 1270 1271 migrate_read_unlock(zspage); 1272 } 1273 EXPORT_SYMBOL_GPL(zs_unmap_object); 1274 1275 /** 1276 * zs_huge_class_size() - Returns the size (in bytes) of the first huge 1277 * zsmalloc &size_class. 1278 * @pool: zsmalloc pool to use 1279 * 1280 * The function returns the size of the first huge class - any object of equal 1281 * or bigger size will be stored in zspage consisting of a single physical 1282 * page. 1283 * 1284 * Context: Any context. 1285 * 1286 * Return: the size (in bytes) of the first huge zsmalloc &size_class. 
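 *
 * One possible use (illustrative): a caller storing compressed pages can
 * compare the compressed length against this value, since any object of
 * this size or larger occupies a whole physical page in the pool anyway.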
1287 */ 1288 size_t zs_huge_class_size(struct zs_pool *pool) 1289 { 1290 return huge_class_size; 1291 } 1292 EXPORT_SYMBOL_GPL(zs_huge_class_size); 1293 1294 static unsigned long obj_malloc(struct zs_pool *pool, 1295 struct zspage *zspage, unsigned long handle) 1296 { 1297 int i, nr_page, offset; 1298 unsigned long obj; 1299 struct link_free *link; 1300 struct size_class *class; 1301 1302 struct page *m_page; 1303 unsigned long m_offset; 1304 void *vaddr; 1305 1306 class = pool->size_class[zspage->class]; 1307 obj = get_freeobj(zspage); 1308 1309 offset = obj * class->size; 1310 nr_page = offset >> PAGE_SHIFT; 1311 m_offset = offset_in_page(offset); 1312 m_page = get_first_page(zspage); 1313 1314 for (i = 0; i < nr_page; i++) 1315 m_page = get_next_page(m_page); 1316 1317 vaddr = kmap_atomic(m_page); 1318 link = (struct link_free *)vaddr + m_offset / sizeof(*link); 1319 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); 1320 if (likely(!ZsHugePage(zspage))) 1321 /* record handle in the header of allocated chunk */ 1322 link->handle = handle | OBJ_ALLOCATED_TAG; 1323 else 1324 /* record handle to page->index */ 1325 zspage->first_page->index = handle | OBJ_ALLOCATED_TAG; 1326 1327 kunmap_atomic(vaddr); 1328 mod_zspage_inuse(zspage, 1); 1329 1330 obj = location_to_obj(m_page, obj); 1331 record_obj(handle, obj); 1332 1333 return obj; 1334 } 1335 1336 1337 /** 1338 * zs_malloc - Allocate block of given size from pool. 1339 * @pool: pool to allocate from 1340 * @size: size of block to allocate 1341 * @gfp: gfp flags when allocating object 1342 * 1343 * On success, handle to the allocated object is returned, 1344 * otherwise an ERR_PTR(). 1345 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 1346 */ 1347 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) 1348 { 1349 unsigned long handle; 1350 struct size_class *class; 1351 int newfg; 1352 struct zspage *zspage; 1353 1354 if (unlikely(!size)) 1355 return (unsigned long)ERR_PTR(-EINVAL); 1356 1357 if (unlikely(size > ZS_MAX_ALLOC_SIZE)) 1358 return (unsigned long)ERR_PTR(-ENOSPC); 1359 1360 handle = cache_alloc_handle(pool, gfp); 1361 if (!handle) 1362 return (unsigned long)ERR_PTR(-ENOMEM); 1363 1364 /* extra space in chunk to keep the handle */ 1365 size += ZS_HANDLE_SIZE; 1366 class = pool->size_class[get_size_class_index(size)]; 1367 1368 /* class->lock effectively protects the zpage migration */ 1369 spin_lock(&class->lock); 1370 zspage = find_get_zspage(class); 1371 if (likely(zspage)) { 1372 obj_malloc(pool, zspage, handle); 1373 /* Now move the zspage to another fullness group, if required */ 1374 fix_fullness_group(class, zspage); 1375 class_stat_add(class, ZS_OBJS_INUSE, 1); 1376 1377 goto out; 1378 } 1379 1380 spin_unlock(&class->lock); 1381 1382 zspage = alloc_zspage(pool, class, gfp); 1383 if (!zspage) { 1384 cache_free_handle(pool, handle); 1385 return (unsigned long)ERR_PTR(-ENOMEM); 1386 } 1387 1388 spin_lock(&class->lock); 1389 obj_malloc(pool, zspage, handle); 1390 newfg = get_fullness_group(class, zspage); 1391 insert_zspage(class, zspage, newfg); 1392 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); 1393 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); 1394 class_stat_add(class, ZS_OBJS_INUSE, 1); 1395 1396 /* We completely set up zspage so mark them as movable */ 1397 SetZsPageMovable(pool, zspage); 1398 out: 1399 spin_unlock(&class->lock); 1400 1401 return handle; 1402 } 1403 EXPORT_SYMBOL_GPL(zs_malloc); 1404 1405 static void obj_free(int class_size, unsigned 
long obj) 1406 { 1407 struct link_free *link; 1408 struct zspage *zspage; 1409 struct page *f_page; 1410 unsigned long f_offset; 1411 unsigned int f_objidx; 1412 void *vaddr; 1413 1414 obj_to_location(obj, &f_page, &f_objidx); 1415 f_offset = offset_in_page(class_size * f_objidx); 1416 zspage = get_zspage(f_page); 1417 1418 vaddr = kmap_atomic(f_page); 1419 link = (struct link_free *)(vaddr + f_offset); 1420 1421 /* Insert this object in containing zspage's freelist */ 1422 if (likely(!ZsHugePage(zspage))) 1423 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; 1424 else 1425 f_page->index = 0; 1426 set_freeobj(zspage, f_objidx); 1427 1428 kunmap_atomic(vaddr); 1429 mod_zspage_inuse(zspage, -1); 1430 } 1431 1432 void zs_free(struct zs_pool *pool, unsigned long handle) 1433 { 1434 struct zspage *zspage; 1435 struct page *f_page; 1436 unsigned long obj; 1437 struct size_class *class; 1438 int fullness; 1439 1440 if (IS_ERR_OR_NULL((void *)handle)) 1441 return; 1442 1443 /* 1444 * The pool->migrate_lock protects the race with zpage's migration 1445 * so it's safe to get the page from handle. 1446 */ 1447 read_lock(&pool->migrate_lock); 1448 obj = handle_to_obj(handle); 1449 obj_to_page(obj, &f_page); 1450 zspage = get_zspage(f_page); 1451 class = zspage_class(pool, zspage); 1452 spin_lock(&class->lock); 1453 read_unlock(&pool->migrate_lock); 1454 1455 class_stat_sub(class, ZS_OBJS_INUSE, 1); 1456 obj_free(class->size, obj); 1457 1458 fullness = fix_fullness_group(class, zspage); 1459 if (fullness == ZS_INUSE_RATIO_0) 1460 free_zspage(pool, class, zspage); 1461 1462 spin_unlock(&class->lock); 1463 cache_free_handle(pool, handle); 1464 } 1465 EXPORT_SYMBOL_GPL(zs_free); 1466 1467 static void zs_object_copy(struct size_class *class, unsigned long dst, 1468 unsigned long src) 1469 { 1470 struct page *s_page, *d_page; 1471 unsigned int s_objidx, d_objidx; 1472 unsigned long s_off, d_off; 1473 void *s_addr, *d_addr; 1474 int s_size, d_size, size; 1475 int written = 0; 1476 1477 s_size = d_size = class->size; 1478 1479 obj_to_location(src, &s_page, &s_objidx); 1480 obj_to_location(dst, &d_page, &d_objidx); 1481 1482 s_off = offset_in_page(class->size * s_objidx); 1483 d_off = offset_in_page(class->size * d_objidx); 1484 1485 if (s_off + class->size > PAGE_SIZE) 1486 s_size = PAGE_SIZE - s_off; 1487 1488 if (d_off + class->size > PAGE_SIZE) 1489 d_size = PAGE_SIZE - d_off; 1490 1491 s_addr = kmap_atomic(s_page); 1492 d_addr = kmap_atomic(d_page); 1493 1494 while (1) { 1495 size = min(s_size, d_size); 1496 memcpy(d_addr + d_off, s_addr + s_off, size); 1497 written += size; 1498 1499 if (written == class->size) 1500 break; 1501 1502 s_off += size; 1503 s_size -= size; 1504 d_off += size; 1505 d_size -= size; 1506 1507 /* 1508 * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic() 1509 * calls must occurs in reverse order of calls to kmap_atomic(). 1510 * So, to call kunmap_atomic(s_addr) we should first call 1511 * kunmap_atomic(d_addr). For more details see 1512 * Documentation/mm/highmem.rst. 
1513 */ 1514 if (s_off >= PAGE_SIZE) { 1515 kunmap_atomic(d_addr); 1516 kunmap_atomic(s_addr); 1517 s_page = get_next_page(s_page); 1518 s_addr = kmap_atomic(s_page); 1519 d_addr = kmap_atomic(d_page); 1520 s_size = class->size - written; 1521 s_off = 0; 1522 } 1523 1524 if (d_off >= PAGE_SIZE) { 1525 kunmap_atomic(d_addr); 1526 d_page = get_next_page(d_page); 1527 d_addr = kmap_atomic(d_page); 1528 d_size = class->size - written; 1529 d_off = 0; 1530 } 1531 } 1532 1533 kunmap_atomic(d_addr); 1534 kunmap_atomic(s_addr); 1535 } 1536 1537 /* 1538 * Find alloced object in zspage from index object and 1539 * return handle. 1540 */ 1541 static unsigned long find_alloced_obj(struct size_class *class, 1542 struct page *page, int *obj_idx) 1543 { 1544 unsigned int offset; 1545 int index = *obj_idx; 1546 unsigned long handle = 0; 1547 void *addr = kmap_atomic(page); 1548 1549 offset = get_first_obj_offset(page); 1550 offset += class->size * index; 1551 1552 while (offset < PAGE_SIZE) { 1553 if (obj_allocated(page, addr + offset, &handle)) 1554 break; 1555 1556 offset += class->size; 1557 index++; 1558 } 1559 1560 kunmap_atomic(addr); 1561 1562 *obj_idx = index; 1563 1564 return handle; 1565 } 1566 1567 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, 1568 struct zspage *dst_zspage) 1569 { 1570 unsigned long used_obj, free_obj; 1571 unsigned long handle; 1572 int obj_idx = 0; 1573 struct page *s_page = get_first_page(src_zspage); 1574 struct size_class *class = pool->size_class[src_zspage->class]; 1575 1576 while (1) { 1577 handle = find_alloced_obj(class, s_page, &obj_idx); 1578 if (!handle) { 1579 s_page = get_next_page(s_page); 1580 if (!s_page) 1581 break; 1582 obj_idx = 0; 1583 continue; 1584 } 1585 1586 used_obj = handle_to_obj(handle); 1587 free_obj = obj_malloc(pool, dst_zspage, handle); 1588 zs_object_copy(class, free_obj, used_obj); 1589 obj_idx++; 1590 obj_free(class->size, used_obj); 1591 1592 /* Stop if there is no more space */ 1593 if (zspage_full(class, dst_zspage)) 1594 break; 1595 1596 /* Stop if there are no more objects to migrate */ 1597 if (zspage_empty(src_zspage)) 1598 break; 1599 } 1600 } 1601 1602 static struct zspage *isolate_src_zspage(struct size_class *class) 1603 { 1604 struct zspage *zspage; 1605 int fg; 1606 1607 for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) { 1608 zspage = list_first_entry_or_null(&class->fullness_list[fg], 1609 struct zspage, list); 1610 if (zspage) { 1611 remove_zspage(class, zspage); 1612 return zspage; 1613 } 1614 } 1615 1616 return zspage; 1617 } 1618 1619 static struct zspage *isolate_dst_zspage(struct size_class *class) 1620 { 1621 struct zspage *zspage; 1622 int fg; 1623 1624 for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) { 1625 zspage = list_first_entry_or_null(&class->fullness_list[fg], 1626 struct zspage, list); 1627 if (zspage) { 1628 remove_zspage(class, zspage); 1629 return zspage; 1630 } 1631 } 1632 1633 return zspage; 1634 } 1635 1636 /* 1637 * putback_zspage - add @zspage into right class's fullness list 1638 * @class: destination class 1639 * @zspage: target page 1640 * 1641 * Return @zspage's fullness status 1642 */ 1643 static int putback_zspage(struct size_class *class, struct zspage *zspage) 1644 { 1645 int fullness; 1646 1647 fullness = get_fullness_group(class, zspage); 1648 insert_zspage(class, zspage, fullness); 1649 1650 return fullness; 1651 } 1652 1653 #ifdef CONFIG_COMPACTION 1654 /* 1655 * To prevent zspage destroy during migration, zspage freeing should 1656 * hold 
locks of all pages in the zspage. 1657 */ 1658 static void lock_zspage(struct zspage *zspage) 1659 { 1660 struct page *curr_page, *page; 1661 1662 /* 1663 * Pages we haven't locked yet can be migrated off the list while we're 1664 * trying to lock them, so we need to be careful and only attempt to 1665 * lock each page under migrate_read_lock(). Otherwise, the page we lock 1666 * may no longer belong to the zspage. This means that we may wait for 1667 * the wrong page to unlock, so we must take a reference to the page 1668 * prior to waiting for it to unlock outside migrate_read_lock(). 1669 */ 1670 while (1) { 1671 migrate_read_lock(zspage); 1672 page = get_first_page(zspage); 1673 if (trylock_page(page)) 1674 break; 1675 get_page(page); 1676 migrate_read_unlock(zspage); 1677 wait_on_page_locked(page); 1678 put_page(page); 1679 } 1680 1681 curr_page = page; 1682 while ((page = get_next_page(curr_page))) { 1683 if (trylock_page(page)) { 1684 curr_page = page; 1685 } else { 1686 get_page(page); 1687 migrate_read_unlock(zspage); 1688 wait_on_page_locked(page); 1689 put_page(page); 1690 migrate_read_lock(zspage); 1691 } 1692 } 1693 migrate_read_unlock(zspage); 1694 } 1695 #endif /* CONFIG_COMPACTION */ 1696 1697 static void migrate_lock_init(struct zspage *zspage) 1698 { 1699 rwlock_init(&zspage->lock); 1700 } 1701 1702 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock) 1703 { 1704 read_lock(&zspage->lock); 1705 } 1706 1707 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock) 1708 { 1709 read_unlock(&zspage->lock); 1710 } 1711 1712 static void migrate_write_lock(struct zspage *zspage) 1713 { 1714 write_lock(&zspage->lock); 1715 } 1716 1717 static void migrate_write_unlock(struct zspage *zspage) 1718 { 1719 write_unlock(&zspage->lock); 1720 } 1721 1722 #ifdef CONFIG_COMPACTION 1723 1724 static const struct movable_operations zsmalloc_mops; 1725 1726 static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1727 struct page *newpage, struct page *oldpage) 1728 { 1729 struct page *page; 1730 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; 1731 int idx = 0; 1732 1733 page = get_first_page(zspage); 1734 do { 1735 if (page == oldpage) 1736 pages[idx] = newpage; 1737 else 1738 pages[idx] = page; 1739 idx++; 1740 } while ((page = get_next_page(page)) != NULL); 1741 1742 create_page_chain(class, zspage, pages); 1743 set_first_obj_offset(newpage, get_first_obj_offset(oldpage)); 1744 if (unlikely(ZsHugePage(zspage))) 1745 newpage->index = oldpage->index; 1746 __SetPageMovable(newpage, &zsmalloc_mops); 1747 } 1748 1749 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) 1750 { 1751 /* 1752 * Page is locked so zspage couldn't be destroyed. For detail, look at 1753 * lock_zspage in free_zspage. 1754 */ 1755 VM_BUG_ON_PAGE(PageIsolated(page), page); 1756 1757 return true; 1758 } 1759 1760 static int zs_page_migrate(struct page *newpage, struct page *page, 1761 enum migrate_mode mode) 1762 { 1763 struct zs_pool *pool; 1764 struct size_class *class; 1765 struct zspage *zspage; 1766 struct page *dummy; 1767 void *s_addr, *d_addr, *addr; 1768 unsigned int offset; 1769 unsigned long handle; 1770 unsigned long old_obj, new_obj; 1771 unsigned int obj_idx; 1772 1773 VM_BUG_ON_PAGE(!PageIsolated(page), page); 1774 1775 /* We're committed, tell the world that this is a Zsmalloc page. 
*/ 1776 __SetPageZsmalloc(newpage); 1777 1778 /* The page is locked, so this pointer must remain valid */ 1779 zspage = get_zspage(page); 1780 pool = zspage->pool; 1781 1782 /* 1783 * The pool migrate_lock protects the race between zpage migration 1784 * and zs_free. 1785 */ 1786 write_lock(&pool->migrate_lock); 1787 class = zspage_class(pool, zspage); 1788 1789 /* 1790 * the class lock protects zpage alloc/free in the zspage. 1791 */ 1792 spin_lock(&class->lock); 1793 /* the migrate_write_lock protects zpage access via zs_map_object */ 1794 migrate_write_lock(zspage); 1795 1796 offset = get_first_obj_offset(page); 1797 s_addr = kmap_atomic(page); 1798 1799 /* 1800 * Here, any user cannot access all objects in the zspage so let's move. 1801 */ 1802 d_addr = kmap_atomic(newpage); 1803 copy_page(d_addr, s_addr); 1804 kunmap_atomic(d_addr); 1805 1806 for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE; 1807 addr += class->size) { 1808 if (obj_allocated(page, addr, &handle)) { 1809 1810 old_obj = handle_to_obj(handle); 1811 obj_to_location(old_obj, &dummy, &obj_idx); 1812 new_obj = (unsigned long)location_to_obj(newpage, 1813 obj_idx); 1814 record_obj(handle, new_obj); 1815 } 1816 } 1817 kunmap_atomic(s_addr); 1818 1819 replace_sub_page(class, zspage, newpage, page); 1820 /* 1821 * Since we complete the data copy and set up new zspage structure, 1822 * it's okay to release migration_lock. 1823 */ 1824 write_unlock(&pool->migrate_lock); 1825 spin_unlock(&class->lock); 1826 migrate_write_unlock(zspage); 1827 1828 get_page(newpage); 1829 if (page_zone(newpage) != page_zone(page)) { 1830 dec_zone_page_state(page, NR_ZSPAGES); 1831 inc_zone_page_state(newpage, NR_ZSPAGES); 1832 } 1833 1834 reset_page(page); 1835 put_page(page); 1836 1837 return MIGRATEPAGE_SUCCESS; 1838 } 1839 1840 static void zs_page_putback(struct page *page) 1841 { 1842 VM_BUG_ON_PAGE(!PageIsolated(page), page); 1843 } 1844 1845 static const struct movable_operations zsmalloc_mops = { 1846 .isolate_page = zs_page_isolate, 1847 .migrate_page = zs_page_migrate, 1848 .putback_page = zs_page_putback, 1849 }; 1850 1851 /* 1852 * Caller should hold page_lock of all pages in the zspage 1853 * In here, we cannot use zspage meta data. 
1854 */ 1855 static void async_free_zspage(struct work_struct *work) 1856 { 1857 int i; 1858 struct size_class *class; 1859 struct zspage *zspage, *tmp; 1860 LIST_HEAD(free_pages); 1861 struct zs_pool *pool = container_of(work, struct zs_pool, 1862 free_work); 1863 1864 for (i = 0; i < ZS_SIZE_CLASSES; i++) { 1865 class = pool->size_class[i]; 1866 if (class->index != i) 1867 continue; 1868 1869 spin_lock(&class->lock); 1870 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], 1871 &free_pages); 1872 spin_unlock(&class->lock); 1873 } 1874 1875 list_for_each_entry_safe(zspage, tmp, &free_pages, list) { 1876 list_del(&zspage->list); 1877 lock_zspage(zspage); 1878 1879 class = zspage_class(pool, zspage); 1880 spin_lock(&class->lock); 1881 class_stat_sub(class, ZS_INUSE_RATIO_0, 1); 1882 __free_zspage(pool, class, zspage); 1883 spin_unlock(&class->lock); 1884 } 1885 }; 1886 1887 static void kick_deferred_free(struct zs_pool *pool) 1888 { 1889 schedule_work(&pool->free_work); 1890 } 1891 1892 static void zs_flush_migration(struct zs_pool *pool) 1893 { 1894 flush_work(&pool->free_work); 1895 } 1896 1897 static void init_deferred_free(struct zs_pool *pool) 1898 { 1899 INIT_WORK(&pool->free_work, async_free_zspage); 1900 } 1901 1902 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) 1903 { 1904 struct page *page = get_first_page(zspage); 1905 1906 do { 1907 WARN_ON(!trylock_page(page)); 1908 __SetPageMovable(page, &zsmalloc_mops); 1909 unlock_page(page); 1910 } while ((page = get_next_page(page)) != NULL); 1911 } 1912 #else 1913 static inline void zs_flush_migration(struct zs_pool *pool) { } 1914 #endif 1915 1916 /* 1917 * 1918 * Based on the number of unused allocated objects calculate 1919 * and return the number of pages that we can free. 
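 *
 * Worked example with hypothetical numbers: if obj_allocated == 300,
 * obj_used == 100 and objs_per_zspage == 50, then obj_wasted == 200,
 * i.e. 4 zspages' worth of unused objects, so 4 * pages_per_zspage
 * pages could be released by compacting this class.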
1920 */ 1921 static unsigned long zs_can_compact(struct size_class *class) 1922 { 1923 unsigned long obj_wasted; 1924 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); 1925 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); 1926 1927 if (obj_allocated <= obj_used) 1928 return 0; 1929 1930 obj_wasted = obj_allocated - obj_used; 1931 obj_wasted /= class->objs_per_zspage; 1932 1933 return obj_wasted * class->pages_per_zspage; 1934 } 1935 1936 static unsigned long __zs_compact(struct zs_pool *pool, 1937 struct size_class *class) 1938 { 1939 struct zspage *src_zspage = NULL; 1940 struct zspage *dst_zspage = NULL; 1941 unsigned long pages_freed = 0; 1942 1943 /* 1944 * protect the race between zpage migration and zs_free 1945 * as well as zpage allocation/free 1946 */ 1947 write_lock(&pool->migrate_lock); 1948 spin_lock(&class->lock); 1949 while (zs_can_compact(class)) { 1950 int fg; 1951 1952 if (!dst_zspage) { 1953 dst_zspage = isolate_dst_zspage(class); 1954 if (!dst_zspage) 1955 break; 1956 } 1957 1958 src_zspage = isolate_src_zspage(class); 1959 if (!src_zspage) 1960 break; 1961 1962 migrate_write_lock(src_zspage); 1963 migrate_zspage(pool, src_zspage, dst_zspage); 1964 migrate_write_unlock(src_zspage); 1965 1966 fg = putback_zspage(class, src_zspage); 1967 if (fg == ZS_INUSE_RATIO_0) { 1968 free_zspage(pool, class, src_zspage); 1969 pages_freed += class->pages_per_zspage; 1970 } 1971 src_zspage = NULL; 1972 1973 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 1974 || rwlock_is_contended(&pool->migrate_lock)) { 1975 putback_zspage(class, dst_zspage); 1976 dst_zspage = NULL; 1977 1978 spin_unlock(&class->lock); 1979 write_unlock(&pool->migrate_lock); 1980 cond_resched(); 1981 write_lock(&pool->migrate_lock); 1982 spin_lock(&class->lock); 1983 } 1984 } 1985 1986 if (src_zspage) 1987 putback_zspage(class, src_zspage); 1988 1989 if (dst_zspage) 1990 putback_zspage(class, dst_zspage); 1991 1992 spin_unlock(&class->lock); 1993 write_unlock(&pool->migrate_lock); 1994 1995 return pages_freed; 1996 } 1997 1998 unsigned long zs_compact(struct zs_pool *pool) 1999 { 2000 int i; 2001 struct size_class *class; 2002 unsigned long pages_freed = 0; 2003 2004 /* 2005 * Pool compaction is performed under pool->migrate_lock so it is basically 2006 * single-threaded. Having more than one thread in __zs_compact() 2007 * will increase pool->migrate_lock contention, which will impact other 2008 * zsmalloc operations that need pool->migrate_lock. 2009 */ 2010 if (atomic_xchg(&pool->compaction_in_progress, 1)) 2011 return 0; 2012 2013 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { 2014 class = pool->size_class[i]; 2015 if (class->index != i) 2016 continue; 2017 pages_freed += __zs_compact(pool, class); 2018 } 2019 atomic_long_add(pages_freed, &pool->stats.pages_compacted); 2020 atomic_set(&pool->compaction_in_progress, 0); 2021 2022 return pages_freed; 2023 } 2024 EXPORT_SYMBOL_GPL(zs_compact); 2025 2026 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) 2027 { 2028 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); 2029 } 2030 EXPORT_SYMBOL_GPL(zs_pool_stats); 2031 2032 static unsigned long zs_shrinker_scan(struct shrinker *shrinker, 2033 struct shrink_control *sc) 2034 { 2035 unsigned long pages_freed; 2036 struct zs_pool *pool = shrinker->private_data; 2037 2038 /* 2039 * Compact classes and calculate compaction delta. 2040 * Can run concurrently with a manually triggered 2041 * (by user) compaction. 
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = shrinker->private_data;

	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool);

	return pages_freed ? pages_freed : SHRINK_STOP;
}

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->scan_objects = zs_shrinker_scan;
	pool->shrinker->count_objects = zs_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->private_data = pool;

	shrinker_register(pool->shrinker);

	return 0;
}

static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste;

		waste = (i * PAGE_SIZE) % class_size;
		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}

	return chain_size;
}
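/*
 * Worked example (assuming PAGE_SIZE == 4096 and ZS_MAX_PAGES_PER_ZSPAGE
 * == 8, a common default for CONFIG_ZSMALLOC_CHAIN_SIZE): for
 * class_size == 176 the trailing waste is 4096 % 176 == 48 bytes for a
 * one-page chain, 8192 % 176 == 96 for two, 12288 % 176 == 144 for three
 * and 16384 % 176 == 16 for four pages; four pages is the minimum among
 * chains of 1..8 pages, so chain_size becomes 4. Power-of-two sizes
 * always get chain_size 1, since they tile a single page with no waste.
 */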
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);
	rwlock_init(&pool->migrate_lock);
	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate in reverse order, because the size of a size_class that we
	 * want to merge into must be larger than or equal to the current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from the biggest down to the smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds the handle size before it
			 * performs the size class search - so an object may be
			 * smaller than the huge class size, yet it can still
			 * end up in the huge class, because it grows by
			 * ZS_HANDLE_SIZE extra bytes right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get better memory utilization if we use one size_class
		 * for many different sizes whose size_classes share the same
		 * characteristics. So, we make a size_class point to the
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		fullness = ZS_INUSE_RATIO_0;
		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
			fullness++;
		}

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical, since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is optional. If registration
	 * fails we can still use the pool normally and the user can trigger
	 * compaction manually. Thus, ignore the return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
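/*
 * Minimal lifetime sketch for a zsmalloc user (hypothetical; error
 * handling for zs_malloc() is elided and the exact failure convention
 * depends on the kernel version). The returned handle is an opaque
 * value, not a dereferenceable pointer:
 *
 *	struct zs_pool *pool;
 *	unsigned long handle;
 *
 *	pool = zs_create_pool("example");
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	handle = zs_malloc(pool, 128, GFP_KERNEL);
 *	... access the object via the handle, then ...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */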
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_flush_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
			if (list_empty(&class->fullness_list[fg]))
				continue;

			pr_err("Class-%d fullness group %d is not empty\n",
			       class->size, fg);
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto out;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("zsmalloc memory allocator");