// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * lock ordering:
 *	page_lock
 *	pool->lock
 *	class->lock
 *	zspage->lock
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
#include "zpdesc.h"

#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN	8

#define ZS_HANDLE_SIZE	(sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#ifdef MAX_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
#else
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify whether the object is allocated.
 * It's okay to put the status bit in the least significant bit because
 * the header keeps a handle, which is a 4-byte-aligned address, so we
 * have room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG	1

#define OBJ_TAG_BITS	1
#define OBJ_TAG_MASK	OBJ_ALLOCATED_TAG

#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define HUGE_BITS	1
#define FULLNESS_BITS	4
#define CLASS_BITS	8
#define MAGIC_VAL_BITS	8

#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))

/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined).
 *    NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiples of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)

/*
 * Pages are distinguished by the ratio of used memory (that is the ratio
 * of ->inuse objects to all objects that page can store). For example,
 * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
 *
 * The number of fullness groups is not random. It allows us to keep
 * difference between the least busy page in the group (minimum permitted
 * number of ->inuse objects) and the most busy page (maximum permitted
 * number of ->inuse objects) at a reasonable value.
 */
enum fullness_group {
	ZS_INUSE_RATIO_0,
	ZS_INUSE_RATIO_10,
	/* NOTE: 8 more fullness groups here */
	ZS_INUSE_RATIO_99	= 10,
	ZS_INUSE_RATIO_100,
	NR_FULLNESS_GROUPS,
};

enum class_stat_type {
	/* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
	ZS_OBJS_ALLOCATED	= NR_FULLNESS_GROUPS,
	ZS_OBJS_INUSE,
	NR_CLASS_STAT_TYPES,
};

struct zs_size_stat {
	unsigned long objs[NR_CLASS_STAT_TYPES];
};

#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
#endif

static size_t huge_class_size;
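
/*
 * Worked example of the class geometry above (illustrative only; the exact
 * numbers depend on PAGE_SIZE, OBJ_INDEX_BITS and CONFIG_ZSMALLOC_CHAIN_SIZE).
 * Assuming 4K pages and ZS_MIN_ALLOC_SIZE == 32:
 *
 *	ZS_SIZE_CLASS_DELTA = 4096 >> 8            = 16 bytes
 *	ZS_SIZE_CLASSES     = (4096 - 32) / 16 + 1 = 255 classes
 *
 * i.e. class sizes are 32, 48, 64, ... up to PAGE_SIZE, which is where the
 * "255 size classes" figure in the comment above comes from.
 */
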
struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_FULLNESS_GROUPS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated objects
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

static struct kmem_cache *handle_cachep;
static struct kmem_cache *zspage_cachep;

struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker *shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct work_struct free_work;
#endif
	/* protect zspage migration/compaction */
	rwlock_t lock;
	atomic_t compaction_in_progress;
};

static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
	SetPagePrivate(zpdesc_page(zpdesc));
}

static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
{
	inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}

static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
{
	dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}

static inline struct zpdesc *alloc_zpdesc(gfp_t gfp, const int nid)
{
	struct page *page = alloc_pages_node(nid, gfp, 0);

	return page_zpdesc(page);
}

static inline void free_zpdesc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	/* PageZsmalloc is sticky until the page is freed to the buddy. */
	__free_page(page);
}

#define ZS_PAGE_UNLOCKED	0
#define ZS_PAGE_WRLOCKED	-1

struct zspage_lock {
	spinlock_t lock;
	int cnt;
	struct lockdep_map dep_map;
};

struct zspage {
	struct {
		unsigned int huge:HUGE_BITS;
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct zpdesc *first_zpdesc;
	struct list_head list; /* fullness list */
	struct zs_pool *pool;
	struct zspage_lock zsl;
};

static void zspage_lock_init(struct zspage *zspage)
{
	static struct lock_class_key __key;
	struct zspage_lock *zsl = &zspage->zsl;

	lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0);
	spin_lock_init(&zsl->lock);
	zsl->cnt = ZS_PAGE_UNLOCKED;
}

/*
 * The zspage lock can be held from atomic contexts, but it needs to remain
 * preemptible when held for reading because it remains held outside of those
 * atomic contexts, otherwise we unnecessarily lose preemptibility.
 *
 * To achieve this, the following rules are enforced on readers and writers:
 *
 * - Writers are blocked by both writers and readers, while readers are only
 *   blocked by writers (i.e. normal rwlock semantics).
 *
 * - Writers are always atomic (to allow readers to spin waiting for them).
 *
 * - Writers always use trylock (as the lock may be held by sleeping readers).
 *
 * - Readers may spin on the lock (as they can only wait for atomic writers).
 *
 * - Readers may sleep while holding the lock (as writes only use trylock).
 */
static void zspage_read_lock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);

	spin_lock(&zsl->lock);
	zsl->cnt++;
	spin_unlock(&zsl->lock);

	lock_acquired(&zsl->dep_map, _RET_IP_);
}

static void zspage_read_unlock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	rwsem_release(&zsl->dep_map, _RET_IP_);

	spin_lock(&zsl->lock);
	zsl->cnt--;
	spin_unlock(&zsl->lock);
}

static __must_check bool zspage_write_trylock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	spin_lock(&zsl->lock);
	if (zsl->cnt == ZS_PAGE_UNLOCKED) {
		zsl->cnt = ZS_PAGE_WRLOCKED;
		rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_);
		lock_acquired(&zsl->dep_map, _RET_IP_);
		return true;
	}

	spin_unlock(&zsl->lock);
	return false;
}

static void zspage_write_unlock(struct zspage *zspage)
{
	struct zspage_lock *zsl = &zspage->zsl;

	rwsem_release(&zsl->dep_map, _RET_IP_);

	zsl->cnt = ZS_PAGE_UNLOCKED;
	spin_unlock(&zsl->lock);
}
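
/*
 * Illustrative sketch (not compiled): the reader-side pattern used by the
 * zs_obj_read/write paths further down. pool->lock only pins the
 * handle->zspage lookup; once the zspage read lock is taken, pool->lock can
 * be dropped and the reader may even sleep, because migration writers only
 * ever trylock:
 *
 *	read_lock(&pool->lock);
 *	zspage = get_zspage(zpdesc);
 *	zspage_read_lock(zspage);
 *	read_unlock(&pool->lock);
 *	... access object memory ...
 *	zspage_read_unlock(zspage);
 */
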
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}

static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}

#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

static unsigned long cache_alloc_handle(gfp_t gfp)
{
	gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);

	return (unsigned long)kmem_cache_alloc(handle_cachep, gfp);
}

static void cache_free_handle(unsigned long handle)
{
	kmem_cache_free(handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(gfp_t gfp)
{
	gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE);

	return kmem_cache_zalloc(zspage_cachep, gfp);
}

static void cache_free_zspage(struct zspage *zspage)
{
	kmem_cache_free(zspage_cachep, zspage);
}

/* class->lock (which owns the handle) synchronizes races */
static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
{
	return PagePrivate(zpdesc_page(zpdesc));
}

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
{
	struct zpdesc *first_zpdesc = zspage->first_zpdesc;

	VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc));
	return first_zpdesc;
}

#define FIRST_OBJ_PAGE_TYPE_MASK	0xffffff

static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc)
{
	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
	return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK;
}

static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset)
{
	/* With 24 bits available, we can support offsets into 16 MiB pages. */
	BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
	VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc)));
	VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
	zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK;
	zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
}

static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

static struct size_class *zspage_class(struct zs_pool *pool,
				       struct zspage *zspage)
{
	return pool->size_class[zspage->class];
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				   ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}

static inline void class_stat_add(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void class_stat_sub(struct size_class *class, int type,
				  unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long class_stat_read(struct size_class *class, int type)
{
	return class->stats.objs[type];
}
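
/*
 * Worked example for get_size_class_index() (illustrative only, assuming 4K
 * pages, ZS_MIN_ALLOC_SIZE == 32 and therefore ZS_SIZE_CLASS_DELTA == 16):
 * a request for a 300-byte object gives
 *
 *	idx = DIV_ROUND_UP(300 - 32, 16) = 17
 *
 * i.e. it is served from the class whose chunk size is 32 + 17 * 16 = 304
 * bytes, the smallest class size that is >= 300.
 */
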
#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i, fg;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;
	unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };

	seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
			"class", "size", "10%", "20%", "30%", "40%",
			"50%", "60%", "70%", "80%", "90%", "99%", "100%",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {

		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);

		seq_printf(s, " %5u %5u ", i, class->size);
		for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
			inuse_totals[fg] += class_stat_read(class, fg);
			seq_printf(s, "%9lu ", class_stat_read(class, fg));
		}

		obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
		obj_used = class_stat_read(class, ZS_OBJS_INUSE);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
			   obj_allocated, obj_used, pages_used,
			   class->pages_per_zspage, freeable);

		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s ", "Total", "");

	for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
		seq_printf(s, "%9lu ", inuse_totals[fg]);

	seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
		   total_objs, total_used_objs, total_pages, "",
		   total_freeable);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);

static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);

	debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
			    &zs_stats_size_fops);
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif


/*
 * For each size class, zspages are divided into different groups
 * depending on their usage ratio. This function returns fullness
 * status of the given page.
 */
static int get_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int inuse, objs_per_zspage, ratio;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		return ZS_INUSE_RATIO_0;
	if (inuse == objs_per_zspage)
		return ZS_INUSE_RATIO_100;

	ratio = 100 * inuse / objs_per_zspage;
	/*
	 * Take integer division into consideration: a page with one inuse
	 * object out of 127 possible, will end up having 0 usage ratio,
	 * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group.
	 */
	return ratio / 10 + 1;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				int fullness)
{
	class_stat_add(class, fullness, 1);
	list_add(&zspage->list, &class->fullness_list[fullness]);
	zspage->fullness = fullness;
}
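
/*
 * Worked example for get_fullness_group() above (illustrative numbers only):
 * for a class with objs_per_zspage == 127 and a zspage with inuse == 1,
 *
 *	ratio = 100 * 1 / 127 = 0	=> group = 0 / 10 + 1 = ZS_INUSE_RATIO_10
 *
 * and with inuse == 120, ratio = 94 and group = 94 / 10 + 1 = ZS_INUSE_RATIO_99.
 */
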
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class, struct zspage *zspage)
{
	int fullness = zspage->fullness;

	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));

	list_del_init(&zspage->list);
	class_stat_sub(class, fullness, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, for instance, from
 * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function
 * checks if such a status change has occurred for the given page and
 * accordingly moves the page from the list of the old fullness group to that
 * of the new fullness group.
 */
static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int newfg;

	newfg = get_fullness_group(class, zspage);
	if (newfg == zspage->fullness)
		goto out;

	remove_zspage(class, zspage);
	insert_zspage(class, zspage, newfg);
out:
	return newfg;
}

static struct zspage *get_zspage(struct zpdesc *zpdesc)
{
	struct zspage *zspage = zpdesc->zspage;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
{
	struct zspage *zspage = get_zspage(zpdesc);

	if (unlikely(ZsHugePage(zspage)))
		return NULL;

	return zpdesc->next;
}

/**
 * obj_to_location - get (<zpdesc>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @zpdesc: zpdesc object resides in zspage
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc,
			    unsigned int *obj_idx)
{
	*zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc)
{
	*zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS);
}

/**
 * location_to_obj - get obj value encoded from (<zpdesc>, <obj_idx>)
 * @zpdesc: zpdesc object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx)
{
	unsigned long obj;

	obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;

	return obj;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj,
				 unsigned long *phandle)
{
	unsigned long handle;
	struct zspage *zspage = get_zspage(zpdesc);

	if (unlikely(ZsHugePage(zspage))) {
		VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc));
		handle = zpdesc->handle;
	} else
		handle = *(unsigned long *)obj;

	if (!(handle & OBJ_ALLOCATED_TAG))
		return false;

	/* Clear all tags before returning the handle */
	*phandle = handle & ~OBJ_TAG_MASK;
	return true;
}
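
/*
 * Illustrative sketch (not compiled) of the two levels of indirection used
 * above. A "handle" is the address of a slab slot that stores the encoded
 * object value; the encoded value packs the page frame number and the object
 * index within the zspage:
 *
 *	obj     = (zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
 *	*(unsigned long *)handle = obj;		// record_obj()
 *	pfn     = obj >> OBJ_INDEX_BITS;	// obj_to_location()
 *	obj_idx = obj & OBJ_INDEX_MASK;
 *
 * The extra indirection lets compaction move an object and update the encoded
 * value without the handle (the value callers keep) ever changing.
 */
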
static void reset_zpdesc(struct zpdesc *zpdesc)
{
	struct page *page = zpdesc_page(zpdesc);

	ClearPagePrivate(page);
	zpdesc->zspage = NULL;
	zpdesc->next = NULL;
	/* PageZsmalloc is sticky until the page is freed to the buddy. */
}

static int trylock_zspage(struct zspage *zspage)
{
	struct zpdesc *cursor, *fail;

	for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor =
					get_next_zpdesc(cursor)) {
		if (!zpdesc_trylock(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor =
					get_next_zpdesc(cursor))
		zpdesc_unlock(cursor);

	return 0;
}

static void __free_zspage(struct zs_pool *pool, struct size_class *class,
			  struct zspage *zspage)
{
	struct zpdesc *zpdesc, *next;

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);

	next = zpdesc = get_first_zpdesc(zspage);
	do {
		VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc));
		next = get_next_zpdesc(zpdesc);
		reset_zpdesc(zpdesc);
		zpdesc_unlock(zpdesc);
		zpdesc_dec_zone_page_state(zpdesc);
		zpdesc_put(zpdesc);
		zpdesc = next;
	} while (zpdesc != NULL);

	cache_free_zspage(zspage);

	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
}

static void free_zspage(struct zs_pool *pool, struct size_class *class,
			struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	/*
	 * Since zs_free() cannot sleep, this function cannot call lock_page.
	 * The page locks that trylock_zspage() took will be released by
	 * __free_zspage().
	 */
	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage);
	__free_zspage(pool, class, zspage);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct zpdesc *zpdesc = get_first_zpdesc(zspage);

	while (zpdesc) {
		struct zpdesc *next_zpdesc;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(zpdesc, off);

		vaddr = kmap_local_zpdesc(zpdesc);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_zpdesc = get_next_zpdesc(zpdesc);
		if (next_zpdesc) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit in the last link to tell
			 * whether it's an allocated object or not.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_local(vaddr);
		zpdesc = next_zpdesc;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}
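
/*
 * Example of the freelist built by init_zspage() (illustrative only): for a
 * class with size == 1024 on 4K pages (a single-page zspage holding four
 * objects), each free slot stores the index of the next free object shifted
 * left by OBJ_TAG_BITS, so the low bit stays free for OBJ_ALLOCATED_TAG:
 *
 *	slot 0: link->next = 1 << OBJ_TAG_BITS
 *	slot 1: link->next = 2 << OBJ_TAG_BITS
 *	slot 2: link->next = 3 << OBJ_TAG_BITS
 *	slot 3: link->next = -1UL << OBJ_TAG_BITS	(end of list)
 *
 * and zspage->freeobj starts at 0.
 */
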
static void create_page_chain(struct size_class *class, struct zspage *zspage,
			      struct zpdesc *zpdescs[])
{
	int i;
	struct zpdesc *zpdesc;
	struct zpdesc *prev_zpdesc = NULL;
	int nr_zpdescs = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using zpdesc->next
	 * 2. each sub-page points to zspage using zpdesc->zspage
	 *
	 * we set PG_private to identify the first zpdesc (i.e. no other zpdesc
	 * has this flag set).
	 */
	for (i = 0; i < nr_zpdescs; i++) {
		zpdesc = zpdescs[i];
		zpdesc->zspage = zspage;
		zpdesc->next = NULL;
		if (i == 0) {
			zspage->first_zpdesc = zpdesc;
			zpdesc_set_first(zpdesc);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetZsHugePage(zspage);
		} else {
			prev_zpdesc->next = zpdesc;
		}
		prev_zpdesc = zpdesc;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
				   struct size_class *class,
				   gfp_t gfp, const int nid)
{
	int i;
	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(gfp);

	if (!zspage)
		return NULL;

	if (!IS_ENABLED(CONFIG_COMPACTION))
		gfp &= ~__GFP_MOVABLE;

	zspage->magic = ZSPAGE_MAGIC;
	zspage->pool = pool;
	zspage->class = class->index;
	zspage_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct zpdesc *zpdesc;

		zpdesc = alloc_zpdesc(gfp, nid);
		if (!zpdesc) {
			while (--i >= 0) {
				zpdesc_dec_zone_page_state(zpdescs[i]);
				free_zpdesc(zpdescs[i]);
			}
			cache_free_zspage(zspage);
			return NULL;
		}
		__zpdesc_set_zsmalloc(zpdesc);

		zpdesc_inc_zone_page_state(zpdesc);
		zpdescs[i] = zpdesc;
	}

	create_page_chain(class, zspage, zpdescs);
	init_zspage(class, zspage);

	return zspage;
}

static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
						  struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}

static bool can_merge(struct size_class *prev, int pages_per_zspage,
		      int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

static bool zspage_empty(struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == 0;
}

/**
 * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
 * that holds objects of the provided size.
 * @pool: zsmalloc pool to use
 * @size: object size
 *
 * Context: Any context.
 *
 * Return: the index of the zsmalloc &size_class that holds objects of the
 * provided size.
 */
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
{
	struct size_class *class;

	class = pool->size_class[get_size_class_index(size)];

	return class->index;
}
EXPORT_SYMBOL_GPL(zs_lookup_class_index);

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
			size_t mem_len, void *local_copy)
{
	struct zspage *zspage;
	struct zpdesc *zpdesc;
	unsigned long obj, off;
	unsigned int obj_idx;
	struct size_class *class;
	void *addr;

	/* Guarantee we can get zspage from handle safely */
	read_lock(&pool->lock);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &zpdesc, &obj_idx);
	zspage = get_zspage(zpdesc);

	/* Make sure migration doesn't move any pages in this zspage */
	zspage_read_lock(zspage);
	read_unlock(&pool->lock);

	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	if (!ZsHugePage(zspage))
		off += ZS_HANDLE_SIZE;

	if (off + mem_len <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		addr = kmap_local_zpdesc(zpdesc);
		addr += off;
	} else {
		size_t sizes[2];

		/* this object spans two pages */
		sizes[0] = PAGE_SIZE - off;
		sizes[1] = mem_len - sizes[0];
		addr = local_copy;

		memcpy_from_page(addr, zpdesc_page(zpdesc),
				 off, sizes[0]);
		zpdesc = get_next_zpdesc(zpdesc);
		memcpy_from_page(addr + sizes[0],
				 zpdesc_page(zpdesc),
				 0, sizes[1]);
	}

	return addr;
}
EXPORT_SYMBOL_GPL(zs_obj_read_begin);

void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
		     size_t mem_len, void *handle_mem)
{
	struct zspage *zspage;
	struct zpdesc *zpdesc;
	unsigned long obj, off;
	unsigned int obj_idx;
	struct size_class *class;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &zpdesc, &obj_idx);
	zspage = get_zspage(zpdesc);
	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	if (!ZsHugePage(zspage))
		off += ZS_HANDLE_SIZE;

	if (off + mem_len <= PAGE_SIZE) {
		handle_mem -= off;
		kunmap_local(handle_mem);
	}

	zspage_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_obj_read_end);
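
/*
 * Illustrative caller-side sketch (not compiled): callers pass a local bounce
 * buffer as @local_copy so that objects spanning two pages can still be
 * returned as a single contiguous mapping. The buffer name and size below are
 * hypothetical, not part of this API:
 *
 *	char buf[ZS_MAX_ALLOC_SIZE];	// hypothetical per-CPU/local buffer
 *	void *mem;
 *
 *	mem = zs_obj_read_begin(pool, handle, len, buf);
 *	... read/decompress from mem[0..len) ...
 *	zs_obj_read_end(pool, handle, len, mem);
 */
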
void zs_obj_read_sg_begin(struct zs_pool *pool, unsigned long handle,
			  struct scatterlist *sg, size_t mem_len)
{
	struct zspage *zspage;
	struct zpdesc *zpdesc;
	unsigned long obj, off;
	unsigned int obj_idx;
	struct size_class *class;

	/* Guarantee we can get zspage from handle safely */
	read_lock(&pool->lock);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &zpdesc, &obj_idx);
	zspage = get_zspage(zpdesc);

	/* Make sure migration doesn't move any pages in this zspage */
	zspage_read_lock(zspage);
	read_unlock(&pool->lock);

	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	if (!ZsHugePage(zspage))
		off += ZS_HANDLE_SIZE;

	if (off + mem_len <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		sg_init_table(sg, 1);
		sg_set_page(sg, zpdesc_page(zpdesc), mem_len, off);
	} else {
		size_t sizes[2];

		/* this object spans two pages */
		sizes[0] = PAGE_SIZE - off;
		sizes[1] = mem_len - sizes[0];

		sg_init_table(sg, 2);
		sg_set_page(sg, zpdesc_page(zpdesc), sizes[0], off);

		zpdesc = get_next_zpdesc(zpdesc);
		sg = sg_next(sg);

		sg_set_page(sg, zpdesc_page(zpdesc), sizes[1], 0);
	}
}
EXPORT_SYMBOL_GPL(zs_obj_read_sg_begin);

void zs_obj_read_sg_end(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct zpdesc *zpdesc;
	unsigned long obj;
	unsigned int obj_idx;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &zpdesc, &obj_idx);
	zspage = get_zspage(zpdesc);

	zspage_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_obj_read_sg_end);

void zs_obj_write(struct zs_pool *pool, unsigned long handle,
		  void *handle_mem, size_t mem_len)
{
	struct zspage *zspage;
	struct zpdesc *zpdesc;
	unsigned long obj, off;
	unsigned int obj_idx;
	struct size_class *class;

	/* Guarantee we can get zspage from handle safely */
	read_lock(&pool->lock);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &zpdesc, &obj_idx);
	zspage = get_zspage(zpdesc);

	/* Make sure migration doesn't move any pages in this zspage */
	zspage_read_lock(zspage);
	read_unlock(&pool->lock);

	class = zspage_class(pool, zspage);
	off = offset_in_page(class->size * obj_idx);

	if (!ZsHugePage(zspage))
		off += ZS_HANDLE_SIZE;

	if (off + mem_len <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		void *dst = kmap_local_zpdesc(zpdesc);

		memcpy(dst + off, handle_mem, mem_len);
		kunmap_local(dst);
	} else {
		/* this object spans two pages */
		size_t sizes[2];

		sizes[0] = PAGE_SIZE - off;
		sizes[1] = mem_len - sizes[0];

		memcpy_to_page(zpdesc_page(zpdesc), off,
			       handle_mem, sizes[0]);
		zpdesc = get_next_zpdesc(zpdesc);
		memcpy_to_page(zpdesc_page(zpdesc), 0,
			       handle_mem + sizes[0], sizes[1]);
	}

	zspage_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_obj_write);

/**
 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
 * zsmalloc &size_class.
 * @pool: zsmalloc pool to use
 *
 * The function returns the size of the first huge class - any object of equal
 * or bigger size will be stored in zspage consisting of a single physical
 * page.
 *
 * Context: Any context.
 *
 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
 */
size_t zs_huge_class_size(struct zs_pool *pool)
{
	return huge_class_size;
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);

static unsigned long obj_malloc(struct zs_pool *pool,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_zpdesc, offset;
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;

	struct zpdesc *m_zpdesc;
	unsigned long m_offset;
	void *vaddr;

	class = pool->size_class[zspage->class];
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_zpdesc = offset >> PAGE_SHIFT;
	m_offset = offset_in_page(offset);
	m_zpdesc = get_first_zpdesc(zspage);

	for (i = 0; i < nr_zpdesc; i++)
		m_zpdesc = get_next_zpdesc(m_zpdesc);

	vaddr = kmap_local_zpdesc(m_zpdesc);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!ZsHugePage(zspage)))
		/* record handle in the header of allocated chunk */
		link->handle = handle | OBJ_ALLOCATED_TAG;
	else
		zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG;

	kunmap_local(vaddr);
	mod_zspage_inuse(zspage, 1);

	obj = location_to_obj(m_zpdesc, obj);
	record_obj(handle, obj);

	return obj;
}


/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 * @nid: The preferred node id to allocate new zspage (if needed)
 *
 * On success, handle to the allocated object is returned,
 * otherwise an ERR_PTR().
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp,
			const int nid)
{
	unsigned long handle;
	struct size_class *class;
	int newfg;
	struct zspage *zspage;

	if (unlikely(!size))
		return (unsigned long)ERR_PTR(-EINVAL);

	if (unlikely(size > ZS_MAX_ALLOC_SIZE))
		return (unsigned long)ERR_PTR(-ENOSPC);

	handle = cache_alloc_handle(gfp);
	if (!handle)
		return (unsigned long)ERR_PTR(-ENOMEM);

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	/* class->lock effectively protects the zpage migration */
	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj_malloc(pool, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		class_stat_add(class, ZS_OBJS_INUSE, 1);

		goto out;
	}

	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp, nid);
	if (!zspage) {
		cache_free_handle(handle);
		return (unsigned long)ERR_PTR(-ENOMEM);
	}

	spin_lock(&class->lock);
	obj_malloc(pool, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
	class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
	class_stat_add(class, ZS_OBJS_INUSE, 1);

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
out:
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
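
/*
 * Illustrative caller-side sketch (not compiled): a typical object lifecycle
 * as seen by a compressed-memory backend. NUMA_NO_NODE and the error handling
 * style are the caller's choice, not requirements of this API:
 *
 *	unsigned long handle;
 *
 *	handle = zs_malloc(pool, len, GFP_KERNEL, NUMA_NO_NODE);
 *	if (IS_ERR_VALUE(handle))
 *		return PTR_ERR((void *)handle);
 *	zs_obj_write(pool, handle, src_buf, len);
 *	...
 *	zs_free(pool, handle);
 */
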
static void obj_free(int class_size, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct zpdesc *f_zpdesc;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj_to_location(obj, &f_zpdesc, &f_objidx);
	f_offset = offset_in_page(class_size * f_objidx);
	zspage = get_zspage(f_zpdesc);

	vaddr = kmap_local_zpdesc(f_zpdesc);
	link = (struct link_free *)(vaddr + f_offset);

	/* Insert this object in containing zspage's freelist */
	if (likely(!ZsHugePage(zspage)))
		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	else
		f_zpdesc->handle = 0;
	set_freeobj(zspage, f_objidx);

	kunmap_local(vaddr);
	mod_zspage_inuse(zspage, -1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct zpdesc *f_zpdesc;
	unsigned long obj;
	struct size_class *class;
	int fullness;

	if (IS_ERR_OR_NULL((void *)handle))
		return;

	/*
	 * The pool->lock protects the race with zpage's migration
	 * so it's safe to get the page from handle.
	 */
	read_lock(&pool->lock);
	obj = handle_to_obj(handle);
	obj_to_zpdesc(obj, &f_zpdesc);
	zspage = get_zspage(f_zpdesc);
	class = zspage_class(pool, zspage);
	spin_lock(&class->lock);
	read_unlock(&pool->lock);

	class_stat_sub(class, ZS_OBJS_INUSE, 1);
	obj_free(class->size, obj);

	fullness = fix_fullness_group(class, zspage);
	if (fullness == ZS_INUSE_RATIO_0)
		free_zspage(pool, class, zspage);

	spin_unlock(&class->lock);
	cache_free_handle(handle);
}
EXPORT_SYMBOL_GPL(zs_free);

static void zs_object_copy(struct size_class *class, unsigned long dst,
			   unsigned long src)
{
	struct zpdesc *s_zpdesc, *d_zpdesc;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_zpdesc, &s_objidx);
	obj_to_location(dst, &d_zpdesc, &d_objidx);

	s_off = offset_in_page(class->size * s_objidx);
	d_off = offset_in_page(class->size * d_objidx);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_local_zpdesc(s_zpdesc);
	d_addr = kmap_local_zpdesc(d_zpdesc);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		/*
		 * Calling kunmap_local(d_addr) is necessary. kunmap_local()
		 * calls must occur in reverse order of calls to kmap_local_page().
		 * So, to call kunmap_local(s_addr) we should first call
		 * kunmap_local(d_addr). For more details see
		 * Documentation/mm/highmem.rst.
		 */
		if (s_off >= PAGE_SIZE) {
			kunmap_local(d_addr);
			kunmap_local(s_addr);
			s_zpdesc = get_next_zpdesc(s_zpdesc);
			s_addr = kmap_local_zpdesc(s_zpdesc);
			d_addr = kmap_local_zpdesc(d_zpdesc);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_local(d_addr);
			d_zpdesc = get_next_zpdesc(d_zpdesc);
			d_addr = kmap_local_zpdesc(d_zpdesc);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_local(d_addr);
	kunmap_local(s_addr);
}

/*
 * Find an allocated object in the zspage, starting the search at the given
 * object index, and return its handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
				      struct zpdesc *zpdesc, int *obj_idx)
{
	unsigned int offset;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_local_zpdesc(zpdesc);

	offset = get_first_obj_offset(zpdesc);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		if (obj_allocated(zpdesc, addr + offset, &handle))
			break;

		offset += class->size;
		index++;
	}

	kunmap_local(addr);

	*obj_idx = index;

	return handle;
}

static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
			   struct zspage *dst_zspage)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	int obj_idx = 0;
	struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage);
	struct size_class *class = pool->size_class[src_zspage->class];

	while (1) {
		handle = find_alloced_obj(class, s_zpdesc, &obj_idx);
		if (!handle) {
			s_zpdesc = get_next_zpdesc(s_zpdesc);
			if (!s_zpdesc)
				break;
			obj_idx = 0;
			continue;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(pool, dst_zspage, handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		obj_free(class->size, used_obj);

		/* Stop if there is no more space */
		if (zspage_full(class, dst_zspage))
			break;

		/* Stop if there are no more objects to migrate */
		if (zspage_empty(src_zspage))
			break;
	}
}

static struct zspage *isolate_src_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}

	return zspage;
}

static struct zspage *isolate_dst_zspage(struct size_class *class)
{
	struct zspage *zspage;
	int fg;

	for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg],
						  struct zspage, list);
		if (zspage) {
			remove_zspage(class, zspage);
			return zspage;
		}
	}

	return zspage;
}

/*
 * putback_zspage - add @zspage into right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness status
 */
static int putback_zspage(struct size_class *class, struct zspage *zspage)
{
	int fullness;

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
/*
 * To prevent zspage destroy during migration, zspage freeing should
 * hold locks of all pages in the zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
	struct zpdesc *curr_zpdesc, *zpdesc;

	/*
	 * Pages we haven't locked yet can be migrated off the list while we're
	 * trying to lock them, so we need to be careful and only attempt to
	 * lock each page under zspage_read_lock(). Otherwise, the page we lock
	 * may no longer belong to the zspage.
	 * This means that we may wait for the wrong page to unlock, so we
	 * must take a reference to the page prior to waiting for it to unlock
	 * outside zspage_read_lock().
	 */
	while (1) {
		zspage_read_lock(zspage);
		zpdesc = get_first_zpdesc(zspage);
		if (zpdesc_trylock(zpdesc))
			break;
		zpdesc_get(zpdesc);
		zspage_read_unlock(zspage);
		zpdesc_wait_locked(zpdesc);
		zpdesc_put(zpdesc);
	}

	curr_zpdesc = zpdesc;
	while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
		if (zpdesc_trylock(zpdesc)) {
			curr_zpdesc = zpdesc;
		} else {
			zpdesc_get(zpdesc);
			zspage_read_unlock(zspage);
			zpdesc_wait_locked(zpdesc);
			zpdesc_put(zpdesc);
			zspage_read_lock(zspage);
		}
	}
	zspage_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */

#ifdef CONFIG_COMPACTION

static void replace_sub_page(struct size_class *class, struct zspage *zspage,
			     struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
{
	struct zpdesc *zpdesc;
	struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	unsigned int first_obj_offset;
	int idx = 0;

	zpdesc = get_first_zpdesc(zspage);
	do {
		if (zpdesc == oldzpdesc)
			zpdescs[idx] = newzpdesc;
		else
			zpdescs[idx] = zpdesc;
		idx++;
	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);

	create_page_chain(class, zspage, zpdescs);
	first_obj_offset = get_first_obj_offset(oldzpdesc);
	set_first_obj_offset(newzpdesc, first_obj_offset);
	if (unlikely(ZsHugePage(zspage)))
		newzpdesc->handle = oldzpdesc->handle;
	__zpdesc_set_movable(newzpdesc);
}

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	/*
	 * Page is locked so zspage can't be destroyed concurrently
	 * (see free_zspage()). But if the page was already destroyed
	 * (see reset_zpdesc()), refuse isolation here.
	 */
	return page_zpdesc(page)->zspage;
}

static int zs_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	struct zspage *zspage;
	struct zpdesc *dummy;
	struct zpdesc *newzpdesc = page_zpdesc(newpage);
	struct zpdesc *zpdesc = page_zpdesc(page);
	void *s_addr, *d_addr, *addr;
	unsigned int offset;
	unsigned long handle;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;

	/*
	 * TODO: nothing prevents a zspage from getting destroyed while
	 * it is isolated for migration, as the page lock is temporarily
	 * dropped after zs_page_isolate() succeeded: we should rework that
	 * and defer destroying such pages once they are un-isolated (putback)
	 * instead.
	 */
	if (!zpdesc->zspage)
		return 0;

	/* The page is locked, so this pointer must remain valid */
	zspage = get_zspage(zpdesc);
	pool = zspage->pool;

	/*
	 * The pool->lock (held for write) protects the race between zpage
	 * migration and zs_free.
	 */
	write_lock(&pool->lock);
	class = zspage_class(pool, zspage);

	/*
	 * the class lock protects zpage alloc/free in the zspage.
	 */
	spin_lock(&class->lock);
	/* the zspage write_lock protects zpage access via zs_obj_read/write() */
	if (!zspage_write_trylock(zspage)) {
		spin_unlock(&class->lock);
		write_unlock(&pool->lock);
		return -EINVAL;
	}

	/* We're committed, tell the world that this is a Zsmalloc page. */
	__zpdesc_set_zsmalloc(newzpdesc);

	offset = get_first_obj_offset(zpdesc);
	s_addr = kmap_local_zpdesc(zpdesc);

	/*
	 * At this point no user can access the objects in this zspage, so it
	 * is safe to copy the data.
	 */
	d_addr = kmap_local_zpdesc(newzpdesc);
	copy_page(d_addr, s_addr);
	kunmap_local(d_addr);

	for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
					addr += class->size) {
		if (obj_allocated(zpdesc, addr, &handle)) {

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx);
			record_obj(handle, new_obj);
		}
	}
	kunmap_local(s_addr);

	replace_sub_page(class, zspage, newzpdesc, zpdesc);
	/*
	 * Since the data copy is complete and the new zspage structure is
	 * set up, it's okay to release the pool lock.
	 */
	write_unlock(&pool->lock);
	spin_unlock(&class->lock);
	zspage_write_unlock(zspage);

	zpdesc_get(newzpdesc);
	if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
		zpdesc_dec_zone_page_state(zpdesc);
		zpdesc_inc_zone_page_state(newzpdesc);
	}

	reset_zpdesc(zpdesc);
	zpdesc_put(zpdesc);

	return 0;
}

static void zs_page_putback(struct page *page)
{
}

const struct movable_operations zsmalloc_mops = {
	.isolate_page = zs_page_isolate,
	.migrate_page = zs_page_migrate,
	.putback_page = zs_page_putback,
};

/*
 * Caller should hold page_lock of all pages in the zspage.
 * In here, we cannot use zspage meta data.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
				 &free_pages);
		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		class = zspage_class(pool, zspage);
		spin_lock(&class->lock);
		class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
		__free_zspage(pool, class, zspage);
		spin_unlock(&class->lock);
	}
}

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void zs_flush_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct zpdesc *zpdesc = get_first_zpdesc(zspage);

	do {
		WARN_ON(!zpdesc_trylock(zpdesc));
		__zpdesc_set_movable(zpdesc);
		zpdesc_unlock(zpdesc);
	} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
}
#else
static inline void zs_flush_migration(struct zs_pool *pool) { }
#endif

/*
 * Based on the number of unused allocated objects calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
	unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}

static unsigned long __zs_compact(struct zs_pool *pool,
				  struct size_class *class)
{
	struct zspage *src_zspage = NULL;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;

	/*
	 * protect the race between zpage migration and zs_free
	 * as well as zpage allocation/free
	 */
	write_lock(&pool->lock);
	spin_lock(&class->lock);
	while (zs_can_compact(class)) {
		int fg;

		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		if (!zspage_write_trylock(src_zspage))
			break;

		migrate_zspage(pool, src_zspage, dst_zspage);
		zspage_write_unlock(src_zspage);

		fg = putback_zspage(class, src_zspage);
		if (fg == ZS_INUSE_RATIO_0) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}
		src_zspage = NULL;

		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
		    || rwlock_is_contended(&pool->lock)) {
			putback_zspage(class, dst_zspage);
			dst_zspage = NULL;

			spin_unlock(&class->lock);
			write_unlock(&pool->lock);
			cond_resched();
			write_lock(&pool->lock);
			spin_lock(&class->lock);
		}
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	if (dst_zspage)
		putback_zspage(class, dst_zspage);

	spin_unlock(&class->lock);
	write_unlock(&pool->lock);

	return pages_freed;
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;
	unsigned long pages_freed = 0;

	/*
	 * Pool compaction is performed under pool->lock so it is basically
	 * single-threaded. Having more than one thread in __zs_compact()
	 * will increase pool->lock contention, which will impact other
	 * zsmalloc operations that need pool->lock.
	 */
	if (atomic_xchg(&pool->compaction_in_progress, 1))
		return 0;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;
		pages_freed += __zs_compact(pool, class);
	}
	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
	atomic_set(&pool->compaction_in_progress, 0);

	return pages_freed;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);
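
/*
 * Worked example for zs_can_compact() above (illustrative numbers only): a
 * class with objs_per_zspage == 8 and pages_per_zspage == 4 that currently
 * has obj_allocated == 160 and obj_used == 100 reports
 *
 *	obj_wasted = (160 - 100) / 8 = 7 zspages
 *	freeable   = 7 * 4           = 28 pages
 *
 * which is the figure the shrinker below accumulates in its ->count_objects
 * callback.
 */
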
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = shrinker->private_data;

	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool);

	return pages_freed ? pages_freed : SHRINK_STOP;
}

static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = shrinker->private_data;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	shrinker_free(pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
	if (!pool->shrinker)
		return -ENOMEM;

	pool->shrinker->scan_objects = zs_shrinker_scan;
	pool->shrinker->count_objects = zs_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->private_data = pool;

	shrinker_register(pool->shrinker);

	return 0;
}

static int calculate_zspage_chain_size(int class_size)
{
	int i, min_waste = INT_MAX;
	int chain_size = 1;

	if (is_power_of_2(class_size))
		return chain_size;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int waste;

		waste = (i * PAGE_SIZE) % class_size;
		if (waste < min_waste) {
			min_waste = waste;
			chain_size = i;
		}
	}

	return chain_size;
}
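
/*
 * Worked example for calculate_zspage_chain_size() (illustrative only,
 * assuming 4K pages and a chain limit of at least 3): for class_size == 720
 * the per-chain waste is
 *
 *	1 page:  4096  % 720 = 496 bytes
 *	2 pages: 8192  % 720 = 272 bytes
 *	3 pages: 12288 % 720 = 48 bytes	<- smallest, so chain_size = 3
 *
 * giving objs_per_zspage = 12288 / 720 = 17 in zs_create_pool() below.
 */
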
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);
	rwlock_init(&pool->lock);
	atomic_set(&pool->compaction_in_progress, 0);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	/*
	 * Iterate in reverse order, because the size of the size_class that
	 * we want to use for merging should be larger than or equal to the
	 * current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = calculate_zspage_chain_size(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from biggest down to smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * end up in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds handle size before it performs
			 * size class search - so object may be smaller than
			 * huge class size, yet it still can end up in the huge
			 * class because it grows by ZS_HANDLE_SIZE extra bytes
			 * right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get better memory utilization if we use one size_class
		 * for many different sizes whose size_classes have the same
		 * characteristics. So, we make size_class point to the
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		fullness = ZS_INUSE_RATIO_0;
		while (fullness < NR_FULLNESS_GROUPS) {
			INIT_LIST_HEAD(&class->fullness_list[fullness]);
			fullness++;
		}

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	/*
	 * Not critical since shrinker is only used to trigger internal
	 * defragmentation of the pool which is pretty optional thing. If
	 * registration fails we still can use the pool normally and user can
	 * trigger compaction manually. Thus, ignore return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_flush_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
			if (list_empty(&class->fullness_list[fg]))
				continue;

			pr_err("Class-%d fullness group %d is not empty\n",
			       class->size, fg);
		}
		kfree(class);
	}

	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static void zs_destroy_caches(void)
{
	kmem_cache_destroy(handle_cachep);
	handle_cachep = NULL;
	kmem_cache_destroy(zspage_cachep);
	zspage_cachep = NULL;
}

static int __init zs_init_caches(void)
{
	handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					  0, 0, NULL);
	zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					  0, 0, NULL);

	if (!handle_cachep || !zspage_cachep) {
		zs_destroy_caches();
		return -ENOMEM;
	}
	return 0;
}

static int __init zs_init(void)
{
	int rc;

	rc = zs_init_caches();
	if (rc)
		return rc;

#ifdef CONFIG_COMPACTION
	rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
	if (rc) {
		zs_destroy_caches();
		return rc;
	}
#endif
	zs_stat_init();
	return 0;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_COMPACTION
	set_movable_ops(NULL, PGTY_zsmalloc);
#endif
	zs_stat_exit();
	zs_destroy_caches();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("zsmalloc memory allocator");