/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *		If the page is first_page for huge object, it stores handle.
 *		Look at size_class->huge.
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER	2
#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
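 *
 * As an illustration only (the exact widths depend on the architecture's
 * MAX_PHYSMEM_BITS and PAGE_SHIFT): on a 64-bit system with 4K pages and
 * MAX_PHYSMEM_BITS == 46, _PFN_BITS is 34, which leaves 29 bits for the
 * object index plus 1 tag bit, i.e. a handle layout of
 * <PFN:34><obj_idx:29><tag:1>.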
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle stores the object position by
 * encoding <page, obj_idx>, and the encoded value leaves its least
 * significant bit unused (see obj_to_location).
 * We use that bit to synchronize object access between the user and
 * migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG so we can
 * tell whether the object is allocated or not.
 * It's okay to keep this status in the least significant bit because the
 * header holds the handle, which is a 4-byte-aligned address, so we have
 * room for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)

/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif

/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>)
		 * It's valid for non-allocated object
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
	bool huge;
};

static int create_handle_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	return pool->handle_cachep ? 0 : 1;
}

static void destroy_handle_cache(struct zs_pool *pool)
{
	if (pool->handle_cachep)
		kmem_cache_destroy(pool->handle_cachep);
}

static unsigned long alloc_handle(struct zs_pool *pool)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
		pool->flags & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}
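
/*
 * A handle is the address of a small slab object which in turn holds the
 * encoded <PFN, obj_idx> location: record_obj() stores the encoded value
 * and handle_to_obj() reads it back. This extra level of indirection is
 * what lets compaction relocate an object without changing the handle
 * that the user holds.
 */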

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(name, gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
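 *
 * For instance, assuming 4K pages (so ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_SIZE_CLASS_DELTA == 16), a request for 300 bytes maps to index
 * DIV_ROUND_UP(300 - 32, 16) == 17.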
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min(zs_size_classes - 1, idx);
}

#ifdef CONFIG_ZSMALLOC_STAT

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}

static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	struct dentry *entry;

	if (!zs_stat_root)
		return -ENODEV;

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return -ENOMEM;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		return -ENOMEM;
	}

	return 0;
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif


/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= 3 * max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct page *page)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * We use the least bit of handle for tagging.
 */
static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
	unsigned long obj;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= ((obj_idx) & OBJ_INDEX_MASK);
	obj <<= OBJ_TAG_BITS;

	return (void *)obj;
}

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * location_to_obj().
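 *
 * Illustration (assuming OBJ_INDEX_BITS == 29 and OBJ_TAG_BITS == 1 as in
 * the 64-bit example above): an object at PFN 0x1234 with obj_idx 5 was
 * encoded by location_to_obj() as ((0x1234 << 29) | 5) << 1; this function
 * simply reverses those shifts.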
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned long *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct size_class *class, struct page *page,
			void *obj)
{
	if (class->huge) {
		VM_BUG_ON(!is_first_page(page));
		return *(unsigned long *)page_private(page);
	} else
		return *(unsigned long *)obj;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static inline int trypin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
}

static void pin_tag(unsigned long handle)
{
	while (!trypin_tag(handle));
}

static void unpin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
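		 *
		 * As a sketch, for a class of size 3/8 * PAGE_SIZE the loop
		 * below links objects 0 -> 1 -> 2 within the first page and
		 * then chains the last link to object 0 of the next page.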
		 */
		if (page != first_page)
			page->index = off;

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = location_to_obj(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = location_to_obj(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = location_to_obj(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	if (!area->huge) {
		buf = buf + ZS_HANDLE_SIZE;
		size -= ZS_HANDLE_SIZE;
		off += ZS_HANDLE_SIZE;
	}

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}

static bool zspage_full(struct page *page)
{
	BUG_ON(!is_first_page(page));

	return page->inuse == page->objects;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
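	 * (The same per-cpu area is shared by every pool, so a map done
	 * from an interrupt could overwrite a mapping the interrupted
	 * task still holds.)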
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

static unsigned long obj_malloc(struct page *first_page,
		struct size_class *class, unsigned long handle)
{
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_objidx, m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = (unsigned long)first_page->freelist;
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	if (!class->huge)
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle in first_page->private */
		set_page_private(first_page, handle);
	kunmap_atomic(vaddr);
	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);

	return obj;
}


/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
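 *
 * Note that ZS_HANDLE_SIZE bytes are added to @size internally so the
 * handle can be stashed in the object header (see obj_malloc()), and the
 * returned handle must be passed to zs_map_object() before the memory
 * can be accessed.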
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long handle, obj;
	struct size_class *class;
	struct page *first_page;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = alloc_handle(pool);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page)) {
			free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = obj_malloc(first_page, class, handle);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(class, first_page);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

static void obj_free(struct zs_pool *pool, struct size_class *class,
			unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;
	int class_idx;
	enum fullness_group fullness;

	BUG_ON(!obj);

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	if (class->huge)
		set_page_private(first_page, 0);
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;
	first_page->inuse--;
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct page *first_page, *f_page;
	unsigned long obj, f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(pool, class, obj);
	fullness = fix_fullness_group(class, first_page);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
	spin_unlock(&class->lock);
	unpin_tag(handle);

	free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
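
/*
 * Copy a class->size sized object from location @src to location @dst,
 * mapping one source page and one destination page at a time with
 * kmap_atomic() so the copy also works when either object spans a page
 * boundary.
 */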
static void zs_object_copy(unsigned long src, unsigned long dst,
				struct size_class *class)
{
	struct page *s_page, *d_page;
	unsigned long s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			BUG_ON(!s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			BUG_ON(!d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find an allocated object in the zspage, starting from the given index,
 * and return its handle.
 */
static unsigned long find_alloced_obj(struct page *page, int index,
					struct size_class *class)
{
	unsigned long head;
	int offset = 0;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	if (!is_first_page(page))
		offset = page->index;
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(class, page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);
	return handle;
}

struct zs_compact_control {
	/* Source page for migration which could be a subpage of zspage. */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	/* Starting object index within @s_page from which to look for
	 * live objects in the subpage.
	 */
	int index;
	/* How many objects were migrated */
	int nr_migrated;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	unsigned long index = cc->index;
	int nr_migrated = 0;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(s_page, index, class);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			index = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(d_page)) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(d_page, class, handle);
		zs_object_copy(used_obj, free_obj, class);
		index++;
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(pool, class, used_obj);
		nr_migrated++;
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->index = index;
	cc->nr_migrated = nr_migrated;

	return ret;
}

static struct page *alloc_target_page(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page) {
			remove_zspage(page, class, i);
			break;
		}
	}

	return page;
}

static void putback_zspage(struct zs_pool *pool, struct size_class *class,
				struct page *first_page)
{
	enum fullness_group fullness;

	BUG_ON(!is_first_page(first_page));

	fullness = get_fullness_group(first_page);
	insert_zspage(first_page, class, fullness);
	set_zspage_mapping(first_page, class->index, fullness);

	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
			class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);

		free_zspage(first_page);
	}
}

static struct page *isolate_source_page(struct size_class *class)
{
	struct page *page;

	page = class->fullness_list[ZS_ALMOST_EMPTY];
	if (page)
		remove_zspage(page, class, ZS_ALMOST_EMPTY);

	return page;
}

static unsigned long __zs_compact(struct zs_pool *pool,
				struct size_class *class)
{
	int nr_to_migrate;
	struct zs_compact_control cc;
	struct page *src_page;
	struct page *dst_page = NULL;
	unsigned long nr_total_migrated = 0;

	spin_lock(&class->lock);
	while ((src_page = isolate_source_page(class))) {

		BUG_ON(!is_first_page(src_page));

		/* The goal is to migrate all live objects in source page */
		nr_to_migrate = src_page->inuse;
		cc.index = 0;
		cc.s_page = src_page;

		while ((dst_page = alloc_target_page(class))) {
			cc.d_page = dst_page;
			/*
			 * If there is no more space in dst_page, try to
			 * allocate another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(pool, class, dst_page);
			nr_total_migrated += cc.nr_migrated;
			nr_to_migrate -= cc.nr_migrated;
		}

		/* Stop if we couldn't find a slot */
		if (dst_page == NULL)
			break;

		putback_zspage(pool, class, dst_page);
		putback_zspage(pool, class, src_page);
		spin_unlock(&class->lock);
		nr_total_migrated += cc.nr_migrated;
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_page)
		putback_zspage(pool, class, src_page);

	spin_unlock(&class->lock);

	return nr_total_migrated;
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	unsigned long nr_migrated = 0;
	struct size_class *class;

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		nr_migrated += __zs_compact(pool, class);
	}

	return nr_migrated;
}
EXPORT_SYMBOL_GPL(zs_compact);

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: name of the pool
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_handle_cache(pool))
		goto err;

	/*
	 * Iterate in reverse order because the size of the size_class that
	 * we want to merge into should be greater than or equal to the
	 * current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_classes have the same
		 * characteristics. So, we make size_class point to the
		 * previous size_class if possible.
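		 *
		 * (For example, neighbouring sizes that end up with the same
		 * pages_per_zspage and the same objects-per-zspage are
		 * indistinguishable for allocation purposes and get merged.)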
1834 */ 1835 if (prev_class) { 1836 if (can_merge(prev_class, size, pages_per_zspage)) { 1837 pool->size_class[i] = prev_class; 1838 continue; 1839 } 1840 } 1841 1842 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); 1843 if (!class) 1844 goto err; 1845 1846 class->size = size; 1847 class->index = i; 1848 class->pages_per_zspage = pages_per_zspage; 1849 if (pages_per_zspage == 1 && 1850 get_maxobj_per_zspage(size, pages_per_zspage) == 1) 1851 class->huge = true; 1852 spin_lock_init(&class->lock); 1853 pool->size_class[i] = class; 1854 1855 prev_class = class; 1856 } 1857 1858 pool->flags = flags; 1859 1860 if (zs_pool_stat_create(name, pool)) 1861 goto err; 1862 1863 return pool; 1864 1865 err: 1866 zs_destroy_pool(pool); 1867 return NULL; 1868 } 1869 EXPORT_SYMBOL_GPL(zs_create_pool); 1870 1871 void zs_destroy_pool(struct zs_pool *pool) 1872 { 1873 int i; 1874 1875 zs_pool_stat_destroy(pool); 1876 1877 for (i = 0; i < zs_size_classes; i++) { 1878 int fg; 1879 struct size_class *class = pool->size_class[i]; 1880 1881 if (!class) 1882 continue; 1883 1884 if (class->index != i) 1885 continue; 1886 1887 for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { 1888 if (class->fullness_list[fg]) { 1889 pr_info("Freeing non-empty class with size %db, fullness group %d\n", 1890 class->size, fg); 1891 } 1892 } 1893 kfree(class); 1894 } 1895 1896 destroy_handle_cache(pool); 1897 kfree(pool->size_class); 1898 kfree(pool->name); 1899 kfree(pool); 1900 } 1901 EXPORT_SYMBOL_GPL(zs_destroy_pool); 1902 1903 static int __init zs_init(void) 1904 { 1905 int ret = zs_register_cpu_notifier(); 1906 1907 if (ret) 1908 goto notifier_fail; 1909 1910 init_zs_size_classes(); 1911 1912 #ifdef CONFIG_ZPOOL 1913 zpool_register_driver(&zs_zpool_driver); 1914 #endif 1915 1916 ret = zs_stat_init(); 1917 if (ret) { 1918 pr_err("zs stat initialization failed\n"); 1919 goto stat_fail; 1920 } 1921 return 0; 1922 1923 stat_fail: 1924 #ifdef CONFIG_ZPOOL 1925 zpool_unregister_driver(&zs_zpool_driver); 1926 #endif 1927 notifier_fail: 1928 zs_unregister_cpu_notifier(); 1929 1930 return ret; 1931 } 1932 1933 static void __exit zs_exit(void) 1934 { 1935 #ifdef CONFIG_ZPOOL 1936 zpool_unregister_driver(&zs_zpool_driver); 1937 #endif 1938 zs_unregister_cpu_notifier(); 1939 1940 zs_stat_exit(); 1941 } 1942 1943 module_init(zs_init); 1944 module_exit(zs_exit); 1945 1946 MODULE_LICENSE("Dual BSD/GPL"); 1947 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); 1948