/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * This allocator is designed for use with zram. Thus, the allocator is
 * supposed to work well under low memory conditions. In particular, it
 * never attempts higher order page allocation which is very likely to
 * fail under memory pressure. On the other hand, if we just use single
 * (0-order) pages, it would suffer from very high fragmentation --
 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
 * This was one of the major issues with its predecessor (xvmalloc).
 *
 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
 * and links them together using various 'struct page' fields. These linked
 * pages act as a single higher-order page i.e. an object can span 0-order
 * page boundaries. The code refers to these linked pages as a single entity
 * called zspage.
 *
 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
 * since this satisfies the requirements of all its current users (in the
 * worst case, page is incompressible and is thus stored "as-is" i.e. in
 * uncompressed form). For allocation requests larger than this size, failure
 * is returned (see zs_malloc).
 *
 * Additionally, zs_malloc() does not return a dereferenceable pointer.
 * Instead, it returns an opaque handle (unsigned long) which encodes actual
 * location of the allocated object. The reason for this indirection is that
 * zsmalloc does not keep zspages permanently mapped since that would cause
 * issues on 32-bit systems where the VA region for kernel space mappings
 * is very small. So, before using the allocated memory, the object has to
 * be mapped using zs_map_object() to get a usable pointer and subsequently
 * unmapped using zs_unmap_object().
 *
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 *	For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->pages_per_zspage * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */
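
/*
 * Illustrative usage sketch (not compiled): a minimal write path as a
 * zram-like caller might implement it. The helper name, arguments and error
 * handling are assumptions made for illustration only; the zsmalloc calls
 * themselves (zs_malloc/zs_map_object/zs_unmap_object/zs_free) are the API
 * implemented below.
 */
#if 0
static int example_store(struct zs_pool *pool, const void *src, size_t len,
			unsigned long *phandle)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len);	/* opaque handle, not a pointer */
	if (!handle)
		return -ENOMEM;

	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);	/* must not sleep while mapped */

	*phandle = handle;	/* caller keeps the handle for zs_free() later */
	return 0;
}
#endif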

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER	2
#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS	36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS	BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS		(BITS_PER_LONG - _PFN_BITS)
#define OBJ_INDEX_MASK		((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - Large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably it's better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)
#define ZS_SIZE_CLASSES		((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
					ZS_SIZE_CLASS_DELTA + 1)
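
/*
 * Worked example (illustrative, assuming 4K pages and 64-bit longs with the
 * default MAX_PHYSMEM_BITS = BITS_PER_LONG):
 *	ZS_MAX_PAGES_PER_ZSPAGE = 1 << 2 = 4
 *	_PFN_BITS = 64 - 12 = 52, OBJ_INDEX_BITS = 64 - 52 = 12 (= PAGE_SHIFT)
 *	ZS_MIN_ALLOC_SIZE = MAX(32, (4 << 12) >> 12) = MAX(32, 4) = 32
 *	ZS_SIZE_CLASS_DELTA = 4096 >> 8 = 16
 *	ZS_SIZE_CLASSES = (4096 - 32) / 16 + 1 = 255
 * which matches the "255 size classes" noted above.
 */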

/*
 * We do not maintain any list for completely empty or full pages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	spinlock_t lock;

	/* stats */
	u64 pages_allocated;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	/* Handle of next free chunk (encodes <PFN, obj_idx>) */
	void *next;
};

struct zs_pool {
	struct size_class size_class[ZS_SIZE_CLASSES];

	gfp_t flags;	/* allocation flags used when growing pool */
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_size_bytes(pool);
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages, and each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
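
/*
 * Worked example (illustrative, assuming 4K pages, so ZS_MIN_ALLOC_SIZE = 32
 * and ZS_SIZE_CLASS_DELTA = 16): a request for 100 bytes gives
 *	idx = DIV_ROUND_UP(100 - 32, 16) = 5
 * i.e. the class whose chunk size is 32 + 5 * 16 = 112 bytes, the smallest
 * class that can hold 100 bytes (class 4 would only hold 96).
 */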

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}
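
/*
 * Worked example (illustrative): take a class whose zspage stores N = 16
 * objects. With fullness_threshold_frac = 4, the zspage is considered
 * ZS_ALMOST_EMPTY while inuse <= 16 / 4 = 4, ZS_ALMOST_FULL once inuse > 4,
 * ZS_EMPTY at inuse == 0 and ZS_FULL at inuse == 16. Crossing one of these
 * boundaries in zs_malloc()/zs_free() is what makes fix_fullness_group()
 * move the zspage between fullness lists.
 */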

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * On hardware platforms with physical memory starting at 0x0 the pfn
 * could be 0 so we ensure that the handle will never be 0 by adjusting the
 * encoded obj_idx value before encoding.
 */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= ((obj_idx + 1) & OBJ_INDEX_MASK);

	return (void *)handle;
}
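
/*
 * Worked example (illustrative, 4K pages so OBJ_INDEX_BITS = 12): encoding
 * the 4th object (obj_idx = 3) of the page with PFN 0x1a2b gives
 *	handle = (0x1a2b << 12) | (3 + 1) = 0x1a2b004
 * and obj_handle_to_location() below recovers PFN 0x1a2b and obj_idx 3 by
 * shifting and masking. The "+ 1" is what keeps a valid handle from ever
 * being 0.
 */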

/*
 * Decode <page, obj_idx> pair from the given object handle. We adjust the
 * decoded obj_idx back to its original value since it was adjusted in
 * obj_location_to_handle().
 */
static void obj_handle_to_location(unsigned long handle, struct page **page,
				unsigned long *obj_idx)
{
	*page = pfn_to_page(handle >> OBJ_INDEX_BITS);
	*obj_idx = (handle & OBJ_INDEX_MASK) - 1;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;

		/*
		 * Compute the offset of the first object starting in the
		 * next page. When the current page ends exactly on an
		 * object boundary, off is already PAGE_SIZE and must wrap
		 * to 0 rather than be advanced by another class->size.
		 */
		if (off < PAGE_SIZE)
			off += class->size;
		off %= PAGE_SIZE;
	}
}
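
/*
 * Illustrative layout built by init_zspage() (assuming 4K pages and a class
 * of size 3072, which get_pages_per_zspage() packs into 3 pages): objects
 * start at byte 0 and 3072 of page 0, byte 2048 of page 1 and byte 1024 of
 * page 2, so page1->index = 2048 and page2->index = 1024. The in-place
 * freelist then reads
 *	(page0, obj 0) -> (page0, obj 1) -> (page1, obj 0) -> (page2, obj 0)
 * and the last object's link is NULL (obj_location_to_handle(NULL, 0)).
 */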

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm_buf)
		free_page((unsigned long)area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}
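
/*
 * Worked example for the copy path above (illustrative, 4K pages): mapping a
 * 3072-byte object that starts at off = 3072 gives sizes[0] = 4096 - 3072 =
 * 1024 and sizes[1] = 3072 - 1024 = 2048, i.e. the first 1024 bytes are
 * copied from the tail of pages[0] and the remaining 2048 bytes from the
 * head of pages[1] into the per-cpu vm_buf, which is what the caller gets.
 */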

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
	int cpu;

#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static int zs_init(void)
{
	int cpu, ret;

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret)) {
			cpu_notifier_register_done();
			goto fail;
		}
	}

	cpu_notifier_register_done();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(gfp_t flags)
{
	int i, ovhd_size;
	struct zs_pool *pool;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->pages_per_zspage = get_pages_per_zspage(size);
	}

	pool->flags = flags;

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->pages_per_zspage;
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
				m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->pages_per_zspage;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
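
/*
 * Illustrative read path (not compiled; the helper name and arguments are
 * assumptions for illustration): the object must be mapped around every
 * access, and because the mapping area is per-cpu and returned with
 * preemption disabled, the caller must not sleep between zs_map_object()
 * and zs_unmap_object().
 */
#if 0
static void example_load(struct zs_pool *pool, unsigned long handle,
			void *dst, size_t len)
{
	void *src;

	src = zs_map_object(pool, handle, ZS_MM_RO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);
}
#endif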

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");