// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
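
/*
 * For illustration (not additional configuration), with 4 KiB pages
 * (PAGE_SHIFT == 12) and the default NCHUNKS_ORDER of 6 the macros above
 * work out to:
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64 chunks per page
 *	ZHDR_CHUNKS  = chunks taken by the header (sizeof(struct
 *		       z3fold_header) rounded up to CHUNK_SIZE); this
 *		       depends on the architecture and on config options
 *		       such as CONFIG_DEBUG_SPINLOCK
 *	NCHUNKS      = TOTAL_CHUNKS - ZHDR_CHUNKS chunks usable for objects
 *
 * A 1000-byte object would then occupy size_to_chunks(1000) = 16 chunks.
 */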

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of handles to this page held in other pages'
 *			slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};
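
/*
 * Note on the @unbuddied lists (an invariant sketch, not extra state): an
 * unbuddied z3fold page sits on unbuddied[n] of some CPU, where n is the
 * number of free chunks in that page as returned by num_free_chunks().
 * __z3fold_alloc() can therefore start searching at
 * unbuddied[size_to_chunks(size)] and walk upwards, knowing that any page
 * it finds has enough room.
 */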

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				  (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
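
/*
 * Worked example for the two conversions above: z3fold_buddy_slots
 * structures are allocated from a kmem cache with SLOTS_ALIGN (64-byte)
 * alignment, and a handle is the address of one of the slot[] entries
 * inside such a structure. For slots at, say, 0x...140 and slot index 2,
 * the handle is 0x...140 + 2 * sizeof(unsigned long) = 0x...150, and
 * masking with ~(SLOTS_ALIGN - 1) recovers the base 0x...140.
 */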

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->mapped_count = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
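
/*
 * Layout of an encoded slot value for a non-HEADLESS buddy, as built by
 * __encode_handle() above (illustrative):
 *
 *	[ page address of the z3fold header | last_chunks << 2 | idx ]
 *	  (bits covered by PAGE_MASK)         (LAST buddies only)
 *
 * The low BUDDY_SHIFT bits hold the buddy index within the page; for LAST
 * buddies the object size in chunks is additionally stashed in the
 * remaining sub-page bits, which is what handle_to_chunks() reads so that
 * z3fold_map() can locate the object from the end of the page.
 */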

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
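
/*
 * Example (values in chunks, assuming ZHDR_CHUNKS == 1 and
 * TOTAL_CHUNKS == 64): for a page holding only a middle object of 20
 * chunks at start_middle == 30, nfree_before = 30 - 1 = 29 and
 * nfree_after = 64 - (30 + 20) = 14, so num_free_chunks() returns 29.
 * Only the larger of the two gaps is reported because a new buddy must
 * be contiguous.
 */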

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
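
/*
 * Example for the first BIG_CHUNK_GAP case above (first buddy present,
 * no last buddy; assuming ZHDR_CHUNKS == 1): with first_chunks == 4 and
 * a middle object at start_middle == 12, the gap is 12 - (4 + 1) = 7,
 * which is >= BIG_CHUNK_GAP, so the middle object is moved down to chunk
 * 5 and the free space is merged into one region at the end of the page.
 */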

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
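
/*
 * A minimal usage sketch of z3fold_alloc() above together with the map,
 * unmap and free calls defined later in this file (illustrative only --
 * in-tree users reach these functions through the zpool wrappers at the
 * end of this file rather than calling them directly):
 *
 *	unsigned long handle;
 *	void *dst;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		dst = z3fold_map(pool, handle);	// object kernel address
 *		memcpy(dst, src, len);
 *		z3fold_unmap(pool, handle);	// drop the mapping
 *		...
 *		z3fold_free(pool, handle);	// release the object
 *	}
 */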

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PAGE_CLAIMED flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;

			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
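
/*
 * Sketch of an eviction handler satisfying the contract described above
 * (illustrative; the real in-tree user is the zpool evict callback wired
 * up below, and write_back_somewhere() stands in for whatever backend
 * the user writes the object to):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = write_back_somewhere(obj);	// hypothetical
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;		// page goes back on the LRU
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */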

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}
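
/*
 * Note (derived from the code above): mapping a MIDDLE buddy sets
 * MIDDLE_CHUNK_MAPPED, which z3fold_compact_page() checks so that an
 * object is never memmove()d while a caller holds a pointer into it;
 * FIRST and LAST buddies are never moved within a page, so they need no
 * such bit. mapped_count likewise pins the page for compact_single_buddy()
 * and for migration.
 */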

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
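
/*
 * How the allocator above is reached in practice (a sketch of the generic
 * zpool API, through which all consumers go since z3fold exports no
 * symbols of its own; my_zpool_ops here is a hypothetical caller-supplied
 * operations structure):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *	void *ptr;
 *
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	ptr = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *	...
 *	zpool_unmap_handle(zp, handle);
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */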

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");