1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2018 HUAWEI, Inc. 4 * https://www.huawei.com/ 5 * Copyright (C) 2022 Alibaba Cloud 6 */ 7 #include "compress.h" 8 #include <linux/psi.h> 9 #include <linux/cpuhotplug.h> 10 #include <trace/events/erofs.h> 11 12 #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) 13 #define Z_EROFS_INLINE_BVECS 2 14 15 /* 16 * let's leave a type here in case of introducing 17 * another tagged pointer later. 18 */ 19 typedef void *z_erofs_next_pcluster_t; 20 21 struct z_erofs_bvec { 22 struct page *page; 23 int offset; 24 unsigned int end; 25 }; 26 27 #define __Z_EROFS_BVSET(name, total) \ 28 struct name { \ 29 /* point to the next page which contains the following bvecs */ \ 30 struct page *nextpage; \ 31 struct z_erofs_bvec bvec[total]; \ 32 } 33 __Z_EROFS_BVSET(z_erofs_bvset,); 34 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS); 35 36 /* 37 * Structure fields follow one of the following exclusion rules. 38 * 39 * I: Modifiable by initialization/destruction paths and read-only 40 * for everyone else; 41 * 42 * L: Field should be protected by the pcluster lock; 43 * 44 * A: Field should be accessed / updated in atomic for parallelized code. 45 */ 46 struct z_erofs_pcluster { 47 struct erofs_workgroup obj; 48 struct mutex lock; 49 50 /* A: point to next chained pcluster or TAILs */ 51 z_erofs_next_pcluster_t next; 52 53 /* L: the maximum decompression size of this round */ 54 unsigned int length; 55 56 /* L: total number of bvecs */ 57 unsigned int vcnt; 58 59 /* I: page offset of start position of decompression */ 60 unsigned short pageofs_out; 61 62 /* I: page offset of inline compressed data */ 63 unsigned short pageofs_in; 64 65 union { 66 /* L: inline a certain number of bvec for bootstrap */ 67 struct z_erofs_bvset_inline bvset; 68 69 /* I: can be used to free the pcluster by RCU. */ 70 struct rcu_head rcu; 71 }; 72 73 union { 74 /* I: physical cluster size in pages */ 75 unsigned short pclusterpages; 76 77 /* I: tailpacking inline compressed size */ 78 unsigned short tailpacking_size; 79 }; 80 81 /* I: compression algorithm format */ 82 unsigned char algorithmformat; 83 84 /* L: whether partial decompression or not */ 85 bool partial; 86 87 /* L: indicate several pageofs_outs or not */ 88 bool multibases; 89 90 /* A: compressed bvecs (can be cached or inplaced pages) */ 91 struct z_erofs_bvec compressed_bvecs[]; 92 }; 93 94 /* the end of a chain of pclusters */ 95 #define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA) 96 #define Z_EROFS_PCLUSTER_NIL (NULL) 97 98 struct z_erofs_decompressqueue { 99 struct super_block *sb; 100 atomic_t pending_bios; 101 z_erofs_next_pcluster_t head; 102 103 union { 104 struct completion done; 105 struct work_struct work; 106 struct kthread_work kthread_work; 107 } u; 108 bool eio, sync; 109 }; 110 111 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) 112 { 113 return !pcl->obj.index; 114 } 115 116 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) 117 { 118 if (z_erofs_is_inline_pcluster(pcl)) 119 return 1; 120 return pcl->pclusterpages; 121 } 122 123 /* 124 * bit 30: I/O error occurred on this page 125 * bit 0 - 29: remaining parts to complete this page 126 */ 127 #define Z_EROFS_PAGE_EIO (1 << 30) 128 129 static inline void z_erofs_onlinepage_init(struct page *page) 130 { 131 union { 132 atomic_t o; 133 unsigned long v; 134 } u = { .o = ATOMIC_INIT(1) }; 135 136 set_page_private(page, u.v); 137 smp_wmb(); 138 SetPagePrivate(page); 139 } 140 141 static inline void z_erofs_onlinepage_split(struct page *page) 142 { 143 atomic_inc((atomic_t *)&page->private); 144 } 145 146 static inline void z_erofs_page_mark_eio(struct page *page) 147 { 148 int orig; 149 150 do { 151 orig = atomic_read((atomic_t *)&page->private); 152 } while (atomic_cmpxchg((atomic_t *)&page->private, orig, 153 orig | Z_EROFS_PAGE_EIO) != orig); 154 } 155 156 static inline void z_erofs_onlinepage_endio(struct page *page) 157 { 158 unsigned int v; 159 160 DBG_BUGON(!PagePrivate(page)); 161 v = atomic_dec_return((atomic_t *)&page->private); 162 if (!(v & ~Z_EROFS_PAGE_EIO)) { 163 set_page_private(page, 0); 164 ClearPagePrivate(page); 165 if (!(v & Z_EROFS_PAGE_EIO)) 166 SetPageUptodate(page); 167 unlock_page(page); 168 } 169 } 170 171 #define Z_EROFS_ONSTACK_PAGES 32 172 173 /* 174 * since pclustersize is variable for big pcluster feature, introduce slab 175 * pools implementation for different pcluster sizes. 176 */ 177 struct z_erofs_pcluster_slab { 178 struct kmem_cache *slab; 179 unsigned int maxpages; 180 char name[48]; 181 }; 182 183 #define _PCLP(n) { .maxpages = n } 184 185 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = { 186 _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128), 187 _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES) 188 }; 189 190 struct z_erofs_bvec_iter { 191 struct page *bvpage; 192 struct z_erofs_bvset *bvset; 193 unsigned int nr, cur; 194 }; 195 196 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter) 197 { 198 if (iter->bvpage) 199 kunmap_local(iter->bvset); 200 return iter->bvpage; 201 } 202 203 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter) 204 { 205 unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec; 206 /* have to access nextpage in advance, otherwise it will be unmapped */ 207 struct page *nextpage = iter->bvset->nextpage; 208 struct page *oldpage; 209 210 DBG_BUGON(!nextpage); 211 oldpage = z_erofs_bvec_iter_end(iter); 212 iter->bvpage = nextpage; 213 iter->bvset = kmap_local_page(nextpage); 214 iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec); 215 iter->cur = 0; 216 return oldpage; 217 } 218 219 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter, 220 struct z_erofs_bvset_inline *bvset, 221 unsigned int bootstrap_nr, 222 unsigned int cur) 223 { 224 *iter = (struct z_erofs_bvec_iter) { 225 .nr = bootstrap_nr, 226 .bvset = (struct z_erofs_bvset *)bvset, 227 }; 228 229 while (cur > iter->nr) { 230 cur -= iter->nr; 231 z_erofs_bvset_flip(iter); 232 } 233 iter->cur = cur; 234 } 235 236 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, 237 struct z_erofs_bvec *bvec, 238 struct page **candidate_bvpage, 239 struct page **pagepool) 240 { 241 if (iter->cur >= iter->nr) { 242 struct page *nextpage = *candidate_bvpage; 243 244 if (!nextpage) { 245 nextpage = erofs_allocpage(pagepool, GFP_NOFS); 246 if (!nextpage) 247 return -ENOMEM; 248 set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); 249 } 250 DBG_BUGON(iter->bvset->nextpage); 251 iter->bvset->nextpage = nextpage; 252 z_erofs_bvset_flip(iter); 253 254 iter->bvset->nextpage = NULL; 255 *candidate_bvpage = NULL; 256 } 257 iter->bvset->bvec[iter->cur++] = *bvec; 258 return 0; 259 } 260 261 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter, 262 struct z_erofs_bvec *bvec, 263 struct page **old_bvpage) 264 { 265 if (iter->cur == iter->nr) 266 *old_bvpage = z_erofs_bvset_flip(iter); 267 else 268 *old_bvpage = NULL; 269 *bvec = iter->bvset->bvec[iter->cur++]; 270 } 271 272 static void z_erofs_destroy_pcluster_pool(void) 273 { 274 int i; 275 276 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 277 if (!pcluster_pool[i].slab) 278 continue; 279 kmem_cache_destroy(pcluster_pool[i].slab); 280 pcluster_pool[i].slab = NULL; 281 } 282 } 283 284 static int z_erofs_create_pcluster_pool(void) 285 { 286 struct z_erofs_pcluster_slab *pcs; 287 struct z_erofs_pcluster *a; 288 unsigned int size; 289 290 for (pcs = pcluster_pool; 291 pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { 292 size = struct_size(a, compressed_bvecs, pcs->maxpages); 293 294 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages); 295 pcs->slab = kmem_cache_create(pcs->name, size, 0, 296 SLAB_RECLAIM_ACCOUNT, NULL); 297 if (pcs->slab) 298 continue; 299 300 z_erofs_destroy_pcluster_pool(); 301 return -ENOMEM; 302 } 303 return 0; 304 } 305 306 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) 307 { 308 int i; 309 310 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 311 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; 312 struct z_erofs_pcluster *pcl; 313 314 if (nrpages > pcs->maxpages) 315 continue; 316 317 pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); 318 if (!pcl) 319 return ERR_PTR(-ENOMEM); 320 pcl->pclusterpages = nrpages; 321 return pcl; 322 } 323 return ERR_PTR(-EINVAL); 324 } 325 326 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl) 327 { 328 unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 329 int i; 330 331 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { 332 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; 333 334 if (pclusterpages > pcs->maxpages) 335 continue; 336 337 kmem_cache_free(pcs->slab, pcl); 338 return; 339 } 340 DBG_BUGON(1); 341 } 342 343 static struct workqueue_struct *z_erofs_workqueue __read_mostly; 344 345 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD 346 static struct kthread_worker __rcu **z_erofs_pcpu_workers; 347 348 static void erofs_destroy_percpu_workers(void) 349 { 350 struct kthread_worker *worker; 351 unsigned int cpu; 352 353 for_each_possible_cpu(cpu) { 354 worker = rcu_dereference_protected( 355 z_erofs_pcpu_workers[cpu], 1); 356 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL); 357 if (worker) 358 kthread_destroy_worker(worker); 359 } 360 kfree(z_erofs_pcpu_workers); 361 } 362 363 static struct kthread_worker *erofs_init_percpu_worker(int cpu) 364 { 365 struct kthread_worker *worker = 366 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu); 367 368 if (IS_ERR(worker)) 369 return worker; 370 if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI)) 371 sched_set_fifo_low(worker->task); 372 return worker; 373 } 374 375 static int erofs_init_percpu_workers(void) 376 { 377 struct kthread_worker *worker; 378 unsigned int cpu; 379 380 z_erofs_pcpu_workers = kcalloc(num_possible_cpus(), 381 sizeof(struct kthread_worker *), GFP_ATOMIC); 382 if (!z_erofs_pcpu_workers) 383 return -ENOMEM; 384 385 for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */ 386 worker = erofs_init_percpu_worker(cpu); 387 if (!IS_ERR(worker)) 388 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker); 389 } 390 return 0; 391 } 392 #else 393 static inline void erofs_destroy_percpu_workers(void) {} 394 static inline int erofs_init_percpu_workers(void) { return 0; } 395 #endif 396 397 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD) 398 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock); 399 static enum cpuhp_state erofs_cpuhp_state; 400 401 static int erofs_cpu_online(unsigned int cpu) 402 { 403 struct kthread_worker *worker, *old; 404 405 worker = erofs_init_percpu_worker(cpu); 406 if (IS_ERR(worker)) 407 return PTR_ERR(worker); 408 409 spin_lock(&z_erofs_pcpu_worker_lock); 410 old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu], 411 lockdep_is_held(&z_erofs_pcpu_worker_lock)); 412 if (!old) 413 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker); 414 spin_unlock(&z_erofs_pcpu_worker_lock); 415 if (old) 416 kthread_destroy_worker(worker); 417 return 0; 418 } 419 420 static int erofs_cpu_offline(unsigned int cpu) 421 { 422 struct kthread_worker *worker; 423 424 spin_lock(&z_erofs_pcpu_worker_lock); 425 worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu], 426 lockdep_is_held(&z_erofs_pcpu_worker_lock)); 427 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL); 428 spin_unlock(&z_erofs_pcpu_worker_lock); 429 430 synchronize_rcu(); 431 if (worker) 432 kthread_destroy_worker(worker); 433 return 0; 434 } 435 436 static int erofs_cpu_hotplug_init(void) 437 { 438 int state; 439 440 state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 441 "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline); 442 if (state < 0) 443 return state; 444 445 erofs_cpuhp_state = state; 446 return 0; 447 } 448 449 static void erofs_cpu_hotplug_destroy(void) 450 { 451 if (erofs_cpuhp_state) 452 cpuhp_remove_state_nocalls(erofs_cpuhp_state); 453 } 454 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */ 455 static inline int erofs_cpu_hotplug_init(void) { return 0; } 456 static inline void erofs_cpu_hotplug_destroy(void) {} 457 #endif 458 459 void z_erofs_exit_zip_subsystem(void) 460 { 461 erofs_cpu_hotplug_destroy(); 462 erofs_destroy_percpu_workers(); 463 destroy_workqueue(z_erofs_workqueue); 464 z_erofs_destroy_pcluster_pool(); 465 } 466 467 int __init z_erofs_init_zip_subsystem(void) 468 { 469 int err = z_erofs_create_pcluster_pool(); 470 471 if (err) 472 goto out_error_pcluster_pool; 473 474 z_erofs_workqueue = alloc_workqueue("erofs_worker", 475 WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus()); 476 if (!z_erofs_workqueue) { 477 err = -ENOMEM; 478 goto out_error_workqueue_init; 479 } 480 481 err = erofs_init_percpu_workers(); 482 if (err) 483 goto out_error_pcpu_worker; 484 485 err = erofs_cpu_hotplug_init(); 486 if (err < 0) 487 goto out_error_cpuhp_init; 488 return err; 489 490 out_error_cpuhp_init: 491 erofs_destroy_percpu_workers(); 492 out_error_pcpu_worker: 493 destroy_workqueue(z_erofs_workqueue); 494 out_error_workqueue_init: 495 z_erofs_destroy_pcluster_pool(); 496 out_error_pcluster_pool: 497 return err; 498 } 499 500 enum z_erofs_pclustermode { 501 Z_EROFS_PCLUSTER_INFLIGHT, 502 /* 503 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it 504 * could be dispatched into bypass queue later due to uptodated managed 505 * pages. All related online pages cannot be reused for inplace I/O (or 506 * bvpage) since it can be directly decoded without I/O submission. 507 */ 508 Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE, 509 /* 510 * The current collection has been linked with the owned chain, and 511 * could also be linked with the remaining collections, which means 512 * if the processing page is the tail page of the collection, thus 513 * the current collection can safely use the whole page (since 514 * the previous collection is under control) for in-place I/O, as 515 * illustrated below: 516 * ________________________________________________________________ 517 * | tail (partial) page | head (partial) page | 518 * | (of the current cl) | (of the previous collection) | 519 * | | | 520 * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________| 521 * 522 * [ (*) the above page can be used as inplace I/O. ] 523 */ 524 Z_EROFS_PCLUSTER_FOLLOWED, 525 }; 526 527 struct z_erofs_decompress_frontend { 528 struct inode *const inode; 529 struct erofs_map_blocks map; 530 struct z_erofs_bvec_iter biter; 531 532 struct page *pagepool; 533 struct page *candidate_bvpage; 534 struct z_erofs_pcluster *pcl; 535 z_erofs_next_pcluster_t owned_head; 536 enum z_erofs_pclustermode mode; 537 538 /* used for applying cache strategy on the fly */ 539 bool backmost; 540 erofs_off_t headoffset; 541 542 /* a pointer used to pick up inplace I/O pages */ 543 unsigned int icur; 544 }; 545 546 #define DECOMPRESS_FRONTEND_INIT(__i) { \ 547 .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \ 548 .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true } 549 550 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) 551 { 552 unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy; 553 554 if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) 555 return false; 556 557 if (fe->backmost) 558 return true; 559 560 if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND && 561 fe->map.m_la < fe->headoffset) 562 return true; 563 564 return false; 565 } 566 567 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) 568 { 569 struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); 570 struct z_erofs_pcluster *pcl = fe->pcl; 571 bool shouldalloc = z_erofs_should_alloc_cache(fe); 572 bool standalone = true; 573 /* 574 * optimistic allocation without direct reclaim since inplace I/O 575 * can be used if low memory otherwise. 576 */ 577 gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) | 578 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; 579 unsigned int i; 580 581 if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) 582 return; 583 584 for (i = 0; i < pcl->pclusterpages; ++i) { 585 struct page *page; 586 void *t; /* mark pages just found for debugging */ 587 struct page *newpage = NULL; 588 589 /* the compressed page was loaded before */ 590 if (READ_ONCE(pcl->compressed_bvecs[i].page)) 591 continue; 592 593 page = find_get_page(mc, pcl->obj.index + i); 594 595 if (page) { 596 t = (void *)((unsigned long)page | 1); 597 } else { 598 /* I/O is needed, no possible to decompress directly */ 599 standalone = false; 600 if (!shouldalloc) 601 continue; 602 603 /* 604 * try to use cached I/O if page allocation 605 * succeeds or fallback to in-place I/O instead 606 * to avoid any direct reclaim. 607 */ 608 newpage = erofs_allocpage(&fe->pagepool, gfp); 609 if (!newpage) 610 continue; 611 set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); 612 t = (void *)((unsigned long)newpage | 1); 613 } 614 615 if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t)) 616 continue; 617 618 if (page) 619 put_page(page); 620 else if (newpage) 621 erofs_pagepool_add(&fe->pagepool, newpage); 622 } 623 624 /* 625 * don't do inplace I/O if all compressed pages are available in 626 * managed cache since it can be moved to the bypass queue instead. 627 */ 628 if (standalone) 629 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 630 } 631 632 /* called by erofs_shrinker to get rid of all compressed_pages */ 633 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, 634 struct erofs_workgroup *grp) 635 { 636 struct z_erofs_pcluster *const pcl = 637 container_of(grp, struct z_erofs_pcluster, obj); 638 int i; 639 640 DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 641 /* 642 * refcount of workgroup is now freezed as 0, 643 * therefore no need to worry about available decompression users. 644 */ 645 for (i = 0; i < pcl->pclusterpages; ++i) { 646 struct page *page = pcl->compressed_bvecs[i].page; 647 648 if (!page) 649 continue; 650 651 /* block other users from reclaiming or migrating the page */ 652 if (!trylock_page(page)) 653 return -EBUSY; 654 655 if (!erofs_page_is_managed(sbi, page)) 656 continue; 657 658 /* barrier is implied in the following 'unlock_page' */ 659 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 660 detach_page_private(page); 661 unlock_page(page); 662 } 663 return 0; 664 } 665 666 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) 667 { 668 struct z_erofs_pcluster *pcl = folio_get_private(folio); 669 bool ret; 670 int i; 671 672 if (!folio_test_private(folio)) 673 return true; 674 675 ret = false; 676 spin_lock(&pcl->obj.lockref.lock); 677 if (pcl->obj.lockref.count > 0) 678 goto out; 679 680 DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); 681 for (i = 0; i < pcl->pclusterpages; ++i) { 682 if (pcl->compressed_bvecs[i].page == &folio->page) { 683 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 684 ret = true; 685 break; 686 } 687 } 688 if (ret) 689 folio_detach_private(folio); 690 out: 691 spin_unlock(&pcl->obj.lockref.lock); 692 return ret; 693 } 694 695 /* 696 * It will be called only on inode eviction. In case that there are still some 697 * decompression requests in progress, wait with rescheduling for a bit here. 698 * An extra lock could be introduced instead but it seems unnecessary. 699 */ 700 static void z_erofs_cache_invalidate_folio(struct folio *folio, 701 size_t offset, size_t length) 702 { 703 const size_t stop = length + offset; 704 705 /* Check for potential overflow in debug mode */ 706 DBG_BUGON(stop > folio_size(folio) || stop < length); 707 708 if (offset == 0 && stop == folio_size(folio)) 709 while (!z_erofs_cache_release_folio(folio, GFP_NOFS)) 710 cond_resched(); 711 } 712 713 static const struct address_space_operations z_erofs_cache_aops = { 714 .release_folio = z_erofs_cache_release_folio, 715 .invalidate_folio = z_erofs_cache_invalidate_folio, 716 }; 717 718 int erofs_init_managed_cache(struct super_block *sb) 719 { 720 struct inode *const inode = new_inode(sb); 721 722 if (!inode) 723 return -ENOMEM; 724 725 set_nlink(inode, 1); 726 inode->i_size = OFFSET_MAX; 727 inode->i_mapping->a_ops = &z_erofs_cache_aops; 728 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 729 EROFS_SB(sb)->managed_cache = inode; 730 return 0; 731 } 732 733 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, 734 struct z_erofs_bvec *bvec) 735 { 736 struct z_erofs_pcluster *const pcl = fe->pcl; 737 738 while (fe->icur > 0) { 739 if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, 740 NULL, bvec->page)) { 741 pcl->compressed_bvecs[fe->icur] = *bvec; 742 return true; 743 } 744 } 745 return false; 746 } 747 748 /* callers must be with pcluster lock held */ 749 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, 750 struct z_erofs_bvec *bvec, bool exclusive) 751 { 752 int ret; 753 754 if (exclusive) { 755 /* give priority for inplaceio to use file pages first */ 756 if (z_erofs_try_inplace_io(fe, bvec)) 757 return 0; 758 /* otherwise, check if it can be used as a bvpage */ 759 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && 760 !fe->candidate_bvpage) 761 fe->candidate_bvpage = bvec->page; 762 } 763 ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage, 764 &fe->pagepool); 765 fe->pcl->vcnt += (ret >= 0); 766 return ret; 767 } 768 769 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) 770 { 771 struct z_erofs_pcluster *pcl = f->pcl; 772 z_erofs_next_pcluster_t *owned_head = &f->owned_head; 773 774 /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */ 775 if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, 776 *owned_head) == Z_EROFS_PCLUSTER_NIL) { 777 *owned_head = &pcl->next; 778 /* so we can attach this pcluster to our submission chain. */ 779 f->mode = Z_EROFS_PCLUSTER_FOLLOWED; 780 return; 781 } 782 783 /* type 2, it belongs to an ongoing chain */ 784 f->mode = Z_EROFS_PCLUSTER_INFLIGHT; 785 } 786 787 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) 788 { 789 struct erofs_map_blocks *map = &fe->map; 790 bool ztailpacking = map->m_flags & EROFS_MAP_META; 791 struct z_erofs_pcluster *pcl; 792 struct erofs_workgroup *grp; 793 int err; 794 795 if (!(map->m_flags & EROFS_MAP_ENCODED) || 796 (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) { 797 DBG_BUGON(1); 798 return -EFSCORRUPTED; 799 } 800 801 /* no available pcluster, let's allocate one */ 802 pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 : 803 map->m_plen >> PAGE_SHIFT); 804 if (IS_ERR(pcl)) 805 return PTR_ERR(pcl); 806 807 spin_lock_init(&pcl->obj.lockref.lock); 808 pcl->algorithmformat = map->m_algorithmformat; 809 pcl->length = 0; 810 pcl->partial = true; 811 812 /* new pclusters should be claimed as type 1, primary and followed */ 813 pcl->next = fe->owned_head; 814 pcl->pageofs_out = map->m_la & ~PAGE_MASK; 815 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; 816 817 /* 818 * lock all primary followed works before visible to others 819 * and mutex_trylock *never* fails for a new pcluster. 820 */ 821 mutex_init(&pcl->lock); 822 DBG_BUGON(!mutex_trylock(&pcl->lock)); 823 824 if (ztailpacking) { 825 pcl->obj.index = 0; /* which indicates ztailpacking */ 826 pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa); 827 pcl->tailpacking_size = map->m_plen; 828 } else { 829 pcl->obj.index = map->m_pa >> PAGE_SHIFT; 830 831 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); 832 if (IS_ERR(grp)) { 833 err = PTR_ERR(grp); 834 goto err_out; 835 } 836 837 if (grp != &pcl->obj) { 838 fe->pcl = container_of(grp, 839 struct z_erofs_pcluster, obj); 840 err = -EEXIST; 841 goto err_out; 842 } 843 } 844 fe->owned_head = &pcl->next; 845 fe->pcl = pcl; 846 return 0; 847 848 err_out: 849 mutex_unlock(&pcl->lock); 850 z_erofs_free_pcluster(pcl); 851 return err; 852 } 853 854 static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe) 855 { 856 struct erofs_map_blocks *map = &fe->map; 857 struct erofs_workgroup *grp = NULL; 858 int ret; 859 860 DBG_BUGON(fe->pcl); 861 862 /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ 863 DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); 864 865 if (!(map->m_flags & EROFS_MAP_META)) { 866 grp = erofs_find_workgroup(fe->inode->i_sb, 867 map->m_pa >> PAGE_SHIFT); 868 } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) { 869 DBG_BUGON(1); 870 return -EFSCORRUPTED; 871 } 872 873 if (grp) { 874 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj); 875 ret = -EEXIST; 876 } else { 877 ret = z_erofs_register_pcluster(fe); 878 } 879 880 if (ret == -EEXIST) { 881 mutex_lock(&fe->pcl->lock); 882 z_erofs_try_to_claim_pcluster(fe); 883 } else if (ret) { 884 return ret; 885 } 886 z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset, 887 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt); 888 /* since file-backed online pages are traversed in reverse order */ 889 fe->icur = z_erofs_pclusterpages(fe->pcl); 890 return 0; 891 } 892 893 /* 894 * keep in mind that no referenced pclusters will be freed 895 * only after a RCU grace period. 896 */ 897 static void z_erofs_rcu_callback(struct rcu_head *head) 898 { 899 z_erofs_free_pcluster(container_of(head, 900 struct z_erofs_pcluster, rcu)); 901 } 902 903 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) 904 { 905 struct z_erofs_pcluster *const pcl = 906 container_of(grp, struct z_erofs_pcluster, obj); 907 908 call_rcu(&pcl->rcu, z_erofs_rcu_callback); 909 } 910 911 static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe) 912 { 913 struct z_erofs_pcluster *pcl = fe->pcl; 914 915 if (!pcl) 916 return false; 917 918 z_erofs_bvec_iter_end(&fe->biter); 919 mutex_unlock(&pcl->lock); 920 921 if (fe->candidate_bvpage) 922 fe->candidate_bvpage = NULL; 923 924 /* 925 * if all pending pages are added, don't hold its reference 926 * any longer if the pcluster isn't hosted by ourselves. 927 */ 928 if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE) 929 erofs_workgroup_put(&pcl->obj); 930 931 fe->pcl = NULL; 932 return true; 933 } 934 935 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos, 936 struct page *page, unsigned int pageofs, 937 unsigned int len) 938 { 939 struct super_block *sb = inode->i_sb; 940 struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode; 941 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 942 u8 *src, *dst; 943 unsigned int i, cnt; 944 945 if (!packed_inode) 946 return -EFSCORRUPTED; 947 948 buf.inode = packed_inode; 949 pos += EROFS_I(inode)->z_fragmentoff; 950 for (i = 0; i < len; i += cnt) { 951 cnt = min_t(unsigned int, len - i, 952 sb->s_blocksize - erofs_blkoff(sb, pos)); 953 src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP); 954 if (IS_ERR(src)) { 955 erofs_put_metabuf(&buf); 956 return PTR_ERR(src); 957 } 958 959 dst = kmap_local_page(page); 960 memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt); 961 kunmap_local(dst); 962 pos += cnt; 963 } 964 erofs_put_metabuf(&buf); 965 return 0; 966 } 967 968 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, 969 struct page *page) 970 { 971 struct inode *const inode = fe->inode; 972 struct erofs_map_blocks *const map = &fe->map; 973 const loff_t offset = page_offset(page); 974 bool tight = true, exclusive; 975 unsigned int cur, end, spiltted; 976 int err = 0; 977 978 /* register locked file pages as online pages in pack */ 979 z_erofs_onlinepage_init(page); 980 981 spiltted = 0; 982 end = PAGE_SIZE; 983 repeat: 984 cur = end - 1; 985 986 if (offset + cur < map->m_la || 987 offset + cur >= map->m_la + map->m_llen) { 988 if (z_erofs_collector_end(fe)) 989 fe->backmost = false; 990 map->m_la = offset + cur; 991 map->m_llen = 0; 992 err = z_erofs_map_blocks_iter(inode, map, 0); 993 if (err) 994 goto out; 995 } else { 996 if (fe->pcl) 997 goto hitted; 998 /* didn't get a valid pcluster previously (very rare) */ 999 } 1000 1001 if (!(map->m_flags & EROFS_MAP_MAPPED) || 1002 map->m_flags & EROFS_MAP_FRAGMENT) 1003 goto hitted; 1004 1005 err = z_erofs_collector_begin(fe); 1006 if (err) 1007 goto out; 1008 1009 if (z_erofs_is_inline_pcluster(fe->pcl)) { 1010 void *mp; 1011 1012 mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb, 1013 erofs_blknr(inode->i_sb, map->m_pa), 1014 EROFS_NO_KMAP); 1015 if (IS_ERR(mp)) { 1016 err = PTR_ERR(mp); 1017 erofs_err(inode->i_sb, 1018 "failed to get inline page, err %d", err); 1019 goto out; 1020 } 1021 get_page(fe->map.buf.page); 1022 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, 1023 fe->map.buf.page); 1024 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; 1025 } else { 1026 /* bind cache first when cached decompression is preferred */ 1027 z_erofs_bind_cache(fe); 1028 } 1029 hitted: 1030 /* 1031 * Ensure the current partial page belongs to this submit chain rather 1032 * than other concurrent submit chains or the noio(bypass) chain since 1033 * those chains are handled asynchronously thus the page cannot be used 1034 * for inplace I/O or bvpage (should be processed in a strict order.) 1035 */ 1036 tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); 1037 1038 cur = end - min_t(unsigned int, offset + end - map->m_la, end); 1039 if (!(map->m_flags & EROFS_MAP_MAPPED)) { 1040 zero_user_segment(page, cur, end); 1041 goto next_part; 1042 } 1043 if (map->m_flags & EROFS_MAP_FRAGMENT) { 1044 unsigned int pageofs, skip, len; 1045 1046 if (offset > map->m_la) { 1047 pageofs = 0; 1048 skip = offset - map->m_la; 1049 } else { 1050 pageofs = map->m_la & ~PAGE_MASK; 1051 skip = 0; 1052 } 1053 len = min_t(unsigned int, map->m_llen - skip, end - cur); 1054 err = z_erofs_read_fragment(inode, skip, page, pageofs, len); 1055 if (err) 1056 goto out; 1057 ++spiltted; 1058 tight = false; 1059 goto next_part; 1060 } 1061 1062 exclusive = (!cur && (!spiltted || tight)); 1063 if (cur) 1064 tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); 1065 1066 err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { 1067 .page = page, 1068 .offset = offset - map->m_la, 1069 .end = end, 1070 }), exclusive); 1071 if (err) 1072 goto out; 1073 1074 z_erofs_onlinepage_split(page); 1075 /* bump up the number of spiltted parts of a page */ 1076 ++spiltted; 1077 if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) 1078 fe->pcl->multibases = true; 1079 if (fe->pcl->length < offset + end - map->m_la) { 1080 fe->pcl->length = offset + end - map->m_la; 1081 fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; 1082 } 1083 if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && 1084 !(map->m_flags & EROFS_MAP_PARTIAL_REF) && 1085 fe->pcl->length == map->m_llen) 1086 fe->pcl->partial = false; 1087 next_part: 1088 /* shorten the remaining extent to update progress */ 1089 map->m_llen = offset + cur - map->m_la; 1090 map->m_flags &= ~EROFS_MAP_FULL_MAPPED; 1091 1092 end = cur; 1093 if (end > 0) 1094 goto repeat; 1095 1096 out: 1097 if (err) 1098 z_erofs_page_mark_eio(page); 1099 z_erofs_onlinepage_endio(page); 1100 return err; 1101 } 1102 1103 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi, 1104 unsigned int readahead_pages) 1105 { 1106 /* auto: enable for read_folio, disable for readahead */ 1107 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) && 1108 !readahead_pages) 1109 return true; 1110 1111 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) && 1112 (readahead_pages <= sbi->opt.max_sync_decompress_pages)) 1113 return true; 1114 1115 return false; 1116 } 1117 1118 static bool z_erofs_page_is_invalidated(struct page *page) 1119 { 1120 return !page->mapping && !z_erofs_is_shortlived_page(page); 1121 } 1122 1123 struct z_erofs_decompress_backend { 1124 struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; 1125 struct super_block *sb; 1126 struct z_erofs_pcluster *pcl; 1127 1128 /* pages with the longest decompressed length for deduplication */ 1129 struct page **decompressed_pages; 1130 /* pages to keep the compressed data */ 1131 struct page **compressed_pages; 1132 1133 struct list_head decompressed_secondary_bvecs; 1134 struct page **pagepool; 1135 unsigned int onstack_used, nr_pages; 1136 }; 1137 1138 struct z_erofs_bvec_item { 1139 struct z_erofs_bvec bvec; 1140 struct list_head list; 1141 }; 1142 1143 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, 1144 struct z_erofs_bvec *bvec) 1145 { 1146 struct z_erofs_bvec_item *item; 1147 1148 if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { 1149 unsigned int pgnr; 1150 1151 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; 1152 DBG_BUGON(pgnr >= be->nr_pages); 1153 if (!be->decompressed_pages[pgnr]) { 1154 be->decompressed_pages[pgnr] = bvec->page; 1155 return; 1156 } 1157 } 1158 1159 /* (cold path) one pcluster is requested multiple times */ 1160 item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL); 1161 item->bvec = *bvec; 1162 list_add(&item->list, &be->decompressed_secondary_bvecs); 1163 } 1164 1165 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, 1166 int err) 1167 { 1168 unsigned int off0 = be->pcl->pageofs_out; 1169 struct list_head *p, *n; 1170 1171 list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) { 1172 struct z_erofs_bvec_item *bvi; 1173 unsigned int end, cur; 1174 void *dst, *src; 1175 1176 bvi = container_of(p, struct z_erofs_bvec_item, list); 1177 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0; 1178 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset, 1179 bvi->bvec.end); 1180 dst = kmap_local_page(bvi->bvec.page); 1181 while (cur < end) { 1182 unsigned int pgnr, scur, len; 1183 1184 pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT; 1185 DBG_BUGON(pgnr >= be->nr_pages); 1186 1187 scur = bvi->bvec.offset + cur - 1188 ((pgnr << PAGE_SHIFT) - off0); 1189 len = min_t(unsigned int, end - cur, PAGE_SIZE - scur); 1190 if (!be->decompressed_pages[pgnr]) { 1191 err = -EFSCORRUPTED; 1192 cur += len; 1193 continue; 1194 } 1195 src = kmap_local_page(be->decompressed_pages[pgnr]); 1196 memcpy(dst + cur, src + scur, len); 1197 kunmap_local(src); 1198 cur += len; 1199 } 1200 kunmap_local(dst); 1201 if (err) 1202 z_erofs_page_mark_eio(bvi->bvec.page); 1203 z_erofs_onlinepage_endio(bvi->bvec.page); 1204 list_del(p); 1205 kfree(bvi); 1206 } 1207 } 1208 1209 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) 1210 { 1211 struct z_erofs_pcluster *pcl = be->pcl; 1212 struct z_erofs_bvec_iter biter; 1213 struct page *old_bvpage; 1214 int i; 1215 1216 z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0); 1217 for (i = 0; i < pcl->vcnt; ++i) { 1218 struct z_erofs_bvec bvec; 1219 1220 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage); 1221 1222 if (old_bvpage) 1223 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 1224 1225 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page)); 1226 z_erofs_do_decompressed_bvec(be, &bvec); 1227 } 1228 1229 old_bvpage = z_erofs_bvec_iter_end(&biter); 1230 if (old_bvpage) 1231 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); 1232 } 1233 1234 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, 1235 bool *overlapped) 1236 { 1237 struct z_erofs_pcluster *pcl = be->pcl; 1238 unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 1239 int i, err = 0; 1240 1241 *overlapped = false; 1242 for (i = 0; i < pclusterpages; ++i) { 1243 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; 1244 struct page *page = bvec->page; 1245 1246 /* compressed pages ought to be present before decompressing */ 1247 if (!page) { 1248 DBG_BUGON(1); 1249 continue; 1250 } 1251 be->compressed_pages[i] = page; 1252 1253 if (z_erofs_is_inline_pcluster(pcl)) { 1254 if (!PageUptodate(page)) 1255 err = -EIO; 1256 continue; 1257 } 1258 1259 DBG_BUGON(z_erofs_page_is_invalidated(page)); 1260 if (!z_erofs_is_shortlived_page(page)) { 1261 if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { 1262 if (!PageUptodate(page)) 1263 err = -EIO; 1264 continue; 1265 } 1266 z_erofs_do_decompressed_bvec(be, bvec); 1267 *overlapped = true; 1268 } 1269 } 1270 1271 if (err) 1272 return err; 1273 return 0; 1274 } 1275 1276 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, 1277 int err) 1278 { 1279 struct erofs_sb_info *const sbi = EROFS_SB(be->sb); 1280 struct z_erofs_pcluster *pcl = be->pcl; 1281 unsigned int pclusterpages = z_erofs_pclusterpages(pcl); 1282 const struct z_erofs_decompressor *decompressor = 1283 &erofs_decompressors[pcl->algorithmformat]; 1284 unsigned int i, inputsize; 1285 int err2; 1286 struct page *page; 1287 bool overlapped; 1288 1289 mutex_lock(&pcl->lock); 1290 be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT; 1291 1292 /* allocate (de)compressed page arrays if cannot be kept on stack */ 1293 be->decompressed_pages = NULL; 1294 be->compressed_pages = NULL; 1295 be->onstack_used = 0; 1296 if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) { 1297 be->decompressed_pages = be->onstack_pages; 1298 be->onstack_used = be->nr_pages; 1299 memset(be->decompressed_pages, 0, 1300 sizeof(struct page *) * be->nr_pages); 1301 } 1302 1303 if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES) 1304 be->compressed_pages = be->onstack_pages + be->onstack_used; 1305 1306 if (!be->decompressed_pages) 1307 be->decompressed_pages = 1308 kvcalloc(be->nr_pages, sizeof(struct page *), 1309 GFP_KERNEL | __GFP_NOFAIL); 1310 if (!be->compressed_pages) 1311 be->compressed_pages = 1312 kvcalloc(pclusterpages, sizeof(struct page *), 1313 GFP_KERNEL | __GFP_NOFAIL); 1314 1315 z_erofs_parse_out_bvecs(be); 1316 err2 = z_erofs_parse_in_bvecs(be, &overlapped); 1317 if (err2) 1318 err = err2; 1319 if (err) 1320 goto out; 1321 1322 if (z_erofs_is_inline_pcluster(pcl)) 1323 inputsize = pcl->tailpacking_size; 1324 else 1325 inputsize = pclusterpages * PAGE_SIZE; 1326 1327 err = decompressor->decompress(&(struct z_erofs_decompress_req) { 1328 .sb = be->sb, 1329 .in = be->compressed_pages, 1330 .out = be->decompressed_pages, 1331 .pageofs_in = pcl->pageofs_in, 1332 .pageofs_out = pcl->pageofs_out, 1333 .inputsize = inputsize, 1334 .outputsize = pcl->length, 1335 .alg = pcl->algorithmformat, 1336 .inplace_io = overlapped, 1337 .partial_decoding = pcl->partial, 1338 .fillgaps = pcl->multibases, 1339 }, be->pagepool); 1340 1341 out: 1342 /* must handle all compressed pages before actual file pages */ 1343 if (z_erofs_is_inline_pcluster(pcl)) { 1344 page = pcl->compressed_bvecs[0].page; 1345 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); 1346 put_page(page); 1347 } else { 1348 for (i = 0; i < pclusterpages; ++i) { 1349 page = pcl->compressed_bvecs[i].page; 1350 1351 if (erofs_page_is_managed(sbi, page)) 1352 continue; 1353 1354 /* recycle all individual short-lived pages */ 1355 (void)z_erofs_put_shortlivedpage(be->pagepool, page); 1356 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); 1357 } 1358 } 1359 if (be->compressed_pages < be->onstack_pages || 1360 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) 1361 kvfree(be->compressed_pages); 1362 z_erofs_fill_other_copies(be, err); 1363 1364 for (i = 0; i < be->nr_pages; ++i) { 1365 page = be->decompressed_pages[i]; 1366 if (!page) 1367 continue; 1368 1369 DBG_BUGON(z_erofs_page_is_invalidated(page)); 1370 1371 /* recycle all individual short-lived pages */ 1372 if (z_erofs_put_shortlivedpage(be->pagepool, page)) 1373 continue; 1374 if (err) 1375 z_erofs_page_mark_eio(page); 1376 z_erofs_onlinepage_endio(page); 1377 } 1378 1379 if (be->decompressed_pages != be->onstack_pages) 1380 kvfree(be->decompressed_pages); 1381 1382 pcl->length = 0; 1383 pcl->partial = true; 1384 pcl->multibases = false; 1385 pcl->bvset.nextpage = NULL; 1386 pcl->vcnt = 0; 1387 1388 /* pcluster lock MUST be taken before the following line */ 1389 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); 1390 mutex_unlock(&pcl->lock); 1391 return err; 1392 } 1393 1394 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, 1395 struct page **pagepool) 1396 { 1397 struct z_erofs_decompress_backend be = { 1398 .sb = io->sb, 1399 .pagepool = pagepool, 1400 .decompressed_secondary_bvecs = 1401 LIST_HEAD_INIT(be.decompressed_secondary_bvecs), 1402 }; 1403 z_erofs_next_pcluster_t owned = io->head; 1404 1405 while (owned != Z_EROFS_PCLUSTER_TAIL) { 1406 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); 1407 1408 be.pcl = container_of(owned, struct z_erofs_pcluster, next); 1409 owned = READ_ONCE(be.pcl->next); 1410 1411 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0); 1412 erofs_workgroup_put(&be.pcl->obj); 1413 } 1414 } 1415 1416 static void z_erofs_decompressqueue_work(struct work_struct *work) 1417 { 1418 struct z_erofs_decompressqueue *bgq = 1419 container_of(work, struct z_erofs_decompressqueue, u.work); 1420 struct page *pagepool = NULL; 1421 1422 DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL); 1423 z_erofs_decompress_queue(bgq, &pagepool); 1424 erofs_release_pages(&pagepool); 1425 kvfree(bgq); 1426 } 1427 1428 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD 1429 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work) 1430 { 1431 z_erofs_decompressqueue_work((struct work_struct *)work); 1432 } 1433 #endif 1434 1435 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, 1436 int bios) 1437 { 1438 struct erofs_sb_info *const sbi = EROFS_SB(io->sb); 1439 1440 /* wake up the caller thread for sync decompression */ 1441 if (io->sync) { 1442 if (!atomic_add_return(bios, &io->pending_bios)) 1443 complete(&io->u.done); 1444 return; 1445 } 1446 1447 if (atomic_add_return(bios, &io->pending_bios)) 1448 return; 1449 /* Use (kthread_)work and sync decompression for atomic contexts only */ 1450 if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) { 1451 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD 1452 struct kthread_worker *worker; 1453 1454 rcu_read_lock(); 1455 worker = rcu_dereference( 1456 z_erofs_pcpu_workers[raw_smp_processor_id()]); 1457 if (!worker) { 1458 INIT_WORK(&io->u.work, z_erofs_decompressqueue_work); 1459 queue_work(z_erofs_workqueue, &io->u.work); 1460 } else { 1461 kthread_queue_work(worker, &io->u.kthread_work); 1462 } 1463 rcu_read_unlock(); 1464 #else 1465 queue_work(z_erofs_workqueue, &io->u.work); 1466 #endif 1467 /* enable sync decompression for readahead */ 1468 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) 1469 sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; 1470 return; 1471 } 1472 z_erofs_decompressqueue_work(&io->u.work); 1473 } 1474 1475 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, 1476 unsigned int nr, 1477 struct page **pagepool, 1478 struct address_space *mc) 1479 { 1480 const pgoff_t index = pcl->obj.index; 1481 gfp_t gfp = mapping_gfp_mask(mc); 1482 bool tocache = false; 1483 1484 struct address_space *mapping; 1485 struct page *oldpage, *page; 1486 int justfound; 1487 1488 repeat: 1489 page = READ_ONCE(pcl->compressed_bvecs[nr].page); 1490 oldpage = page; 1491 1492 if (!page) 1493 goto out_allocpage; 1494 1495 justfound = (unsigned long)page & 1UL; 1496 page = (struct page *)((unsigned long)page & ~1UL); 1497 1498 /* 1499 * preallocated cached pages, which is used to avoid direct reclaim 1500 * otherwise, it will go inplace I/O path instead. 1501 */ 1502 if (page->private == Z_EROFS_PREALLOCATED_PAGE) { 1503 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); 1504 set_page_private(page, 0); 1505 tocache = true; 1506 goto out_tocache; 1507 } 1508 mapping = READ_ONCE(page->mapping); 1509 1510 /* 1511 * file-backed online pages in plcuster are all locked steady, 1512 * therefore it is impossible for `mapping' to be NULL. 1513 */ 1514 if (mapping && mapping != mc) 1515 /* ought to be unmanaged pages */ 1516 goto out; 1517 1518 /* directly return for shortlived page as well */ 1519 if (z_erofs_is_shortlived_page(page)) 1520 goto out; 1521 1522 lock_page(page); 1523 1524 /* only true if page reclaim goes wrong, should never happen */ 1525 DBG_BUGON(justfound && PagePrivate(page)); 1526 1527 /* the page is still in manage cache */ 1528 if (page->mapping == mc) { 1529 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); 1530 1531 if (!PagePrivate(page)) { 1532 /* 1533 * impossible to be !PagePrivate(page) for 1534 * the current restriction as well if 1535 * the page is already in compressed_bvecs[]. 1536 */ 1537 DBG_BUGON(!justfound); 1538 1539 justfound = 0; 1540 set_page_private(page, (unsigned long)pcl); 1541 SetPagePrivate(page); 1542 } 1543 1544 /* no need to submit io if it is already up-to-date */ 1545 if (PageUptodate(page)) { 1546 unlock_page(page); 1547 page = NULL; 1548 } 1549 goto out; 1550 } 1551 1552 /* 1553 * the managed page has been truncated, it's unsafe to 1554 * reuse this one, let's allocate a new cache-managed page. 1555 */ 1556 DBG_BUGON(page->mapping); 1557 DBG_BUGON(!justfound); 1558 1559 tocache = true; 1560 unlock_page(page); 1561 put_page(page); 1562 out_allocpage: 1563 page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); 1564 if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page, 1565 oldpage, page)) { 1566 erofs_pagepool_add(pagepool, page); 1567 cond_resched(); 1568 goto repeat; 1569 } 1570 out_tocache: 1571 if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { 1572 /* turn into temporary page if fails (1 ref) */ 1573 set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); 1574 goto out; 1575 } 1576 attach_page_private(page, pcl); 1577 /* drop a refcount added by allocpage (then we have 2 refs here) */ 1578 put_page(page); 1579 1580 out: /* the only exit (for tracing and debugging) */ 1581 return page; 1582 } 1583 1584 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, 1585 struct z_erofs_decompressqueue *fgq, bool *fg) 1586 { 1587 struct z_erofs_decompressqueue *q; 1588 1589 if (fg && !*fg) { 1590 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN); 1591 if (!q) { 1592 *fg = true; 1593 goto fg_out; 1594 } 1595 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD 1596 kthread_init_work(&q->u.kthread_work, 1597 z_erofs_decompressqueue_kthread_work); 1598 #else 1599 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work); 1600 #endif 1601 } else { 1602 fg_out: 1603 q = fgq; 1604 init_completion(&fgq->u.done); 1605 atomic_set(&fgq->pending_bios, 0); 1606 q->eio = false; 1607 q->sync = true; 1608 } 1609 q->sb = sb; 1610 q->head = Z_EROFS_PCLUSTER_TAIL; 1611 return q; 1612 } 1613 1614 /* define decompression jobqueue types */ 1615 enum { 1616 JQ_BYPASS, 1617 JQ_SUBMIT, 1618 NR_JOBQUEUES, 1619 }; 1620 1621 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, 1622 z_erofs_next_pcluster_t qtail[], 1623 z_erofs_next_pcluster_t owned_head) 1624 { 1625 z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; 1626 z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; 1627 1628 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL); 1629 1630 WRITE_ONCE(*submit_qtail, owned_head); 1631 WRITE_ONCE(*bypass_qtail, &pcl->next); 1632 1633 qtail[JQ_BYPASS] = &pcl->next; 1634 } 1635 1636 static void z_erofs_decompressqueue_endio(struct bio *bio) 1637 { 1638 struct z_erofs_decompressqueue *q = bio->bi_private; 1639 blk_status_t err = bio->bi_status; 1640 struct bio_vec *bvec; 1641 struct bvec_iter_all iter_all; 1642 1643 bio_for_each_segment_all(bvec, bio, iter_all) { 1644 struct page *page = bvec->bv_page; 1645 1646 DBG_BUGON(PageUptodate(page)); 1647 DBG_BUGON(z_erofs_page_is_invalidated(page)); 1648 1649 if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { 1650 if (!err) 1651 SetPageUptodate(page); 1652 unlock_page(page); 1653 } 1654 } 1655 if (err) 1656 q->eio = true; 1657 z_erofs_decompress_kickoff(q, -1); 1658 bio_put(bio); 1659 } 1660 1661 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, 1662 struct z_erofs_decompressqueue *fgq, 1663 bool *force_fg, bool readahead) 1664 { 1665 struct super_block *sb = f->inode->i_sb; 1666 struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb)); 1667 z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; 1668 struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; 1669 z_erofs_next_pcluster_t owned_head = f->owned_head; 1670 /* bio is NULL initially, so no need to initialize last_{index,bdev} */ 1671 pgoff_t last_index; 1672 struct block_device *last_bdev; 1673 unsigned int nr_bios = 0; 1674 struct bio *bio = NULL; 1675 unsigned long pflags; 1676 int memstall = 0; 1677 1678 /* 1679 * if managed cache is enabled, bypass jobqueue is needed, 1680 * no need to read from device for all pclusters in this queue. 1681 */ 1682 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); 1683 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg); 1684 1685 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; 1686 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; 1687 1688 /* by default, all need io submission */ 1689 q[JQ_SUBMIT]->head = owned_head; 1690 1691 do { 1692 struct erofs_map_dev mdev; 1693 struct z_erofs_pcluster *pcl; 1694 pgoff_t cur, end; 1695 unsigned int i = 0; 1696 bool bypass = true; 1697 1698 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); 1699 pcl = container_of(owned_head, struct z_erofs_pcluster, next); 1700 owned_head = READ_ONCE(pcl->next); 1701 1702 if (z_erofs_is_inline_pcluster(pcl)) { 1703 move_to_bypass_jobqueue(pcl, qtail, owned_head); 1704 continue; 1705 } 1706 1707 /* no device id here, thus it will always succeed */ 1708 mdev = (struct erofs_map_dev) { 1709 .m_pa = erofs_pos(sb, pcl->obj.index), 1710 }; 1711 (void)erofs_map_dev(sb, &mdev); 1712 1713 cur = erofs_blknr(sb, mdev.m_pa); 1714 end = cur + pcl->pclusterpages; 1715 1716 do { 1717 struct page *page; 1718 1719 page = pickup_page_for_submission(pcl, i++, 1720 &f->pagepool, mc); 1721 if (!page) 1722 continue; 1723 1724 if (bio && (cur != last_index + 1 || 1725 last_bdev != mdev.m_bdev)) { 1726 submit_bio_retry: 1727 submit_bio(bio); 1728 if (memstall) { 1729 psi_memstall_leave(&pflags); 1730 memstall = 0; 1731 } 1732 bio = NULL; 1733 } 1734 1735 if (unlikely(PageWorkingset(page)) && !memstall) { 1736 psi_memstall_enter(&pflags); 1737 memstall = 1; 1738 } 1739 1740 if (!bio) { 1741 bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, 1742 REQ_OP_READ, GFP_NOIO); 1743 bio->bi_end_io = z_erofs_decompressqueue_endio; 1744 1745 last_bdev = mdev.m_bdev; 1746 bio->bi_iter.bi_sector = (sector_t)cur << 1747 (sb->s_blocksize_bits - 9); 1748 bio->bi_private = q[JQ_SUBMIT]; 1749 if (readahead) 1750 bio->bi_opf |= REQ_RAHEAD; 1751 ++nr_bios; 1752 } 1753 1754 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) 1755 goto submit_bio_retry; 1756 1757 last_index = cur; 1758 bypass = false; 1759 } while (++cur < end); 1760 1761 if (!bypass) 1762 qtail[JQ_SUBMIT] = &pcl->next; 1763 else 1764 move_to_bypass_jobqueue(pcl, qtail, owned_head); 1765 } while (owned_head != Z_EROFS_PCLUSTER_TAIL); 1766 1767 if (bio) { 1768 submit_bio(bio); 1769 if (memstall) 1770 psi_memstall_leave(&pflags); 1771 } 1772 1773 /* 1774 * although background is preferred, no one is pending for submission. 1775 * don't issue decompression but drop it directly instead. 1776 */ 1777 if (!*force_fg && !nr_bios) { 1778 kvfree(q[JQ_SUBMIT]); 1779 return; 1780 } 1781 z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); 1782 } 1783 1784 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, 1785 bool force_fg, bool ra) 1786 { 1787 struct z_erofs_decompressqueue io[NR_JOBQUEUES]; 1788 1789 if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) 1790 return; 1791 z_erofs_submit_queue(f, io, &force_fg, ra); 1792 1793 /* handle bypass queue (no i/o pclusters) immediately */ 1794 z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); 1795 1796 if (!force_fg) 1797 return; 1798 1799 /* wait until all bios are completed */ 1800 wait_for_completion_io(&io[JQ_SUBMIT].u.done); 1801 1802 /* handle synchronous decompress queue in the caller context */ 1803 z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool); 1804 } 1805 1806 /* 1807 * Since partial uptodate is still unimplemented for now, we have to use 1808 * approximate readmore strategies as a start. 1809 */ 1810 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, 1811 struct readahead_control *rac, bool backmost) 1812 { 1813 struct inode *inode = f->inode; 1814 struct erofs_map_blocks *map = &f->map; 1815 erofs_off_t cur, end, headoffset = f->headoffset; 1816 int err; 1817 1818 if (backmost) { 1819 if (rac) 1820 end = headoffset + readahead_length(rac) - 1; 1821 else 1822 end = headoffset + PAGE_SIZE - 1; 1823 map->m_la = end; 1824 err = z_erofs_map_blocks_iter(inode, map, 1825 EROFS_GET_BLOCKS_READMORE); 1826 if (err) 1827 return; 1828 1829 /* expand ra for the trailing edge if readahead */ 1830 if (rac) { 1831 cur = round_up(map->m_la + map->m_llen, PAGE_SIZE); 1832 readahead_expand(rac, headoffset, cur - headoffset); 1833 return; 1834 } 1835 end = round_up(end, PAGE_SIZE); 1836 } else { 1837 end = round_up(map->m_la, PAGE_SIZE); 1838 1839 if (!map->m_llen) 1840 return; 1841 } 1842 1843 cur = map->m_la + map->m_llen - 1; 1844 while (cur >= end) { 1845 pgoff_t index = cur >> PAGE_SHIFT; 1846 struct page *page; 1847 1848 page = erofs_grab_cache_page_nowait(inode->i_mapping, index); 1849 if (page) { 1850 if (PageUptodate(page)) { 1851 unlock_page(page); 1852 } else { 1853 err = z_erofs_do_read_page(f, page); 1854 if (err) 1855 erofs_err(inode->i_sb, 1856 "readmore error at page %lu @ nid %llu", 1857 index, EROFS_I(inode)->nid); 1858 } 1859 put_page(page); 1860 } 1861 1862 if (cur < PAGE_SIZE) 1863 break; 1864 cur = (index << PAGE_SHIFT) - 1; 1865 } 1866 } 1867 1868 static int z_erofs_read_folio(struct file *file, struct folio *folio) 1869 { 1870 struct page *page = &folio->page; 1871 struct inode *const inode = page->mapping->host; 1872 struct erofs_sb_info *const sbi = EROFS_I_SB(inode); 1873 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); 1874 int err; 1875 1876 trace_erofs_readpage(page, false); 1877 f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT; 1878 1879 z_erofs_pcluster_readmore(&f, NULL, true); 1880 err = z_erofs_do_read_page(&f, page); 1881 z_erofs_pcluster_readmore(&f, NULL, false); 1882 (void)z_erofs_collector_end(&f); 1883 1884 /* if some compressed cluster ready, need submit them anyway */ 1885 z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false); 1886 1887 if (err) 1888 erofs_err(inode->i_sb, "failed to read, err [%d]", err); 1889 1890 erofs_put_metabuf(&f.map.buf); 1891 erofs_release_pages(&f.pagepool); 1892 return err; 1893 } 1894 1895 static void z_erofs_readahead(struct readahead_control *rac) 1896 { 1897 struct inode *const inode = rac->mapping->host; 1898 struct erofs_sb_info *const sbi = EROFS_I_SB(inode); 1899 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); 1900 struct page *head = NULL, *page; 1901 unsigned int nr_pages; 1902 1903 f.headoffset = readahead_pos(rac); 1904 1905 z_erofs_pcluster_readmore(&f, rac, true); 1906 nr_pages = readahead_count(rac); 1907 trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false); 1908 1909 while ((page = readahead_page(rac))) { 1910 set_page_private(page, (unsigned long)head); 1911 head = page; 1912 } 1913 1914 while (head) { 1915 struct page *page = head; 1916 int err; 1917 1918 /* traversal in reverse order */ 1919 head = (void *)page_private(page); 1920 1921 err = z_erofs_do_read_page(&f, page); 1922 if (err) 1923 erofs_err(inode->i_sb, 1924 "readahead error at page %lu @ nid %llu", 1925 page->index, EROFS_I(inode)->nid); 1926 put_page(page); 1927 } 1928 z_erofs_pcluster_readmore(&f, rac, false); 1929 (void)z_erofs_collector_end(&f); 1930 1931 z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true); 1932 erofs_put_metabuf(&f.map.buf); 1933 erofs_release_pages(&f.pagepool); 1934 } 1935 1936 const struct address_space_operations z_erofs_aops = { 1937 .read_folio = z_erofs_read_folio, 1938 .readahead = z_erofs_readahead, 1939 }; 1940