// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2022 Alibaba Cloud
 */
#include "compress.h"
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_INLINE_BVECS		2

struct z_erofs_bvec {
	struct page *page;
	int offset;
	unsigned int end;
};

#define __Z_EROFS_BVSET(name, total) \
struct name { \
	/* point to the next page which contains the following bvecs */ \
	struct page *nextpage; \
	struct z_erofs_bvec bvec[total]; \
}
__Z_EROFS_BVSET(z_erofs_bvset,);
__Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
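
/*
 * z_erofs_bvset is the open-ended form (flexible bvec[] array) used for
 * chained bvset pages, while z_erofs_bvset_inline bootstraps
 * Z_EROFS_INLINE_BVECS slots embedded in each pcluster; once the inline
 * slots fill up, z_erofs_bvec_enqueue() below chains a short-lived page
 * via `nextpage' to hold the following bvecs.
 */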

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed/updated atomically by parallelized code.
 */
struct z_erofs_pcluster {
	struct mutex lock;
	struct lockref lockref;

	/* A: point to next chained pcluster or TAILs */
	struct z_erofs_pcluster *next;

	/* I: start physical position of this pcluster */
	erofs_off_t pos;

	/* L: the maximum decompression size of this round */
	unsigned int length;

	/* L: total number of bvecs */
	unsigned int vcnt;

	/* I: pcluster size (compressed size) in bytes */
	unsigned int pclustersize;

	/* I: page offset of start position of decompression */
	unsigned short pageofs_out;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* L: inline a certain number of bvec for bootstrap */
		struct z_erofs_bvset_inline bvset;

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* I: whether compressed data is in-lined or not */
	bool from_meta;

	/* L: whether partial decompression or not */
	bool partial;

	/* L: whether extra buffer allocations are best-effort */
	bool besteffort;

	/* A: compressed bvecs (can be cached or inplaced pages) */
	struct z_erofs_bvec compressed_bvecs[];
};

/* the end of a chain of pclusters */
#define Z_EROFS_PCLUSTER_TAIL		((void *) 0x700 + POISON_POINTER_DELTA)

struct z_erofs_decompressqueue {
	struct super_block *sb;
	struct z_erofs_pcluster *head;
	atomic_t pending_bios;

	union {
		struct completion done;
		struct work_struct work;
		struct kthread_work kthread_work;
	} u;
	bool eio, sync;
};

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	return PAGE_ALIGN(pcl->pageofs_in + pcl->pclustersize) >> PAGE_SHIFT;
}

static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
{
	return fo->mapping == MNGD_MAPPING(sbi);
}

#define Z_EROFS_ONSTACK_PAGES		32

/*
 * Since pclustersize is variable with the big pcluster feature, introduce
 * slab pools for different pcluster sizes.
 */
struct z_erofs_pcluster_slab {
	struct kmem_cache *slab;
	unsigned int maxpages;
	char name[48];
};

#define _PCLP(n) { .maxpages = n }

static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
	_PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
	_PCLP(Z_EROFS_PCLUSTER_MAX_PAGES + 1)
};

struct z_erofs_bvec_iter {
	struct page *bvpage;
	struct z_erofs_bvset *bvset;
	unsigned int nr, cur;
};

static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
{
	if (iter->bvpage)
		kunmap_local(iter->bvset);
	return iter->bvpage;
}

static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
{
	unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
	/* have to access nextpage in advance, otherwise it will be unmapped */
	struct page *nextpage = iter->bvset->nextpage;
	struct page *oldpage;

	DBG_BUGON(!nextpage);
	oldpage = z_erofs_bvec_iter_end(iter);
	iter->bvpage = nextpage;
	iter->bvset = kmap_local_page(nextpage);
	iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
	iter->cur = 0;
	return oldpage;
}

static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
				    struct z_erofs_bvset_inline *bvset,
				    unsigned int bootstrap_nr,
				    unsigned int cur)
{
	*iter = (struct z_erofs_bvec_iter) {
		.nr = bootstrap_nr,
		.bvset = (struct z_erofs_bvset *)bvset,
	};

	while (cur > iter->nr) {
		cur -= iter->nr;
		z_erofs_bvset_flip(iter);
	}
	iter->cur = cur;
}

static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
				struct z_erofs_bvec *bvec,
				struct page **candidate_bvpage,
				struct page **pagepool)
{
	if (iter->cur >= iter->nr) {
		struct page *nextpage = *candidate_bvpage;

		if (!nextpage) {
			nextpage = __erofs_allocpage(pagepool, GFP_KERNEL,
						     true);
			if (!nextpage)
				return -ENOMEM;
			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
		}
		DBG_BUGON(iter->bvset->nextpage);
		iter->bvset->nextpage = nextpage;
		z_erofs_bvset_flip(iter);

		iter->bvset->nextpage = NULL;
		*candidate_bvpage = NULL;
	}
	iter->bvset->bvec[iter->cur++] = *bvec;
	return 0;
}

static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
				 struct z_erofs_bvec *bvec,
				 struct page **old_bvpage)
{
	if (iter->cur == iter->nr)
		*old_bvpage = z_erofs_bvset_flip(iter);
	else
		*old_bvpage = NULL;
	*bvec = iter->bvset->bvec[iter->cur++];
}
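
/*
 * A rough sketch of the consumer side of the iterator above, as used by
 * z_erofs_parse_out_bvecs() in the decompression backend:
 *
 *	struct z_erofs_bvec_iter biter;
 *	struct z_erofs_bvec bvec;
 *	struct page *old_bvpage;
 *
 *	z_erofs_bvec_iter_begin(&biter, &pcl->bvset,
 *				Z_EROFS_INLINE_BVECS, 0);
 *	for (i = 0; i < pcl->vcnt; ++i)
 *		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
 *	(void)z_erofs_bvec_iter_end(&biter);
 *
 * The frontend enqueues bvecs under the pcluster mutex, and the backend
 * dequeues them under the same mutex, recycling chained short-lived
 * pages as they are drained.
 */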

static void z_erofs_destroy_pcluster_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		if (!pcluster_pool[i].slab)
			continue;
		kmem_cache_destroy(pcluster_pool[i].slab);
		pcluster_pool[i].slab = NULL;
	}
}

static int z_erofs_create_pcluster_pool(void)
{
	struct z_erofs_pcluster_slab *pcs;
	struct z_erofs_pcluster *a;
	unsigned int size;

	for (pcs = pcluster_pool;
	     pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		size = struct_size(a, compressed_bvecs, pcs->maxpages);

		sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
		pcs->slab = kmem_cache_create(pcs->name, size, 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
		if (pcs->slab)
			continue;

		z_erofs_destroy_pcluster_pool();
		return -ENOMEM;
	}
	return 0;
}

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
{
	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct z_erofs_pcluster_slab *pcs = pcluster_pool;

	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
		struct z_erofs_pcluster *pcl;

		if (nrpages > pcs->maxpages)
			continue;

		pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
		if (!pcl)
			return ERR_PTR(-ENOMEM);
		return pcl;
	}
	return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i;

	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

		if (pclusterpages > pcs->maxpages)
			continue;

		kmem_cache_free(pcs->slab, pcl);
		return;
	}
	DBG_BUGON(1);
}

static struct workqueue_struct *z_erofs_workqueue __read_mostly;

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static struct kthread_worker __rcu **z_erofs_pcpu_workers;
static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);

static void erofs_destroy_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		worker = rcu_dereference_protected(
					z_erofs_pcpu_workers[cpu], 1);
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
		if (worker)
			kthread_destroy_worker(worker);
	}
	kfree(z_erofs_pcpu_workers);
}

static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
	struct kthread_worker *worker =
		kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");

	if (IS_ERR(worker))
		return worker;
	if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
		sched_set_fifo_low(worker->task);
	return worker;
}

static int erofs_init_percpu_workers(void)
{
	struct kthread_worker *worker;
	unsigned int cpu;

	z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
			sizeof(struct kthread_worker *), GFP_ATOMIC);
	if (!z_erofs_pcpu_workers)
		return -ENOMEM;

	for_each_online_cpu(cpu) {	/* could miss cpu{off,on}line? */
		worker = erofs_init_percpu_worker(cpu);
		if (!IS_ERR(worker))
			rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	}
	return 0;
}
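
/*
 * The worker pointers above are RCU-protected: the decompression kickoff
 * path dereferences them under rcu_read_lock() (see
 * z_erofs_decompress_kickoff()), while the CPU hotplug callbacks below
 * publish and retire workers under z_erofs_pcpu_worker_lock, with a
 * synchronize_rcu() before each worker is destroyed.
 */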

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
static enum cpuhp_state erofs_cpuhp_state;

static int erofs_cpu_online(unsigned int cpu)
{
	struct kthread_worker *worker, *old;

	worker = erofs_init_percpu_worker(cpu);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	spin_lock(&z_erofs_pcpu_worker_lock);
	old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	if (!old)
		rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
	spin_unlock(&z_erofs_pcpu_worker_lock);
	if (old)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_offline(unsigned int cpu)
{
	struct kthread_worker *worker;

	spin_lock(&z_erofs_pcpu_worker_lock);
	worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
			lockdep_is_held(&z_erofs_pcpu_worker_lock));
	rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
	spin_unlock(&z_erofs_pcpu_worker_lock);

	synchronize_rcu();
	if (worker)
		kthread_destroy_worker(worker);
	return 0;
}

static int erofs_cpu_hotplug_init(void)
{
	int state;

	state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
			"fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
	if (state < 0)
		return state;

	erofs_cpuhp_state = state;
	return 0;
}

static void erofs_cpu_hotplug_destroy(void)
{
	if (erofs_cpuhp_state)
		cpuhp_remove_state_nocalls(erofs_cpuhp_state);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
#endif /* CONFIG_HOTPLUG_CPU */

static int z_erofs_init_pcpu_workers(struct super_block *sb)
{
	int err;

	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
		return 0;

	err = erofs_init_percpu_workers();
	if (err) {
		erofs_err(sb, "per-cpu workers: failed to allocate.");
		goto err_init_percpu_workers;
	}

	err = erofs_cpu_hotplug_init();
	if (err < 0) {
		erofs_err(sb, "per-cpu workers: failed CPU hotplug init.");
		goto err_cpuhp_init;
	}
	erofs_info(sb, "initialized per-cpu workers successfully.");
	return err;

err_cpuhp_init:
	erofs_destroy_percpu_workers();
err_init_percpu_workers:
	atomic_set(&erofs_percpu_workers_initialized, 0);
	return err;
}

static void z_erofs_destroy_pcpu_workers(void)
{
	if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
		return;
	erofs_cpu_hotplug_destroy();
	erofs_destroy_percpu_workers();
}
#else /* !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int z_erofs_init_pcpu_workers(struct super_block *sb) { return 0; }
static inline void z_erofs_destroy_pcpu_workers(void) {}
#endif /* CONFIG_EROFS_FS_PCPU_KTHREAD */

void z_erofs_exit_subsystem(void)
{
	z_erofs_destroy_pcpu_workers();
	destroy_workqueue(z_erofs_workqueue);
	z_erofs_destroy_pcluster_pool();
	z_erofs_crypto_disable_all_engines();
	z_erofs_exit_decompressor();
}

int __init z_erofs_init_subsystem(void)
{
	int err = z_erofs_init_decompressor();

	if (err)
		goto err_decompressor;

	err = z_erofs_create_pcluster_pool();
	if (err)
		goto err_pcluster_pool;

	z_erofs_workqueue = alloc_workqueue("erofs_worker",
			WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
	if (!z_erofs_workqueue) {
		err = -ENOMEM;
		goto err_workqueue_init;
	}

	return err;

err_workqueue_init:
	z_erofs_destroy_pcluster_pool();
err_pcluster_pool:
	z_erofs_exit_decompressor();
err_decompressor:
	return err;
}

enum z_erofs_pclustermode {
	/* It has previously been linked into another processing chain */
	Z_EROFS_PCLUSTER_INFLIGHT,
	/*
	 * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
	 * may be dispatched to the bypass queue later due to now-uptodate
	 * managed folios.  All file-backed folios related to this pcluster
	 * cannot be reused for in-place I/O (or bvpage) since the pcluster may
	 * be decoded in a separate queue (and thus out of order).
	 */
	Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
	/*
	 * The pcluster has just been linked to our processing chain.
	 * File-backed folios (except for the head page) related to it can be
	 * used for in-place I/O (or bvpage).
	 */
	Z_EROFS_PCLUSTER_FOLLOWED,
};

struct z_erofs_frontend {
	struct inode *const inode;
	struct erofs_map_blocks map;
	struct z_erofs_bvec_iter biter;

	struct page *pagepool;
	struct page *candidate_bvpage;
	struct z_erofs_pcluster *pcl, *head;
	enum z_erofs_pclustermode mode;

	erofs_off_t headoffset;

	/* a pointer used to pick up inplace I/O pages */
	unsigned int icur;
};

#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \
	.inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \
	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho }

static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe)
{
	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;

	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
		return false;

	if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
		return true;

	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
	    fe->map.m_la < fe->headoffset)
		return true;

	return false;
}

static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
{
	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
	struct z_erofs_pcluster *pcl = fe->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	bool shouldalloc = z_erofs_should_alloc_cache(fe);
	pgoff_t poff = pcl->pos >> PAGE_SHIFT;
	bool may_bypass = true;
	/* Optimistic allocation, as in-place I/O can be used as a fallback */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
	struct folio *folio, *newfolio;
	unsigned int i;

	if (i_blocksize(fe->inode) != PAGE_SIZE ||
	    fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
		return;

	for (i = 0; i < pclusterpages; ++i) {
		/* Inaccurate check w/o locking to avoid unneeded lookups */
		if (READ_ONCE(pcl->compressed_bvecs[i].page))
			continue;

		folio = filemap_get_folio(mc, poff + i);
		if (IS_ERR(folio)) {
			may_bypass = false;
			if (!shouldalloc)
				continue;

			/*
			 * Allocate a managed folio for cached I/O, or it may be
			 * then filled with a file-backed folio for in-place I/O
			 */
			newfolio = filemap_alloc_folio(gfp, 0);
			if (!newfolio)
				continue;
			newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
			folio = NULL;
		}
		spin_lock(&pcl->lockref.lock);
		if (!pcl->compressed_bvecs[i].page) {
			pcl->compressed_bvecs[i].page =
				folio_page(folio ?: newfolio, 0);
			spin_unlock(&pcl->lockref.lock);
			continue;
		}
		spin_unlock(&pcl->lockref.lock);
		folio_put(folio ?: newfolio);
	}

	/*
	 * Don't perform in-place I/O if all compressed pages are available in
	 * the managed cache, as the pcluster can be moved to the bypass queue.
	 */
	if (may_bypass)
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
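
/*
 * Once attached, a managed cache folio keeps its owning pcluster in
 * `->private' and is also referenced from compressed_bvecs[]; the
 * shrinker and the reclaim hooks below undo exactly that pairing.
 */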

/* (erofs_shrinker) disconnect cached encoded data from pclusters */
static int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
					       struct z_erofs_pcluster *pcl)
{
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	struct folio *folio;
	int i;

	DBG_BUGON(pcl->from_meta);
	/* Each cached folio contains one page unless bs > ps is supported */
	for (i = 0; i < pclusterpages; ++i) {
		if (pcl->compressed_bvecs[i].page) {
			folio = page_folio(pcl->compressed_bvecs[i].page);
			/* Avoid reclaiming or migrating this folio */
			if (!folio_trylock(folio))
				return -EBUSY;

			if (!erofs_folio_is_managed(sbi, folio))
				continue;
			pcl->compressed_bvecs[i].page = NULL;
			folio_detach_private(folio);
			folio_unlock(folio);
		}
	}
	return 0;
}

static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
	struct z_erofs_pcluster *pcl = folio_get_private(folio);
	struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
	struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
	bool ret;

	if (!folio_test_private(folio))
		return true;

	ret = false;
	spin_lock(&pcl->lockref.lock);
	if (pcl->lockref.count <= 0) {
		DBG_BUGON(pcl->from_meta);
		for (; bvec < end; ++bvec) {
			if (bvec->page && page_folio(bvec->page) == folio) {
				bvec->page = NULL;
				folio_detach_private(folio);
				ret = true;
				break;
			}
		}
	}
	spin_unlock(&pcl->lockref.lock);
	return ret;
}
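
/*
 * Returning true above means `->private' has been detached and reclaim
 * may free the folio; folios of a pcluster that is still referenced
 * (lockref count > 0) are kept by returning false.
 */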

/*
 * It will be called only on inode eviction. In case that there are still some
 * decompression requests in progress, wait with rescheduling for a bit here.
 * An extra lock could be introduced instead but it seems unnecessary.
 */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
					   size_t offset, size_t length)
{
	const size_t stop = length + offset;

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > folio_size(folio) || stop < length);

	if (offset == 0 && stop == folio_size(folio))
		while (!z_erofs_cache_release_folio(folio, 0))
			cond_resched();
}

static const struct address_space_operations z_erofs_cache_aops = {
	.release_folio = z_erofs_cache_release_folio,
	.invalidate_folio = z_erofs_cache_invalidate_folio,
};

int z_erofs_init_super(struct super_block *sb)
{
	struct inode *inode;
	int err;

	err = z_erofs_init_pcpu_workers(sb);
	if (err)
		return err;

	inode = new_inode(sb);
	if (!inode)
		return -ENOMEM;
	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &z_erofs_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
	EROFS_SB(sb)->managed_cache = inode;
	xa_init(&EROFS_SB(sb)->managed_pslots);
	return 0;
}

/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_frontend *fe,
			       struct z_erofs_bvec *bvec, bool exclusive)
{
	struct z_erofs_pcluster *pcl = fe->pcl;
	int ret;

	if (exclusive) {
		/* Inplace I/O is limited to one page for uncompressed data */
		if (pcl->algorithmformat < Z_EROFS_COMPRESSION_MAX ||
		    fe->icur <= 1) {
			/* Try to prioritize inplace I/O here */
			spin_lock(&pcl->lockref.lock);
			while (fe->icur > 0) {
				if (pcl->compressed_bvecs[--fe->icur].page)
					continue;
				pcl->compressed_bvecs[fe->icur] = *bvec;
				spin_unlock(&pcl->lockref.lock);
				return 0;
			}
			spin_unlock(&pcl->lockref.lock);
		}

		/* otherwise, check if it can be used as a bvpage */
		if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
		    !fe->candidate_bvpage)
			fe->candidate_bvpage = bvec->page;
	}
	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
				   &fe->pagepool);
	fe->pcl->vcnt += (ret >= 0);
	return ret;
}

static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl)
{
	if (lockref_get_not_zero(&pcl->lockref))
		return true;

	spin_lock(&pcl->lockref.lock);
	if (__lockref_is_dead(&pcl->lockref)) {
		spin_unlock(&pcl->lockref.lock);
		return false;
	}

	if (!pcl->lockref.count++)
		atomic_long_dec(&erofs_global_shrink_cnt);
	spin_unlock(&pcl->lockref.lock);
	return true;
}

static int z_erofs_register_pcluster(struct z_erofs_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct super_block *sb = fe->inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_pcluster *pcl, *pre;
	unsigned int pageofs_in;
	int err;

	pageofs_in = erofs_blkoff(sb, map->m_pa);
	pcl = z_erofs_alloc_pcluster(pageofs_in + map->m_plen);
	if (IS_ERR(pcl))
		return PTR_ERR(pcl);

	lockref_init(&pcl->lockref); /* one ref for this request */
	pcl->algorithmformat = map->m_algorithmformat;
	pcl->pclustersize = map->m_plen;
	pcl->length = 0;
	pcl->partial = true;
	pcl->next = fe->head;
	pcl->pos = map->m_pa;
	pcl->pageofs_in = pageofs_in;
	pcl->pageofs_out = map->m_la & ~PAGE_MASK;
	pcl->from_meta = map->m_flags & EROFS_MAP_META;
	fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;

	/*
	 * lock all primary followed works before visible to others
	 * and mutex_trylock *never* fails for a new pcluster.
	 */
	mutex_init(&pcl->lock);
	DBG_BUGON(!mutex_trylock(&pcl->lock));
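
	/*
	 * Publish the new pcluster in the managed XArray.  __xa_cmpxchg()
	 * may find a dying pcluster still indexed at the same position;
	 * in that case, retry until it is either gone or successfully
	 * re-referenced via z_erofs_get_pcluster().
	 */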
	if (!pcl->from_meta) {
		while (1) {
			xa_lock(&sbi->managed_pslots);
			pre = __xa_cmpxchg(&sbi->managed_pslots, pcl->pos,
					   NULL, pcl, GFP_KERNEL);
			if (!pre || xa_is_err(pre) || z_erofs_get_pcluster(pre)) {
				xa_unlock(&sbi->managed_pslots);
				break;
			}
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
		}
		if (xa_is_err(pre)) {
			err = xa_err(pre);
			goto err_out;
		} else if (pre) {
			fe->pcl = pre;
			err = -EEXIST;
			goto err_out;
		}
	}
	fe->head = fe->pcl = pcl;
	return 0;

err_out:
	mutex_unlock(&pcl->lock);
	z_erofs_free_pcluster(pcl);
	return err;
}

static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
{
	struct erofs_map_blocks *map = &fe->map;
	struct super_block *sb = fe->inode->i_sb;
	struct z_erofs_pcluster *pcl = NULL;
	void *ptr;
	int ret;

	DBG_BUGON(fe->pcl);
	/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
	DBG_BUGON(!fe->head);

	if (!(map->m_flags & EROFS_MAP_META)) {
		while (1) {
			rcu_read_lock();
			pcl = xa_load(&EROFS_SB(sb)->managed_pslots, map->m_pa);
			if (!pcl || z_erofs_get_pcluster(pcl)) {
				DBG_BUGON(pcl && map->m_pa != pcl->pos);
				rcu_read_unlock();
				break;
			}
			rcu_read_unlock();
		}
	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (pcl) {
		fe->pcl = pcl;
		ret = -EEXIST;
	} else {
		ret = z_erofs_register_pcluster(fe);
	}

	if (ret == -EEXIST) {
		mutex_lock(&fe->pcl->lock);
		/* check if this pcluster hasn't been linked into any chain */
		if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) {
			/* .. so it can be attached to our submission chain */
			fe->head = fe->pcl;
			fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
		} else {	/* otherwise, it belongs to an inflight chain */
			fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
		}
	} else if (ret) {
		return ret;
	}

	z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
				Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
	if (!fe->pcl->from_meta) {
		/* bind cache first when cached decompression is preferred */
		z_erofs_bind_cache(fe);
	} else {
		ret = erofs_init_metabuf(&map->buf, sb,
					 erofs_inode_in_metabox(fe->inode));
		if (ret)
			return ret;
		ptr = erofs_bread(&map->buf, map->m_pa, false);
		if (IS_ERR(ptr)) {
			ret = PTR_ERR(ptr);
			erofs_err(sb, "failed to get inline folio %d", ret);
			return ret;
		}
		folio_get(page_folio(map->buf.page));
		WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
		fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
	}
	/* file-backed inplace I/O pages are traversed in reverse order */
	fe->icur = z_erofs_pclusterpages(fe->pcl);
	return 0;
}

static void z_erofs_rcu_callback(struct rcu_head *head)
{
	z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu));
}
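
/*
 * Teardown of an unreferenced pcluster: detach all cached folios, erase
 * it from the managed XArray, mark the lockref dead, and finally free
 * the structure after an RCU grace period (see the callback above) so
 * that lockless XArray walkers never touch freed memory.
 */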
static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
					    struct z_erofs_pcluster *pcl)
{
	if (pcl->lockref.count)
		return false;

	/*
	 * Note that all cached folios should be detached before deleted from
	 * the XArray.  Otherwise some folios could be still attached to the
	 * orphan old pcluster when the new one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_folios(sbi, pcl))
		return false;

	/*
	 * It's impossible to fail after the pcluster is frozen, but in order
	 * to avoid some race conditions, add a DBG_BUGON to observe this.
	 */
	DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->pos) != pcl);

	lockref_mark_dead(&pcl->lockref);
	return true;
}

static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi,
					  struct z_erofs_pcluster *pcl)
{
	bool free;

	spin_lock(&pcl->lockref.lock);
	free = __erofs_try_to_release_pcluster(sbi, pcl);
	spin_unlock(&pcl->lockref.lock);
	if (free) {
		atomic_long_dec(&erofs_global_shrink_cnt);
		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
	}
	return free;
}

unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr)
{
	struct z_erofs_pcluster *pcl;
	unsigned long index, freed = 0;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, pcl) {
		/* try to shrink each valid pcluster */
		if (!erofs_try_to_release_pcluster(sbi, pcl))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}

static void z_erofs_put_pcluster(struct erofs_sb_info *sbi,
				 struct z_erofs_pcluster *pcl, bool try_free)
{
	bool free = false;

	if (lockref_put_or_lock(&pcl->lockref))
		return;

	DBG_BUGON(__lockref_is_dead(&pcl->lockref));
	if (!--pcl->lockref.count) {
		if (try_free && xa_trylock(&sbi->managed_pslots)) {
			free = __erofs_try_to_release_pcluster(sbi, pcl);
			xa_unlock(&sbi->managed_pslots);
		}
		atomic_long_add(!free, &erofs_global_shrink_cnt);
	}
	spin_unlock(&pcl->lockref.lock);
	if (free)
		call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}

static void z_erofs_pcluster_end(struct z_erofs_frontend *fe)
{
	struct z_erofs_pcluster *pcl = fe->pcl;

	if (!pcl)
		return;

	z_erofs_bvec_iter_end(&fe->biter);
	mutex_unlock(&pcl->lock);

	if (fe->candidate_bvpage)
		fe->candidate_bvpage = NULL;

	/* Drop refcount if it doesn't belong to our processing chain */
	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
		z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false);
	fe->pcl = NULL;
}

static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
				 unsigned int cur, unsigned int end, erofs_off_t pos)
{
	struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int cnt;
	u8 *src;

	if (!packed_inode)
		return -EFSCORRUPTED;

	buf.mapping = packed_inode->i_mapping;
	for (; cur < end; cur += cnt, pos += cnt) {
		cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
		src = erofs_bread(&buf, pos, true);
		if (IS_ERR(src)) {
			erofs_put_metabuf(&buf);
			return PTR_ERR(src);
		}
		memcpy_to_folio(folio, cur, src, cnt);
	}
	erofs_put_metabuf(&buf);
	return 0;
}
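
/*
 * Scan a folio from its end towards its beginning: each round maps the
 * extent covering the current position, then either zeroes unmapped
 * ranges, copies fragment data, or attaches (sub)pages to a pcluster
 * for compressed I/O.
 */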
static int z_erofs_scan_folio(struct z_erofs_frontend *f,
			      struct folio *folio, bool ra)
{
	struct inode *const inode = f->inode;
	struct erofs_map_blocks *const map = &f->map;
	const loff_t offset = folio_pos(folio);
	const unsigned int bs = i_blocksize(inode);
	unsigned int end = folio_size(folio), split = 0, cur, pgs;
	bool tight, excl;
	int err = 0;

	tight = (bs == PAGE_SIZE);
	erofs_onlinefolio_init(folio);
	do {
		if (offset + end - 1 < map->m_la ||
		    offset + end - 1 >= map->m_la + map->m_llen) {
			z_erofs_pcluster_end(f);
			map->m_la = offset + end - 1;
			map->m_llen = 0;
			err = z_erofs_map_blocks_iter(inode, map, 0);
			if (err)
				break;
		}

		cur = offset > map->m_la ? 0 : map->m_la - offset;
		pgs = round_down(cur, PAGE_SIZE);
		/* bump split parts first to avoid several separate cases */
		++split;

		if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			folio_zero_segment(folio, cur, end);
			tight = false;
		} else if (map->m_flags & __EROFS_MAP_FRAGMENT) {
			erofs_off_t fpos = offset + cur - map->m_la;

			err = z_erofs_read_fragment(inode->i_sb, folio, cur,
					cur + min(map->m_llen - fpos, end - cur),
					EROFS_I(inode)->z_fragmentoff + fpos);
			if (err)
				break;
			tight = false;
		} else {
			if (!f->pcl) {
				err = z_erofs_pcluster_begin(f);
				if (err)
					break;
				f->pcl->besteffort |= !ra;
			}

			pgs = round_down(end - 1, PAGE_SIZE);
			/*
			 * Ensure this partial page belongs to this submit
			 * chain rather than other concurrent submit chains or
			 * noio (bypass) chains, since those chains are handled
			 * asynchronously and thus the page cannot be used for
			 * in-place I/O or bvpage (which must be processed in
			 * strict order).
			 */
			tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
			excl = false;
			if (cur <= pgs) {
				excl = (split <= 1) || tight;
				cur = pgs;
			}

			err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
				.page = folio_page(folio, pgs >> PAGE_SHIFT),
				.offset = offset + pgs - map->m_la,
				.end = end - pgs, }), excl);
			if (err)
				break;

			erofs_onlinefolio_split(folio);
			if (f->pcl->length < offset + end - map->m_la) {
				f->pcl->length = offset + end - map->m_la;
				f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
			}
			if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
			    !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
			    f->pcl->length == map->m_llen)
				f->pcl->partial = false;
		}
		/* shorten the remaining extent to update progress */
		map->m_llen = offset + cur - map->m_la;
		map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
		if (cur <= pgs) {
			split = cur < pgs;
			tight = (bs == PAGE_SIZE);
		}
	} while ((end = cur) > 0);
	erofs_onlinefolio_end(folio, err, false);
	return err;
}

static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
				       unsigned int readahead_pages)
{
	/* auto: enable for read_folio, disable for readahead */
	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
	    !readahead_pages)
		return true;

	if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
	    (readahead_pages <= sbi->opt.max_sync_decompress_pages))
		return true;

	return false;
}

static bool z_erofs_page_is_invalidated(struct page *page)
{
	return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
}
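
/*
 * Per-pcluster decompression context: `decompressed_pages' collects
 * output pages in file order and `compressed_pages' the input pages.
 * Both arrays are carved out of `onstack_pages' whenever they fit,
 * falling back to kvcalloc() otherwise (see
 * z_erofs_decompress_pcluster()).
 */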
struct z_erofs_backend {
	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
	struct super_block *sb;
	struct z_erofs_pcluster *pcl;
	/* pages with the longest decompressed length for deduplication */
	struct page **decompressed_pages;
	/* pages to keep the compressed data */
	struct page **compressed_pages;

	struct list_head decompressed_secondary_bvecs;
	struct page **pagepool;
	unsigned int onstack_used, nr_pages;
	/* indicate if temporary copies should be preserved for later use */
	bool keepxcpy;
};

struct z_erofs_bvec_item {
	struct z_erofs_bvec bvec;
	struct list_head list;
};

static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be,
					 struct z_erofs_bvec *bvec)
{
	int poff = bvec->offset + be->pcl->pageofs_out;
	struct z_erofs_bvec_item *item;
	struct page **page;

	if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
			bvec->offset + bvec->end == be->pcl->length)) {
		DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
		page = be->decompressed_pages + (poff >> PAGE_SHIFT);
		if (!*page) {
			*page = bvec->page;
			return;
		}
	} else {
		be->keepxcpy = true;
	}

	/* (cold path) one pcluster is requested multiple times */
	item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
	item->bvec = *bvec;
	list_add(&item->list, &be->decompressed_secondary_bvecs);
}

static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err)
{
	unsigned int off0 = be->pcl->pageofs_out;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
		struct z_erofs_bvec_item *bvi;
		unsigned int end, cur;
		void *dst, *src;

		bvi = container_of(p, struct z_erofs_bvec_item, list);
		cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
		end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
			    bvi->bvec.end);
		dst = kmap_local_page(bvi->bvec.page);
		while (cur < end) {
			unsigned int pgnr, scur, len;

			pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
			DBG_BUGON(pgnr >= be->nr_pages);

			scur = bvi->bvec.offset + cur -
					((pgnr << PAGE_SHIFT) - off0);
			len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
			if (!be->decompressed_pages[pgnr]) {
				err = -EFSCORRUPTED;
				cur += len;
				continue;
			}
			src = kmap_local_page(be->decompressed_pages[pgnr]);
			memcpy(dst + cur, src + scur, len);
			kunmap_local(src);
			cur += len;
		}
		kunmap_local(dst);
		erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true);
		list_del(p);
		kfree(bvi);
	}
}

static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	struct z_erofs_bvec_iter biter;
	struct page *old_bvpage;
	int i;

	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
	for (i = 0; i < pcl->vcnt; ++i) {
		struct z_erofs_bvec bvec;

		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);

		if (old_bvpage)
			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);

		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
		z_erofs_do_decompressed_bvec(be, &bvec);
	}

	old_bvpage = z_erofs_bvec_iter_end(&biter);
	if (old_bvpage)
		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
}
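
/*
 * `overlapped' below indicates in-place I/O: some file-backed pages
 * then serve as both compressed input and decompressed output, so the
 * decompressor must stage the input away before overwriting them.
 */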
static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped)
{
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	int i, err = 0;

	*overlapped = false;
	for (i = 0; i < pclusterpages; ++i) {
		struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
		struct page *page = bvec->page;

		/* compressed data ought to be valid when decompressing */
		if (IS_ERR(page) || !page) {
			bvec->page = NULL;	/* clear the failure reason */
			err = page ? PTR_ERR(page) : -EIO;
			continue;
		}
		be->compressed_pages[i] = page;

		if (pcl->from_meta ||
		    erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
			if (!PageUptodate(page))
				err = -EIO;
			continue;
		}

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (z_erofs_is_shortlived_page(page))
			continue;
		z_erofs_do_decompressed_bvec(be, bvec);
		*overlapped = true;
	}
	return err;
}

static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
{
	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
	struct z_erofs_pcluster *pcl = be->pcl;
	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
	const struct z_erofs_decompressor *decomp =
				z_erofs_decomp[pcl->algorithmformat];
	int i, j, jtop, err2;
	struct page *page;
	bool overlapped;
	bool try_free = true;

	mutex_lock(&pcl->lock);
	be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;

	/* allocate (de)compressed page arrays if they cannot be kept on stack */
	be->decompressed_pages = NULL;
	be->compressed_pages = NULL;
	be->onstack_used = 0;
	if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
		be->decompressed_pages = be->onstack_pages;
		be->onstack_used = be->nr_pages;
		memset(be->decompressed_pages, 0,
		       sizeof(struct page *) * be->nr_pages);
	}

	if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
		be->compressed_pages = be->onstack_pages + be->onstack_used;

	if (!be->decompressed_pages)
		be->decompressed_pages =
			kvcalloc(be->nr_pages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);
	if (!be->compressed_pages)
		be->compressed_pages =
			kvcalloc(pclusterpages, sizeof(struct page *),
				 GFP_KERNEL | __GFP_NOFAIL);

	z_erofs_parse_out_bvecs(be);
	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
	if (err2)
		err = err2;
	if (!err)
		err = decomp->decompress(&(struct z_erofs_decompress_req) {
					.sb = be->sb,
					.in = be->compressed_pages,
					.out = be->decompressed_pages,
					.inpages = pclusterpages,
					.outpages = be->nr_pages,
					.pageofs_in = pcl->pageofs_in,
					.pageofs_out = pcl->pageofs_out,
					.inputsize = pcl->pclustersize,
					.outputsize = pcl->length,
					.alg = pcl->algorithmformat,
					.inplace_io = overlapped,
					.partial_decoding = pcl->partial,
					.fillgaps = be->keepxcpy,
					.gfp = pcl->besteffort ? GFP_KERNEL :
						GFP_NOWAIT | __GFP_NORETRY
				 }, be->pagepool);

	/* must handle all compressed pages before actual file pages */
	if (pcl->from_meta) {
		folio_put(page_folio(pcl->compressed_bvecs[0].page));
		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
	} else {
		/* managed folios are still left in compressed_bvecs[] */
		for (i = 0; i < pclusterpages; ++i) {
			page = be->compressed_pages[i];
			if (!page)
				continue;
			if (erofs_folio_is_managed(sbi, page_folio(page))) {
				try_free = false;
				continue;
			}
			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
		}
	}
	if (be->compressed_pages < be->onstack_pages ||
	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
		kvfree(be->compressed_pages);

	jtop = 0;
	z_erofs_fill_other_copies(be, err);
	for (i = 0; i < be->nr_pages; ++i) {
		page = be->decompressed_pages[i];
		if (!page)
			continue;

		DBG_BUGON(z_erofs_page_is_invalidated(page));
		if (!z_erofs_is_shortlived_page(page)) {
			erofs_onlinefolio_end(page_folio(page), err, true);
			continue;
		}
		if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
			erofs_pagepool_add(be->pagepool, page);
			continue;
		}
		for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
			;
		if (j >= jtop)	/* this bounce page is newly detected */
			be->decompressed_pages[jtop++] = page;
	}
	while (jtop)
		erofs_pagepool_add(be->pagepool,
				   be->decompressed_pages[--jtop]);
	if (be->decompressed_pages != be->onstack_pages)
		kvfree(be->decompressed_pages);

	pcl->length = 0;
	pcl->partial = true;
	pcl->besteffort = false;
	pcl->bvset.nextpage = NULL;
	pcl->vcnt = 0;

	/* pcluster lock MUST be taken before the following line */
	WRITE_ONCE(pcl->next, NULL);
	mutex_unlock(&pcl->lock);

	if (pcl->from_meta)
		z_erofs_free_pcluster(pcl);
	else
		z_erofs_put_pcluster(sbi, pcl, try_free);
	return err;
}
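
/*
 * Walk the whole pcluster chain of a queue until the TAIL sentinel,
 * decoding each pcluster in turn; a prior error (e.g. a queue-wide I/O
 * error) is fed into every decode so the affected folios complete with
 * it.
 */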
static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
				    struct page **pagepool)
{
	struct z_erofs_backend be = {
		.sb = io->sb,
		.pagepool = pagepool,
		.decompressed_secondary_bvecs =
			LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
		.pcl = io->head,
	};
	struct z_erofs_pcluster *next;
	int err = io->eio ? -EIO : 0;

	for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
		DBG_BUGON(!be.pcl);
		next = READ_ONCE(be.pcl->next);
		err = z_erofs_decompress_pcluster(&be, err) ?: err;
	}
	return err;
}

static void z_erofs_decompressqueue_work(struct work_struct *work)
{
	struct z_erofs_decompressqueue *bgq =
		container_of(work, struct z_erofs_decompressqueue, u.work);
	struct page *pagepool = NULL;

	DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
	z_erofs_decompress_queue(bgq, &pagepool);
	erofs_release_pages(&pagepool);
	kvfree(bgq);
}

#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
{
	z_erofs_decompressqueue_work((struct work_struct *)work);
}
#endif

/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */
static inline bool z_erofs_in_atomic(void)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth())
		return true;
	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return true;
	return !preemptible();
}

static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
				       int bios)
{
	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);

	/* wake up the caller thread for sync decompression */
	if (io->sync) {
		if (!atomic_add_return(bios, &io->pending_bios))
			complete(&io->u.done);
		return;
	}

	if (atomic_add_return(bios, &io->pending_bios))
		return;
	if (z_erofs_in_atomic()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		struct kthread_worker *worker;

		rcu_read_lock();
		worker = rcu_dereference(
				z_erofs_pcpu_workers[raw_smp_processor_id()]);
		if (!worker) {
			INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
			queue_work(z_erofs_workqueue, &io->u.work);
		} else {
			kthread_queue_work(worker, &io->u.kthread_work);
		}
		rcu_read_unlock();
#else
		queue_work(z_erofs_workqueue, &io->u.work);
#endif
		/* enable sync decompression for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
		return;
	}
	z_erofs_decompressqueue_work(&io->u.work);
}
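
/*
 * Pick the page to read compressed data into for slot @nr of @pcl:
 * reuse an uptodate managed folio (then no I/O is needed), take a
 * file-backed page already reserved for in-place I/O, or allocate a
 * new page which is either added to the managed cache or marked
 * short-lived.
 */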
static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
				 struct z_erofs_frontend *f,
				 struct z_erofs_pcluster *pcl,
				 unsigned int nr,
				 struct address_space *mc)
{
	gfp_t gfp = mapping_gfp_mask(mc);
	bool tocache = false;
	struct z_erofs_bvec zbv;
	struct address_space *mapping;
	struct folio *folio;
	struct page *page;
	int bs = i_blocksize(f->inode);

	/* Except for inplace folios, the entire folio can be used for I/Os */
	bvec->bv_offset = 0;
	bvec->bv_len = PAGE_SIZE;
repeat:
	spin_lock(&pcl->lockref.lock);
	zbv = pcl->compressed_bvecs[nr];
	spin_unlock(&pcl->lockref.lock);
	if (!zbv.page)
		goto out_allocfolio;

	bvec->bv_page = zbv.page;
	DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));

	folio = page_folio(zbv.page);
	/* For preallocated managed folios, add them to page cache here */
	if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
		tocache = true;
		goto out_tocache;
	}

	mapping = READ_ONCE(folio->mapping);
	/*
	 * File-backed folios for inplace I/Os are all locked steady,
	 * therefore it is impossible for `mapping` to be NULL.
	 */
	if (mapping && mapping != mc) {
		if (zbv.offset < 0)
			bvec->bv_offset = round_up(-zbv.offset, bs);
		bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
		return;
	}

	folio_lock(folio);
	if (likely(folio->mapping == mc)) {
		/*
		 * The cached folio is still in managed cache but without
		 * a valid `->private` pcluster hint.  Let's reconnect them.
		 */
		if (!folio_test_private(folio)) {
			folio_attach_private(folio, pcl);
			/* compressed_bvecs[] already takes a ref before */
			folio_put(folio);
		}
		if (likely(folio->private == pcl)) {
			/* don't submit cache I/Os again if already uptodate */
			if (folio_test_uptodate(folio)) {
				folio_unlock(folio);
				bvec->bv_page = NULL;
			}
			return;
		}
		/*
		 * Already linked with another pcluster, which only appears in
		 * crafted images by fuzzers for now.  But handle this anyway.
		 */
		tocache = false;	/* use temporary short-lived pages */
	} else {
		DBG_BUGON(1); /* referenced managed folios can't be truncated */
		tocache = true;
	}
	folio_unlock(folio);
	folio_put(folio);
out_allocfolio:
	page = __erofs_allocpage(&f->pagepool, gfp, true);
	spin_lock(&pcl->lockref.lock);
	if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
		if (page)
			erofs_pagepool_add(&f->pagepool, page);
		spin_unlock(&pcl->lockref.lock);
		cond_resched();
		goto repeat;
	}
	pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
	spin_unlock(&pcl->lockref.lock);
	bvec->bv_page = page;
	if (!page)
		return;
	folio = page_folio(page);
out_tocache:
	if (!tocache || bs != PAGE_SIZE ||
	    filemap_add_folio(mc, folio, (pcl->pos >> PAGE_SHIFT) + nr, gfp)) {
		/* turn into a temporary shortlived folio (1 ref) */
		folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
		return;
	}
	folio_attach_private(folio, pcl);
	/* drop a refcount added by allocpage (then 2 refs in total here) */
	folio_put(folio);
}

static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
			      struct z_erofs_decompressqueue *fgq, bool *fg)
{
	struct z_erofs_decompressqueue *q;

	if (fg && !*fg) {
		q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
		if (!q) {
			*fg = true;
			goto fg_out;
		}
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
		kthread_init_work(&q->u.kthread_work,
				  z_erofs_decompressqueue_kthread_work);
#else
		INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
#endif
	} else {
fg_out:
		q = fgq;
		init_completion(&fgq->u.done);
		atomic_set(&fgq->pending_bios, 0);
		q->eio = false;
		q->sync = true;
	}
	q->sb = sb;
	q->head = Z_EROFS_PCLUSTER_TAIL;
	return q;
}

/* define decompression jobqueue types */
enum {
	JQ_BYPASS,
	JQ_SUBMIT,
	NR_JOBQUEUES,
};

static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl,
					 struct z_erofs_pcluster *next,
					 struct z_erofs_pcluster **qtail[])
{
	WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
	WRITE_ONCE(*qtail[JQ_SUBMIT], next);
	WRITE_ONCE(*qtail[JQ_BYPASS], pcl);
	qtail[JQ_BYPASS] = &pcl->next;
}
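
/*
 * Completion handler for compressed-data bios: mark managed cache
 * folios uptodate on success and unlock them, record any I/O error on
 * the queue, then kick the queue so decompression starts once the last
 * pending bio has completed.
 */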
static void z_erofs_endio(struct bio *bio)
{
	struct z_erofs_decompressqueue *q = bio->bi_private;
	blk_status_t err = bio->bi_status;
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		DBG_BUGON(folio_test_uptodate(folio));
		DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
		if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
			continue;

		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	if (err)
		q->eio = true;
	z_erofs_decompress_kickoff(q, -1);
	if (bio->bi_bdev)
		bio_put(bio);
}

static void z_erofs_submit_queue(struct z_erofs_frontend *f,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg, bool readahead)
{
	struct super_block *sb = f->inode->i_sb;
	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
	struct z_erofs_pcluster **qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	struct z_erofs_pcluster *pcl, *next;
	/* bio is NULL initially, so no need to initialize last_{pa,bdev} */
	erofs_off_t last_pa;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;
	unsigned long pflags;
	int memstall = 0;

	/* No need to read from device for pclusters in the bypass queue. */
	q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
	q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);

	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;

	/* by default, all need io submission */
	q[JQ_SUBMIT]->head = next = f->head;

	do {
		struct erofs_map_dev mdev;
		erofs_off_t cur, end;
		struct bio_vec bvec;
		unsigned int i = 0;
		bool bypass = true;

		pcl = next;
		next = READ_ONCE(pcl->next);
		if (pcl->from_meta) {
			z_erofs_move_to_bypass_queue(pcl, next, qtail);
			continue;
		}

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = round_down(pcl->pos, sb->s_blocksize),
		};
		(void)erofs_map_dev(sb, &mdev);

		cur = mdev.m_pa;
		end = round_up(cur + pcl->pageofs_in + pcl->pclustersize,
			       sb->s_blocksize);
		do {
			bvec.bv_page = NULL;
			if (bio && (cur != last_pa ||
				    bio->bi_bdev != mdev.m_bdev)) {
drain_io:
				if (erofs_is_fileio_mode(EROFS_SB(sb)))
					erofs_fileio_submit_bio(bio);
				else if (erofs_is_fscache_mode(sb))
					erofs_fscache_submit_bio(bio);
				else
					submit_bio(bio);

				if (memstall) {
					psi_memstall_leave(&pflags);
					memstall = 0;
				}
				bio = NULL;
			}

			if (!bvec.bv_page) {
				z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
				if (!bvec.bv_page)
					continue;
				if (cur + bvec.bv_len > end)
					bvec.bv_len = end - cur;
				DBG_BUGON(bvec.bv_len < sb->s_blocksize);
			}

			if (unlikely(PageWorkingset(bvec.bv_page)) &&
			    !memstall) {
				psi_memstall_enter(&pflags);
				memstall = 1;
			}

			if (!bio) {
				if (erofs_is_fileio_mode(EROFS_SB(sb)))
					bio = erofs_fileio_bio_alloc(&mdev);
				else if (erofs_is_fscache_mode(sb))
					bio = erofs_fscache_bio_alloc(&mdev);
				else
					bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
							REQ_OP_READ, GFP_NOIO);
				bio->bi_end_io = z_erofs_endio;
				bio->bi_iter.bi_sector =
					(mdev.m_dif->fsoff + cur) >> 9;
				bio->bi_private = q[JQ_SUBMIT];
				if (readahead)
					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

			if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
					  bvec.bv_offset))
				goto drain_io;
			last_pa = cur + bvec.bv_len;
			bypass = false;
		} while ((cur += bvec.bv_len) < end);

		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			z_erofs_move_to_bypass_queue(pcl, next, qtail);
	} while (next != Z_EROFS_PCLUSTER_TAIL);

	if (bio) {
		if (erofs_is_fileio_mode(EROFS_SB(sb)))
			erofs_fileio_submit_bio(bio);
		else if (erofs_is_fscache_mode(sb))
			erofs_fscache_submit_bio(bio);
		else
			submit_bio(bio);
	}
	if (memstall)
		psi_memstall_leave(&pflags);

	/*
	 * Although background decompression is preferred, nothing is pending
	 * for submission here; don't kick off decompression but free the
	 * submit queue directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
}

static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
	struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
	bool force_fg = z_erofs_is_sync_decompress(sbi, rapages);
	int err;

	if (f->head == Z_EROFS_PCLUSTER_TAIL)
		return 0;
	z_erofs_submit_queue(f, io, &force_fg, !!rapages);

	/* handle bypass queue (no i/o pclusters) immediately */
	err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
	if (!force_fg)
		return err;

	/* wait until all bios are completed */
	wait_for_completion_io(&io[JQ_SUBMIT].u.done);

	/* handle synchronous decompress queue in the caller context */
	return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err;
}

/*
 * Since partial uptodate is still unimplemented for now, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f,
		struct readahead_control *rac, bool backmost)
{
	struct inode *inode = f->inode;
	struct erofs_map_blocks *map = &f->map;
	erofs_off_t cur, end, headoffset = f->headoffset;
	int err;

	if (backmost) {
		if (rac)
			end = headoffset + readahead_length(rac) - 1;
		else
			end = headoffset + PAGE_SIZE - 1;
		map->m_la = end;
		err = z_erofs_map_blocks_iter(inode, map,
					      EROFS_GET_BLOCKS_READMORE);
		if (err)
			return;

		/* expand ra for the trailing edge if readahead */
		if (rac) {
			cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
			readahead_expand(rac, headoffset, cur - headoffset);
			return;
		}
		end = round_up(end, PAGE_SIZE);
	} else {
		end = round_up(map->m_la, PAGE_SIZE);
		if (!map->m_llen)
			return;
	}

	cur = map->m_la + map->m_llen - 1;
	while ((cur >= end) && (cur < i_size_read(inode))) {
		pgoff_t index = cur >> PAGE_SHIFT;
		struct folio *folio;

		folio = erofs_grab_folio_nowait(inode->i_mapping, index);
		if (!IS_ERR_OR_NULL(folio)) {
			if (folio_test_uptodate(folio))
				folio_unlock(folio);
			else
				z_erofs_scan_folio(f, folio, !!rac);
			folio_put(folio);
		}

		if (cur < PAGE_SIZE)
			break;
		cur = (index << PAGE_SHIFT) - 1;
	}
}

static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *const inode = folio->mapping->host;
	Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio));
	int err;

	trace_erofs_read_folio(folio, false);
	z_erofs_pcluster_readmore(&f, NULL, true);
	err = z_erofs_scan_folio(&f, folio, false);
	z_erofs_pcluster_readmore(&f, NULL, false);
	z_erofs_pcluster_end(&f);

	/* if some pclusters are ready, need submit them anyway */
	err = z_erofs_runqueue(&f, 0) ?: err;
	if (err && err != -EINTR)
		erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
			  err, folio->index, EROFS_I(inode)->nid);

	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
	return err;
}

static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac));
	unsigned int nrpages = readahead_count(rac);
	struct folio *head = NULL, *folio;
	int err;

	trace_erofs_readahead(inode, readahead_index(rac), nrpages, false);
	z_erofs_pcluster_readmore(&f, rac, true);
	while ((folio = readahead_folio(rac))) {
		folio->private = head;
		head = folio;
	}

	/* traverse in reverse order for best metadata I/O performance */
	while (head) {
		folio = head;
		head = folio_get_private(folio);

		err = z_erofs_scan_folio(&f, folio, true);
		if (err && err != -EINTR)
			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
				  folio->index, EROFS_I(inode)->nid);
	}
	z_erofs_pcluster_readmore(&f, rac, false);
	z_erofs_pcluster_end(&f);

	(void)z_erofs_runqueue(&f, nrpages);
	erofs_put_metabuf(&f.map.buf);
	erofs_release_pages(&f.pagepool);
}

const struct address_space_operations z_erofs_aops = {
	.read_folio = z_erofs_read_folio,
	.readahead = z_erofs_readahead,
};