1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2022 Alibaba Cloud
6 */
7 #include "compress.h"
8 #include <linux/psi.h>
9 #include <linux/cpuhotplug.h>
10 #include <trace/events/erofs.h>
11
12 #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
13 #define Z_EROFS_INLINE_BVECS 2
14
15 /*
16 * let's leave a type here in case of introducing
17 * another tagged pointer later.
18 */
19 typedef void *z_erofs_next_pcluster_t;
20
21 struct z_erofs_bvec {
22 struct page *page;
23 int offset;
24 unsigned int end;
25 };
26
27 #define __Z_EROFS_BVSET(name, total) \
28 struct name { \
29 /* point to the next page which contains the following bvecs */ \
30 struct page *nextpage; \
31 struct z_erofs_bvec bvec[total]; \
32 }
33 __Z_EROFS_BVSET(z_erofs_bvset,);
34 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
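/*
 * z_erofs_bvset leaves bvec[] as a flexible array so that a whole page can
 * hold bvecs (see z_erofs_bvset_flip() below), while z_erofs_bvset_inline
 * embeds Z_EROFS_INLINE_BVECS bvecs directly inside the pcluster for
 * bootstrap.
 */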
35
36 /*
37 * Structure fields follow one of the following exclusion rules.
38 *
39 * I: Modifiable by initialization/destruction paths and read-only
40 * for everyone else;
41 *
42 * L: Field should be protected by the pcluster lock;
43 *
44 * A: Field should be accessed / updated atomically for parallelized code.
45 */
46 struct z_erofs_pcluster {
47 struct erofs_workgroup obj;
48 struct mutex lock;
49
50 /* A: point to next chained pcluster or TAILs */
51 z_erofs_next_pcluster_t next;
52
53 /* L: the maximum decompression size of this round */
54 unsigned int length;
55
56 /* L: total number of bvecs */
57 unsigned int vcnt;
58
59 /* I: pcluster size (compressed size) in bytes */
60 unsigned int pclustersize;
61
62 /* I: page offset of start position of decompression */
63 unsigned short pageofs_out;
64
65 /* I: page offset of inline compressed data */
66 unsigned short pageofs_in;
67
68 union {
69 /* L: inline a certain number of bvec for bootstrap */
70 struct z_erofs_bvset_inline bvset;
71
72 /* I: can be used to free the pcluster by RCU. */
73 struct rcu_head rcu;
74 };
75
76 /* I: compression algorithm format */
77 unsigned char algorithmformat;
78
79 /* L: whether partial decompression or not */
80 bool partial;
81
82 /* L: indicate several pageofs_outs or not */
83 bool multibases;
84
85 /* L: whether extra buffer allocations are best-effort */
86 bool besteffort;
87
88 /* A: compressed bvecs (can be cached or inplaced pages) */
89 struct z_erofs_bvec compressed_bvecs[];
90 };
91
92 /* the end of a chain of pclusters */
93 #define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
94 #define Z_EROFS_PCLUSTER_NIL (NULL)
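/* Z_EROFS_PCLUSTER_NIL marks a pcluster not (yet) linked into any chain */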
95
96 struct z_erofs_decompressqueue {
97 struct super_block *sb;
98 atomic_t pending_bios;
99 z_erofs_next_pcluster_t head;
100
101 union {
102 struct completion done;
103 struct work_struct work;
104 struct kthread_work kthread_work;
105 } u;
106 bool eio, sync;
107 };
108
109 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
110 {
111 return !pcl->obj.index;
112 }
113
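/*
 * For example, assuming 4KiB pages, a pcl->pclustersize of 12288 bytes
 * yields PAGE_ALIGN(12288) >> PAGE_SHIFT == 3 compressed pages.
 */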
114 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
115 {
116 return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
117 }
118
119 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
120 static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
121 {
122 return fo->mapping == MNGD_MAPPING(sbi);
123 }
124
125 #define Z_EROFS_ONSTACK_PAGES 32
126
127 /*
128 * since pclustersize is variable for the big pcluster feature, introduce
129 * slab pools for different pcluster sizes.
130 */
131 struct z_erofs_pcluster_slab {
132 struct kmem_cache *slab;
133 unsigned int maxpages;
134 char name[48];
135 };
136
137 #define _PCLP(n) { .maxpages = n }
138
139 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
140 _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
141 _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
142 };
143
144 struct z_erofs_bvec_iter {
145 struct page *bvpage;
146 struct z_erofs_bvset *bvset;
147 unsigned int nr, cur;
148 };
149
150 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
151 {
152 if (iter->bvpage)
153 kunmap_local(iter->bvset);
154 return iter->bvpage;
155 }
156
157 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
158 {
159 unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
160 /* have to access nextpage in advance, otherwise it will be unmapped */
161 struct page *nextpage = iter->bvset->nextpage;
162 struct page *oldpage;
163
164 DBG_BUGON(!nextpage);
165 oldpage = z_erofs_bvec_iter_end(iter);
166 iter->bvpage = nextpage;
167 iter->bvset = kmap_local_page(nextpage);
168 iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
169 iter->cur = 0;
170 return oldpage;
171 }
172
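/*
 * Start from the inline bootstrap bvset and flip through chained bvset
 * pages until position `cur` falls inside the currently mapped set.
 */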
173 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
174 struct z_erofs_bvset_inline *bvset,
175 unsigned int bootstrap_nr,
176 unsigned int cur)
177 {
178 *iter = (struct z_erofs_bvec_iter) {
179 .nr = bootstrap_nr,
180 .bvset = (struct z_erofs_bvset *)bvset,
181 };
182
183 while (cur > iter->nr) {
184 cur -= iter->nr;
185 z_erofs_bvset_flip(iter);
186 }
187 iter->cur = cur;
188 }
189
190 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
191 struct z_erofs_bvec *bvec,
192 struct page **candidate_bvpage,
193 struct page **pagepool)
194 {
195 if (iter->cur >= iter->nr) {
196 struct page *nextpage = *candidate_bvpage;
197
198 if (!nextpage) {
199 nextpage = __erofs_allocpage(pagepool, GFP_KERNEL,
200 true);
201 if (!nextpage)
202 return -ENOMEM;
203 set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
204 }
205 DBG_BUGON(iter->bvset->nextpage);
206 iter->bvset->nextpage = nextpage;
207 z_erofs_bvset_flip(iter);
208
209 iter->bvset->nextpage = NULL;
210 *candidate_bvpage = NULL;
211 }
212 iter->bvset->bvec[iter->cur++] = *bvec;
213 return 0;
214 }
215
216 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
217 struct z_erofs_bvec *bvec,
218 struct page **old_bvpage)
219 {
220 if (iter->cur == iter->nr)
221 *old_bvpage = z_erofs_bvset_flip(iter);
222 else
223 *old_bvpage = NULL;
224 *bvec = iter->bvset->bvec[iter->cur++];
225 }
226
227 static void z_erofs_destroy_pcluster_pool(void)
228 {
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
232 if (!pcluster_pool[i].slab)
233 continue;
234 kmem_cache_destroy(pcluster_pool[i].slab);
235 pcluster_pool[i].slab = NULL;
236 }
237 }
238
239 static int z_erofs_create_pcluster_pool(void)
240 {
241 struct z_erofs_pcluster_slab *pcs;
242 struct z_erofs_pcluster *a;
243 unsigned int size;
244
245 for (pcs = pcluster_pool;
246 pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
247 size = struct_size(a, compressed_bvecs, pcs->maxpages);
248
249 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
250 pcs->slab = kmem_cache_create(pcs->name, size, 0,
251 SLAB_RECLAIM_ACCOUNT, NULL);
252 if (pcs->slab)
253 continue;
254
255 z_erofs_destroy_pcluster_pool();
256 return -ENOMEM;
257 }
258 return 0;
259 }
260
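/*
 * Pick the smallest slab pool whose maxpages covers the request, e.g.
 * (assuming 4KiB pages) a 20KiB pcluster is served by "erofs_pcluster-16".
 */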
261 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
262 {
263 unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
264 struct z_erofs_pcluster_slab *pcs = pcluster_pool;
265
266 for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
267 struct z_erofs_pcluster *pcl;
268
269 if (nrpages > pcs->maxpages)
270 continue;
271
272 pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
273 if (!pcl)
274 return ERR_PTR(-ENOMEM);
275 pcl->pclustersize = size;
276 return pcl;
277 }
278 return ERR_PTR(-EINVAL);
279 }
280
281 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
282 {
283 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
284 int i;
285
286 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
287 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
288
289 if (pclusterpages > pcs->maxpages)
290 continue;
291
292 kmem_cache_free(pcs->slab, pcl);
293 return;
294 }
295 DBG_BUGON(1);
296 }
297
298 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
299
300 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
301 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
302
303 static void erofs_destroy_percpu_workers(void)
304 {
305 struct kthread_worker *worker;
306 unsigned int cpu;
307
308 for_each_possible_cpu(cpu) {
309 worker = rcu_dereference_protected(
310 z_erofs_pcpu_workers[cpu], 1);
311 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
312 if (worker)
313 kthread_destroy_worker(worker);
314 }
315 kfree(z_erofs_pcpu_workers);
316 }
317
318 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
319 {
320 struct kthread_worker *worker =
321 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
322
323 if (IS_ERR(worker))
324 return worker;
325 if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
326 sched_set_fifo_low(worker->task);
327 return worker;
328 }
329
330 static int erofs_init_percpu_workers(void)
331 {
332 struct kthread_worker *worker;
333 unsigned int cpu;
334
335 z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
336 sizeof(struct kthread_worker *), GFP_ATOMIC);
337 if (!z_erofs_pcpu_workers)
338 return -ENOMEM;
339
340 for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
341 worker = erofs_init_percpu_worker(cpu);
342 if (!IS_ERR(worker))
343 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
344 }
345 return 0;
346 }
347 #else
348 static inline void erofs_destroy_percpu_workers(void) {}
349 static inline int erofs_init_percpu_workers(void) { return 0; }
350 #endif
351
352 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
353 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
354 static enum cpuhp_state erofs_cpuhp_state;
355
356 static int erofs_cpu_online(unsigned int cpu)
357 {
358 struct kthread_worker *worker, *old;
359
360 worker = erofs_init_percpu_worker(cpu);
361 if (IS_ERR(worker))
362 return PTR_ERR(worker);
363
364 spin_lock(&z_erofs_pcpu_worker_lock);
365 old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
366 lockdep_is_held(&z_erofs_pcpu_worker_lock));
367 if (!old)
368 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
369 spin_unlock(&z_erofs_pcpu_worker_lock);
370 if (old)
371 kthread_destroy_worker(worker);
372 return 0;
373 }
374
375 static int erofs_cpu_offline(unsigned int cpu)
376 {
377 struct kthread_worker *worker;
378
379 spin_lock(&z_erofs_pcpu_worker_lock);
380 worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
381 lockdep_is_held(&z_erofs_pcpu_worker_lock));
382 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
383 spin_unlock(&z_erofs_pcpu_worker_lock);
384
385 synchronize_rcu();
386 if (worker)
387 kthread_destroy_worker(worker);
388 return 0;
389 }
390
391 static int erofs_cpu_hotplug_init(void)
392 {
393 int state;
394
395 state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
396 "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
397 if (state < 0)
398 return state;
399
400 erofs_cpuhp_state = state;
401 return 0;
402 }
403
404 static void erofs_cpu_hotplug_destroy(void)
405 {
406 if (erofs_cpuhp_state)
407 cpuhp_remove_state_nocalls(erofs_cpuhp_state);
408 }
409 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
410 static inline int erofs_cpu_hotplug_init(void) { return 0; }
411 static inline void erofs_cpu_hotplug_destroy(void) {}
412 #endif
413
414 void z_erofs_exit_subsystem(void)
415 {
416 erofs_cpu_hotplug_destroy();
417 erofs_destroy_percpu_workers();
418 destroy_workqueue(z_erofs_workqueue);
419 z_erofs_destroy_pcluster_pool();
420 z_erofs_exit_decompressor();
421 }
422
423 int __init z_erofs_init_subsystem(void)
424 {
425 int err = z_erofs_init_decompressor();
426
427 if (err)
428 goto err_decompressor;
429
430 err = z_erofs_create_pcluster_pool();
431 if (err)
432 goto err_pcluster_pool;
433
434 z_erofs_workqueue = alloc_workqueue("erofs_worker",
435 WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
436 if (!z_erofs_workqueue) {
437 err = -ENOMEM;
438 goto err_workqueue_init;
439 }
440
441 err = erofs_init_percpu_workers();
442 if (err)
443 goto err_pcpu_worker;
444
445 err = erofs_cpu_hotplug_init();
446 if (err < 0)
447 goto err_cpuhp_init;
448 return err;
449
450 err_cpuhp_init:
451 erofs_destroy_percpu_workers();
452 err_pcpu_worker:
453 destroy_workqueue(z_erofs_workqueue);
454 err_workqueue_init:
455 z_erofs_destroy_pcluster_pool();
456 err_pcluster_pool:
457 z_erofs_exit_decompressor();
458 err_decompressor:
459 return err;
460 }
461
462 enum z_erofs_pclustermode {
463 Z_EROFS_PCLUSTER_INFLIGHT,
464 /*
465 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
466 * could be dispatched into the bypass queue later since its managed pages
467 * are already uptodate. None of the related online pages can be reused
468 * for in-place I/O (or bvpage) as it can be decoded without I/O submission.
469 */
470 Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
471 /*
472 * The pcluster was just linked to a decompression chain by us. It can
473 * also be linked with the remaining pclusters, which means if the
474 * processing page is the tail page of a pcluster, this pcluster can
475 * safely use the whole page (since the previous pcluster is within the
476 * same chain) for in-place I/O, as illustrated below:
477 * ___________________________________________________
478 * | tail (partial) page | head (partial) page |
479 * | (of the current pcl) | (of the previous pcl) |
480 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
481 *
482 * [ (*) the page above can be used as inplace I/O. ]
483 */
484 Z_EROFS_PCLUSTER_FOLLOWED,
485 };
486
487 struct z_erofs_decompress_frontend {
488 struct inode *const inode;
489 struct erofs_map_blocks map;
490 struct z_erofs_bvec_iter biter;
491
492 struct page *pagepool;
493 struct page *candidate_bvpage;
494 struct z_erofs_pcluster *pcl;
495 z_erofs_next_pcluster_t owned_head;
496 enum z_erofs_pclustermode mode;
497
498 erofs_off_t headoffset;
499
500 /* a pointer used to pick up inplace I/O pages */
501 unsigned int icur;
502 };
503
504 #define DECOMPRESS_FRONTEND_INIT(__i) { \
505 .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
506 .mode = Z_EROFS_PCLUSTER_FOLLOWED }
507
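/*
 * Cache policy summary: never cache if the strategy is disabled; always
 * cache partially-mapped extents; with the readaround strategy, also cache
 * extents starting before the requested read position.
 */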
508 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
509 {
510 unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
511
512 if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
513 return false;
514
515 if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
516 return true;
517
518 if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
519 fe->map.m_la < fe->headoffset)
520 return true;
521
522 return false;
523 }
524
525 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
526 {
527 struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
528 struct z_erofs_pcluster *pcl = fe->pcl;
529 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
530 bool shouldalloc = z_erofs_should_alloc_cache(fe);
531 bool standalone = true;
532 /*
533 * optimistic allocation without direct reclaim; in-place I/O can be
534 * used instead if memory is low.
535 */
536 gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
537 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
538 unsigned int i;
539
540 if (i_blocksize(fe->inode) != PAGE_SIZE ||
541 fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
542 return;
543
544 for (i = 0; i < pclusterpages; ++i) {
545 struct page *page, *newpage;
546
547 /* Inaccurate check w/o locking to avoid unneeded lookups */
548 if (READ_ONCE(pcl->compressed_bvecs[i].page))
549 continue;
550
551 page = find_get_page(mc, pcl->obj.index + i);
552 if (!page) {
553 /* I/O is needed, not possible to decompress directly */
554 standalone = false;
555 if (!shouldalloc)
556 continue;
557
558 /*
559 * Try cached I/O if allocation succeeds or fallback to
560 * in-place I/O instead to avoid any direct reclaim.
561 */
562 newpage = erofs_allocpage(&fe->pagepool, gfp);
563 if (!newpage)
564 continue;
565 set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
566 }
567 spin_lock(&pcl->obj.lockref.lock);
568 if (!pcl->compressed_bvecs[i].page) {
569 pcl->compressed_bvecs[i].page = page ? page : newpage;
570 spin_unlock(&pcl->obj.lockref.lock);
571 continue;
572 }
573 spin_unlock(&pcl->obj.lockref.lock);
574
575 if (page)
576 put_page(page);
577 else if (newpage)
578 erofs_pagepool_add(&fe->pagepool, newpage);
579 }
580
581 /*
582 * don't do inplace I/O if all compressed pages are available in
583 * managed cache since it can be moved to the bypass queue instead.
584 */
585 if (standalone)
586 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
587 }
588
589 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
590 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
591 struct erofs_workgroup *grp)
592 {
593 struct z_erofs_pcluster *const pcl =
594 container_of(grp, struct z_erofs_pcluster, obj);
595 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
596 struct folio *folio;
597 int i;
598
599 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
600 /* Each cached folio contains one page unless bs > ps is supported */
601 for (i = 0; i < pclusterpages; ++i) {
602 if (pcl->compressed_bvecs[i].page) {
603 folio = page_folio(pcl->compressed_bvecs[i].page);
604 /* Avoid reclaiming or migrating this folio */
605 if (!folio_trylock(folio))
606 return -EBUSY;
607
608 if (!erofs_folio_is_managed(sbi, folio))
609 continue;
610 pcl->compressed_bvecs[i].page = NULL;
611 folio_detach_private(folio);
612 folio_unlock(folio);
613 }
614 }
615 return 0;
616 }
617
618 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
619 {
620 struct z_erofs_pcluster *pcl = folio_get_private(folio);
621 struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
622 struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
623 bool ret;
624
625 if (!folio_test_private(folio))
626 return true;
627
628 ret = false;
629 spin_lock(&pcl->obj.lockref.lock);
630 if (pcl->obj.lockref.count <= 0) {
631 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
632 for (; bvec < end; ++bvec) {
633 if (bvec->page && page_folio(bvec->page) == folio) {
634 bvec->page = NULL;
635 folio_detach_private(folio);
636 ret = true;
637 break;
638 }
639 }
640 }
641 spin_unlock(&pcl->obj.lockref.lock);
642 return ret;
643 }
644
645 /*
646 * It will be called only on inode eviction. In case that there are still some
647 * decompression requests in progress, wait with rescheduling for a bit here.
648 * An extra lock could be introduced instead but it seems unnecessary.
649 */
650 static void z_erofs_cache_invalidate_folio(struct folio *folio,
651 size_t offset, size_t length)
652 {
653 const size_t stop = length + offset;
654
655 /* Check for potential overflow in debug mode */
656 DBG_BUGON(stop > folio_size(folio) || stop < length);
657
658 if (offset == 0 && stop == folio_size(folio))
659 while (!z_erofs_cache_release_folio(folio, 0))
660 cond_resched();
661 }
662
663 static const struct address_space_operations z_erofs_cache_aops = {
664 .release_folio = z_erofs_cache_release_folio,
665 .invalidate_folio = z_erofs_cache_invalidate_folio,
666 };
667
668 int erofs_init_managed_cache(struct super_block *sb)
669 {
670 struct inode *const inode = new_inode(sb);
671
672 if (!inode)
673 return -ENOMEM;
674
675 set_nlink(inode, 1);
676 inode->i_size = OFFSET_MAX;
677 inode->i_mapping->a_ops = &z_erofs_cache_aops;
678 mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
679 EROFS_SB(sb)->managed_cache = inode;
680 return 0;
681 }
682
683 /* callers must be with pcluster lock held */
684 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
685 struct z_erofs_bvec *bvec, bool exclusive)
686 {
687 struct z_erofs_pcluster *pcl = fe->pcl;
688 int ret;
689
690 if (exclusive) {
691 /* give priority to in-place I/O using file pages first */
692 spin_lock(&pcl->obj.lockref.lock);
693 while (fe->icur > 0) {
694 if (pcl->compressed_bvecs[--fe->icur].page)
695 continue;
696 pcl->compressed_bvecs[fe->icur] = *bvec;
697 spin_unlock(&pcl->obj.lockref.lock);
698 return 0;
699 }
700 spin_unlock(&pcl->obj.lockref.lock);
701
702 /* otherwise, check if it can be used as a bvpage */
703 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
704 !fe->candidate_bvpage)
705 fe->candidate_bvpage = bvec->page;
706 }
707 ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
708 &fe->pagepool);
709 fe->pcl->vcnt += (ret >= 0);
710 return ret;
711 }
712
713 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
714 {
715 struct erofs_map_blocks *map = &fe->map;
716 struct super_block *sb = fe->inode->i_sb;
717 bool ztailpacking = map->m_flags & EROFS_MAP_META;
718 struct z_erofs_pcluster *pcl;
719 struct erofs_workgroup *grp;
720 int err;
721
722 if (!(map->m_flags & EROFS_MAP_ENCODED) ||
723 (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
724 DBG_BUGON(1);
725 return -EFSCORRUPTED;
726 }
727
728 /* no available pcluster, let's allocate one */
729 pcl = z_erofs_alloc_pcluster(map->m_plen);
730 if (IS_ERR(pcl))
731 return PTR_ERR(pcl);
732
733 spin_lock_init(&pcl->obj.lockref.lock);
734 pcl->obj.lockref.count = 1; /* one ref for this request */
735 pcl->algorithmformat = map->m_algorithmformat;
736 pcl->length = 0;
737 pcl->partial = true;
738
739 /* new pclusters should be claimed as type 1, primary and followed */
740 pcl->next = fe->owned_head;
741 pcl->pageofs_out = map->m_la & ~PAGE_MASK;
742 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
743
744 /*
745 * lock all primary followed works before visible to others
746 * and mutex_trylock *never* fails for a new pcluster.
747 */
748 mutex_init(&pcl->lock);
749 DBG_BUGON(!mutex_trylock(&pcl->lock));
750
751 if (ztailpacking) {
752 pcl->obj.index = 0; /* which indicates ztailpacking */
753 } else {
754 pcl->obj.index = erofs_blknr(sb, map->m_pa);
755
756 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
757 if (IS_ERR(grp)) {
758 err = PTR_ERR(grp);
759 goto err_out;
760 }
761
762 if (grp != &pcl->obj) {
763 fe->pcl = container_of(grp,
764 struct z_erofs_pcluster, obj);
765 err = -EEXIST;
766 goto err_out;
767 }
768 }
769 fe->owned_head = &pcl->next;
770 fe->pcl = pcl;
771 return 0;
772
773 err_out:
774 mutex_unlock(&pcl->lock);
775 z_erofs_free_pcluster(pcl);
776 return err;
777 }
778
779 static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
780 {
781 struct erofs_map_blocks *map = &fe->map;
782 struct super_block *sb = fe->inode->i_sb;
783 erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
784 struct erofs_workgroup *grp = NULL;
785 int ret;
786
787 DBG_BUGON(fe->pcl);
788 /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
789 DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
790
791 if (!(map->m_flags & EROFS_MAP_META)) {
792 grp = erofs_find_workgroup(sb, blknr);
793 } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
794 DBG_BUGON(1);
795 return -EFSCORRUPTED;
796 }
797
798 if (grp) {
799 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
800 ret = -EEXIST;
801 } else {
802 ret = z_erofs_register_pcluster(fe);
803 }
804
805 if (ret == -EEXIST) {
806 mutex_lock(&fe->pcl->lock);
807 /* check if this pcluster hasn't been linked into any chain. */
808 if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL,
809 fe->owned_head) == Z_EROFS_PCLUSTER_NIL) {
810 /* .. so it can be attached to our submission chain */
811 fe->owned_head = &fe->pcl->next;
812 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
813 } else { /* otherwise, it belongs to an inflight chain */
814 fe->mode = Z_EROFS_PCLUSTER_INFLIGHT;
815 }
816 } else if (ret) {
817 return ret;
818 }
819
820 z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
821 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
822 if (!z_erofs_is_inline_pcluster(fe->pcl)) {
823 /* bind cache first when cached decompression is preferred */
824 z_erofs_bind_cache(fe);
825 } else {
826 void *mptr;
827
828 mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
829 if (IS_ERR(mptr)) {
830 ret = PTR_ERR(mptr);
831 erofs_err(sb, "failed to get inline data %d", ret);
832 return ret;
833 }
834 get_page(map->buf.page);
835 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
836 fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
837 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
838 }
839 /* file-backed inplace I/O pages are traversed in reverse order */
840 fe->icur = z_erofs_pclusterpages(fe->pcl);
841 return 0;
842 }
843
844 /*
845 * keep in mind that referenced pclusters will be freed
846 * only after an RCU grace period.
847 */
848 static void z_erofs_rcu_callback(struct rcu_head *head)
849 {
850 z_erofs_free_pcluster(container_of(head,
851 struct z_erofs_pcluster, rcu));
852 }
853
854 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
855 {
856 struct z_erofs_pcluster *const pcl =
857 container_of(grp, struct z_erofs_pcluster, obj);
858
859 call_rcu(&pcl->rcu, z_erofs_rcu_callback);
860 }
861
862 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
863 {
864 struct z_erofs_pcluster *pcl = fe->pcl;
865
866 if (!pcl)
867 return;
868
869 z_erofs_bvec_iter_end(&fe->biter);
870 mutex_unlock(&pcl->lock);
871
872 if (fe->candidate_bvpage)
873 fe->candidate_bvpage = NULL;
874
875 /*
876 * once all pending pages are added, drop the pcluster reference
877 * if it isn't hosted by ourselves.
878 */
879 if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
880 erofs_workgroup_put(&pcl->obj);
881
882 fe->pcl = NULL;
883 }
884
885 static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
886 unsigned int cur, unsigned int end, erofs_off_t pos)
887 {
888 struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
889 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
890 unsigned int cnt;
891 u8 *src;
892
893 if (!packed_inode)
894 return -EFSCORRUPTED;
895
896 buf.mapping = packed_inode->i_mapping;
897 for (; cur < end; cur += cnt, pos += cnt) {
898 cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
899 src = erofs_bread(&buf, pos, EROFS_KMAP);
900 if (IS_ERR(src)) {
901 erofs_put_metabuf(&buf);
902 return PTR_ERR(src);
903 }
904 memcpy_to_folio(folio, cur, src, cnt);
905 }
906 erofs_put_metabuf(&buf);
907 return 0;
908 }
909
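/*
 * Walk the folio backwards in extent-sized steps: unmapped ranges are
 * zeroed, fragment ranges are copied from the packed inode, and everything
 * else is attached to the current pcluster for decompression later.
 */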
910 static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
911 struct folio *folio, bool ra)
912 {
913 struct inode *const inode = f->inode;
914 struct erofs_map_blocks *const map = &f->map;
915 const loff_t offset = folio_pos(folio);
916 const unsigned int bs = i_blocksize(inode);
917 unsigned int end = folio_size(folio), split = 0, cur, pgs;
918 bool tight, excl;
919 int err = 0;
920
921 tight = (bs == PAGE_SIZE);
922 erofs_onlinefolio_init(folio);
923 do {
924 if (offset + end - 1 < map->m_la ||
925 offset + end - 1 >= map->m_la + map->m_llen) {
926 z_erofs_pcluster_end(f);
927 map->m_la = offset + end - 1;
928 map->m_llen = 0;
929 err = z_erofs_map_blocks_iter(inode, map, 0);
930 if (err)
931 break;
932 }
933
934 cur = offset > map->m_la ? 0 : map->m_la - offset;
935 pgs = round_down(cur, PAGE_SIZE);
936 /* bump split parts first to avoid several separate cases */
937 ++split;
938
939 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
940 folio_zero_segment(folio, cur, end);
941 tight = false;
942 } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
943 erofs_off_t fpos = offset + cur - map->m_la;
944
945 err = z_erofs_read_fragment(inode->i_sb, folio, cur,
946 cur + min(map->m_llen - fpos, end - cur),
947 EROFS_I(inode)->z_fragmentoff + fpos);
948 if (err)
949 break;
950 tight = false;
951 } else {
952 if (!f->pcl) {
953 err = z_erofs_pcluster_begin(f);
954 if (err)
955 break;
956 f->pcl->besteffort |= !ra;
957 }
958
959 pgs = round_down(end - 1, PAGE_SIZE);
960 /*
961 * Ensure this partial page belongs to this submit chain
962 * rather than other concurrent submit chains or
963 * noio(bypass) chains since those chains are handled
964 * asynchronously thus it cannot be used for inplace I/O
965 * or bvpage (should be processed in the strict order.)
966 */
967 tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
968 excl = false;
969 if (cur <= pgs) {
970 excl = (split <= 1) || tight;
971 cur = pgs;
972 }
973
974 err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
975 .page = folio_page(folio, pgs >> PAGE_SHIFT),
976 .offset = offset + pgs - map->m_la,
977 .end = end - pgs, }), excl);
978 if (err)
979 break;
980
981 erofs_onlinefolio_split(folio);
982 if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
983 f->pcl->multibases = true;
984 if (f->pcl->length < offset + end - map->m_la) {
985 f->pcl->length = offset + end - map->m_la;
986 f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
987 }
988 if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
989 !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
990 f->pcl->length == map->m_llen)
991 f->pcl->partial = false;
992 }
993 /* shorten the remaining extent to update progress */
994 map->m_llen = offset + cur - map->m_la;
995 map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
996 if (cur <= pgs) {
997 split = cur < pgs;
998 tight = (bs == PAGE_SIZE);
999 }
1000 } while ((end = cur) > 0);
1001 erofs_onlinefolio_end(folio, err);
1002 return err;
1003 }
1004
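/*
 * e.g. with sync_decompress=auto, read_folio() (readahead_pages == 0) is
 * decompressed synchronously while readahead is handled in the background.
 */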
1005 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1006 unsigned int readahead_pages)
1007 {
1008 /* auto: enable for read_folio, disable for readahead */
1009 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1010 !readahead_pages)
1011 return true;
1012
1013 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1014 (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1015 return true;
1016
1017 return false;
1018 }
1019
1020 static bool z_erofs_page_is_invalidated(struct page *page)
1021 {
1022 return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
1023 }
1024
1025 struct z_erofs_decompress_backend {
1026 struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1027 struct super_block *sb;
1028 struct z_erofs_pcluster *pcl;
1029
1030 /* pages with the longest decompressed length for deduplication */
1031 struct page **decompressed_pages;
1032 /* pages to keep the compressed data */
1033 struct page **compressed_pages;
1034
1035 struct list_head decompressed_secondary_bvecs;
1036 struct page **pagepool;
1037 unsigned int onstack_used, nr_pages;
1038 };
1039
1040 struct z_erofs_bvec_item {
1041 struct z_erofs_bvec bvec;
1042 struct list_head list;
1043 };
1044
1045 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1046 struct z_erofs_bvec *bvec)
1047 {
1048 struct z_erofs_bvec_item *item;
1049 unsigned int pgnr;
1050
1051 if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1052 (bvec->end == PAGE_SIZE ||
1053 bvec->offset + bvec->end == be->pcl->length)) {
1054 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1055 DBG_BUGON(pgnr >= be->nr_pages);
1056 if (!be->decompressed_pages[pgnr]) {
1057 be->decompressed_pages[pgnr] = bvec->page;
1058 return;
1059 }
1060 }
1061
1062 /* (cold path) one pcluster is requested multiple times */
1063 item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1064 item->bvec = *bvec;
1065 list_add(&item->list, &be->decompressed_secondary_bvecs);
1066 }
1067
1068 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1069 int err)
1070 {
1071 unsigned int off0 = be->pcl->pageofs_out;
1072 struct list_head *p, *n;
1073
1074 list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1075 struct z_erofs_bvec_item *bvi;
1076 unsigned int end, cur;
1077 void *dst, *src;
1078
1079 bvi = container_of(p, struct z_erofs_bvec_item, list);
1080 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1081 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1082 bvi->bvec.end);
1083 dst = kmap_local_page(bvi->bvec.page);
1084 while (cur < end) {
1085 unsigned int pgnr, scur, len;
1086
1087 pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1088 DBG_BUGON(pgnr >= be->nr_pages);
1089
1090 scur = bvi->bvec.offset + cur -
1091 ((pgnr << PAGE_SHIFT) - off0);
1092 len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1093 if (!be->decompressed_pages[pgnr]) {
1094 err = -EFSCORRUPTED;
1095 cur += len;
1096 continue;
1097 }
1098 src = kmap_local_page(be->decompressed_pages[pgnr]);
1099 memcpy(dst + cur, src + scur, len);
1100 kunmap_local(src);
1101 cur += len;
1102 }
1103 kunmap_local(dst);
1104 erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
1105 list_del(p);
1106 kfree(bvi);
1107 }
1108 }
1109
1110 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1111 {
1112 struct z_erofs_pcluster *pcl = be->pcl;
1113 struct z_erofs_bvec_iter biter;
1114 struct page *old_bvpage;
1115 int i;
1116
1117 z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1118 for (i = 0; i < pcl->vcnt; ++i) {
1119 struct z_erofs_bvec bvec;
1120
1121 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1122
1123 if (old_bvpage)
1124 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1125
1126 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1127 z_erofs_do_decompressed_bvec(be, &bvec);
1128 }
1129
1130 old_bvpage = z_erofs_bvec_iter_end(&biter);
1131 if (old_bvpage)
1132 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1133 }
1134
1135 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1136 bool *overlapped)
1137 {
1138 struct z_erofs_pcluster *pcl = be->pcl;
1139 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1140 int i, err = 0;
1141
1142 *overlapped = false;
1143 for (i = 0; i < pclusterpages; ++i) {
1144 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1145 struct page *page = bvec->page;
1146
1147 /* compressed data ought to be valid when decompressing */
1148 if (IS_ERR(page) || !page) {
1149 bvec->page = NULL; /* clear the failure reason */
1150 err = page ? PTR_ERR(page) : -EIO;
1151 continue;
1152 }
1153 be->compressed_pages[i] = page;
1154
1155 if (z_erofs_is_inline_pcluster(pcl) ||
1156 erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
1157 if (!PageUptodate(page))
1158 err = -EIO;
1159 continue;
1160 }
1161
1162 DBG_BUGON(z_erofs_page_is_invalidated(page));
1163 if (z_erofs_is_shortlived_page(page))
1164 continue;
1165 z_erofs_do_decompressed_bvec(be, bvec);
1166 *overlapped = true;
1167 }
1168 return err;
1169 }
1170
1171 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1172 int err)
1173 {
1174 struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1175 struct z_erofs_pcluster *pcl = be->pcl;
1176 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1177 const struct z_erofs_decompressor *decomp =
1178 z_erofs_decomp[pcl->algorithmformat];
1179 int i, j, jtop, err2;
1180 struct page *page;
1181 bool overlapped;
1182
1183 mutex_lock(&pcl->lock);
1184 be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1185
1186 /* allocate (de)compressed page arrays if cannot be kept on stack */
1187 be->decompressed_pages = NULL;
1188 be->compressed_pages = NULL;
1189 be->onstack_used = 0;
1190 if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1191 be->decompressed_pages = be->onstack_pages;
1192 be->onstack_used = be->nr_pages;
1193 memset(be->decompressed_pages, 0,
1194 sizeof(struct page *) * be->nr_pages);
1195 }
1196
1197 if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1198 be->compressed_pages = be->onstack_pages + be->onstack_used;
1199
1200 if (!be->decompressed_pages)
1201 be->decompressed_pages =
1202 kvcalloc(be->nr_pages, sizeof(struct page *),
1203 GFP_KERNEL | __GFP_NOFAIL);
1204 if (!be->compressed_pages)
1205 be->compressed_pages =
1206 kvcalloc(pclusterpages, sizeof(struct page *),
1207 GFP_KERNEL | __GFP_NOFAIL);
1208
1209 z_erofs_parse_out_bvecs(be);
1210 err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1211 if (err2)
1212 err = err2;
1213 if (!err)
1214 err = decomp->decompress(&(struct z_erofs_decompress_req) {
1215 .sb = be->sb,
1216 .in = be->compressed_pages,
1217 .out = be->decompressed_pages,
1218 .pageofs_in = pcl->pageofs_in,
1219 .pageofs_out = pcl->pageofs_out,
1220 .inputsize = pcl->pclustersize,
1221 .outputsize = pcl->length,
1222 .alg = pcl->algorithmformat,
1223 .inplace_io = overlapped,
1224 .partial_decoding = pcl->partial,
1225 .fillgaps = pcl->multibases,
1226 .gfp = pcl->besteffort ? GFP_KERNEL :
1227 GFP_NOWAIT | __GFP_NORETRY
1228 }, be->pagepool);
1229
1230 /* must handle all compressed pages before actual file pages */
1231 if (z_erofs_is_inline_pcluster(pcl)) {
1232 page = pcl->compressed_bvecs[0].page;
1233 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1234 put_page(page);
1235 } else {
1236 /* managed folios are still left in compressed_bvecs[] */
1237 for (i = 0; i < pclusterpages; ++i) {
1238 page = be->compressed_pages[i];
1239 if (!page ||
1240 erofs_folio_is_managed(sbi, page_folio(page)))
1241 continue;
1242 (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1243 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1244 }
1245 }
1246 if (be->compressed_pages < be->onstack_pages ||
1247 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1248 kvfree(be->compressed_pages);
1249
1250 jtop = 0;
1251 z_erofs_fill_other_copies(be, err);
1252 for (i = 0; i < be->nr_pages; ++i) {
1253 page = be->decompressed_pages[i];
1254 if (!page)
1255 continue;
1256
1257 DBG_BUGON(z_erofs_page_is_invalidated(page));
1258 if (!z_erofs_is_shortlived_page(page)) {
1259 erofs_onlinefolio_end(page_folio(page), err);
1260 continue;
1261 }
1262 if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
1263 erofs_pagepool_add(be->pagepool, page);
1264 continue;
1265 }
1266 for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
1267 ;
1268 if (j >= jtop) /* this bounce page is newly detected */
1269 be->decompressed_pages[jtop++] = page;
1270 }
1271 while (jtop)
1272 erofs_pagepool_add(be->pagepool,
1273 be->decompressed_pages[--jtop]);
1274 if (be->decompressed_pages != be->onstack_pages)
1275 kvfree(be->decompressed_pages);
1276
1277 pcl->length = 0;
1278 pcl->partial = true;
1279 pcl->multibases = false;
1280 pcl->besteffort = false;
1281 pcl->bvset.nextpage = NULL;
1282 pcl->vcnt = 0;
1283
1284 /* pcluster lock MUST be taken before the following line */
1285 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1286 mutex_unlock(&pcl->lock);
1287 return err;
1288 }
1289
1290 static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1291 struct page **pagepool)
1292 {
1293 struct z_erofs_decompress_backend be = {
1294 .sb = io->sb,
1295 .pagepool = pagepool,
1296 .decompressed_secondary_bvecs =
1297 LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1298 };
1299 z_erofs_next_pcluster_t owned = io->head;
1300 int err = io->eio ? -EIO : 0;
1301
1302 while (owned != Z_EROFS_PCLUSTER_TAIL) {
1303 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1304
1305 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1306 owned = READ_ONCE(be.pcl->next);
1307
1308 err = z_erofs_decompress_pcluster(&be, err) ?: err;
1309 if (z_erofs_is_inline_pcluster(be.pcl))
1310 z_erofs_free_pcluster(be.pcl);
1311 else
1312 erofs_workgroup_put(&be.pcl->obj);
1313 }
1314 return err;
1315 }
1316
1317 static void z_erofs_decompressqueue_work(struct work_struct *work)
1318 {
1319 struct z_erofs_decompressqueue *bgq =
1320 container_of(work, struct z_erofs_decompressqueue, u.work);
1321 struct page *pagepool = NULL;
1322
1323 DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1324 z_erofs_decompress_queue(bgq, &pagepool);
1325 erofs_release_pages(&pagepool);
1326 kvfree(bgq);
1327 }
1328
1329 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1330 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1331 {
1332 z_erofs_decompressqueue_work((struct work_struct *)work);
1333 }
1334 #endif
1335
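/*
 * The completion of the last bio (pending_bios dropping to zero) kicks off
 * decompression: atomic contexts defer it to a per-CPU kthread worker or
 * the unbound workqueue, while ordinary task context decompresses inline.
 */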
1336 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1337 int bios)
1338 {
1339 struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1340
1341 /* wake up the caller thread for sync decompression */
1342 if (io->sync) {
1343 if (!atomic_add_return(bios, &io->pending_bios))
1344 complete(&io->u.done);
1345 return;
1346 }
1347
1348 if (atomic_add_return(bios, &io->pending_bios))
1349 return;
1350 /* Use (kthread_)work and sync decompression for atomic contexts only */
1351 if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1352 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1353 struct kthread_worker *worker;
1354
1355 rcu_read_lock();
1356 worker = rcu_dereference(
1357 z_erofs_pcpu_workers[raw_smp_processor_id()]);
1358 if (!worker) {
1359 INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1360 queue_work(z_erofs_workqueue, &io->u.work);
1361 } else {
1362 kthread_queue_work(worker, &io->u.kthread_work);
1363 }
1364 rcu_read_unlock();
1365 #else
1366 queue_work(z_erofs_workqueue, &io->u.work);
1367 #endif
1368 /* enable sync decompression for readahead */
1369 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1370 sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1371 return;
1372 }
1373 z_erofs_decompressqueue_work(&io->u.work);
1374 }
1375
1376 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1377 struct z_erofs_decompress_frontend *f,
1378 struct z_erofs_pcluster *pcl,
1379 unsigned int nr,
1380 struct address_space *mc)
1381 {
1382 gfp_t gfp = mapping_gfp_mask(mc);
1383 bool tocache = false;
1384 struct z_erofs_bvec zbv;
1385 struct address_space *mapping;
1386 struct folio *folio;
1387 struct page *page;
1388 int bs = i_blocksize(f->inode);
1389
1390 /* Except for inplace folios, the entire folio can be used for I/Os */
1391 bvec->bv_offset = 0;
1392 bvec->bv_len = PAGE_SIZE;
1393 repeat:
1394 spin_lock(&pcl->obj.lockref.lock);
1395 zbv = pcl->compressed_bvecs[nr];
1396 spin_unlock(&pcl->obj.lockref.lock);
1397 if (!zbv.page)
1398 goto out_allocfolio;
1399
1400 bvec->bv_page = zbv.page;
1401 DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
1402
1403 folio = page_folio(zbv.page);
1404 /*
1405 * Handle preallocated cached folios. We tried to allocate such folios
1406 * without triggering direct reclaim. If allocation failed, inplace
1407 * file-backed folios will be used instead.
1408 */
1409 if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
1410 tocache = true;
1411 goto out_tocache;
1412 }
1413
1414 mapping = READ_ONCE(folio->mapping);
1415 /*
1416 * File-backed folios for inplace I/Os are all locked steady,
1417 * therefore it is impossible for `mapping` to be NULL.
1418 */
1419 if (mapping && mapping != mc) {
1420 if (zbv.offset < 0)
1421 bvec->bv_offset = round_up(-zbv.offset, bs);
1422 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
1423 return;
1424 }
1425
1426 folio_lock(folio);
1427 if (likely(folio->mapping == mc)) {
1428 /*
1429 * The cached folio is still in managed cache but without
1430 * a valid `->private` pcluster hint. Let's reconnect them.
1431 */
1432 if (!folio_test_private(folio)) {
1433 folio_attach_private(folio, pcl);
1434 /* compressed_bvecs[] already takes a ref before */
1435 folio_put(folio);
1436 }
1437 if (likely(folio->private == pcl)) {
1438 /* don't submit cache I/Os again if already uptodate */
1439 if (folio_test_uptodate(folio)) {
1440 folio_unlock(folio);
1441 bvec->bv_page = NULL;
1442 }
1443 return;
1444 }
1445 /*
1446 * Already linked with another pcluster, which only appears in
1447 * crafted images by fuzzers for now. But handle this anyway.
1448 */
1449 tocache = false; /* use temporary short-lived pages */
1450 } else {
1451 DBG_BUGON(1); /* referenced managed folios can't be truncated */
1452 tocache = true;
1453 }
1454 folio_unlock(folio);
1455 folio_put(folio);
1456 out_allocfolio:
1457 page = __erofs_allocpage(&f->pagepool, gfp, true);
1458 spin_lock(&pcl->obj.lockref.lock);
1459 if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
1460 if (page)
1461 erofs_pagepool_add(&f->pagepool, page);
1462 spin_unlock(&pcl->obj.lockref.lock);
1463 cond_resched();
1464 goto repeat;
1465 }
1466 pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
1467 spin_unlock(&pcl->obj.lockref.lock);
1468 bvec->bv_page = page;
1469 if (!page)
1470 return;
1471 folio = page_folio(page);
1472 out_tocache:
1473 if (!tocache || bs != PAGE_SIZE ||
1474 filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
1475 /* turn into a temporary shortlived folio (1 ref) */
1476 folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
1477 return;
1478 }
1479 folio_attach_private(folio, pcl);
1480 /* drop a refcount added by allocpage (then 2 refs in total here) */
1481 folio_put(folio);
1482 }
1483
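/*
 * Prefer an allocated background queue; if allocation fails (or foreground
 * mode is requested), fall back to the caller's on-stack queue, which is
 * waited on via a completion.
 */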
1484 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
1485 struct z_erofs_decompressqueue *fgq, bool *fg)
1486 {
1487 struct z_erofs_decompressqueue *q;
1488
1489 if (fg && !*fg) {
1490 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1491 if (!q) {
1492 *fg = true;
1493 goto fg_out;
1494 }
1495 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1496 kthread_init_work(&q->u.kthread_work,
1497 z_erofs_decompressqueue_kthread_work);
1498 #else
1499 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1500 #endif
1501 } else {
1502 fg_out:
1503 q = fgq;
1504 init_completion(&fgq->u.done);
1505 atomic_set(&fgq->pending_bios, 0);
1506 q->eio = false;
1507 q->sync = true;
1508 }
1509 q->sb = sb;
1510 q->head = Z_EROFS_PCLUSTER_TAIL;
1511 return q;
1512 }
1513
1514 /* define decompression jobqueue types */
1515 enum {
1516 JQ_BYPASS,
1517 JQ_SUBMIT,
1518 NR_JOBQUEUES,
1519 };
1520
1521 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1522 z_erofs_next_pcluster_t qtail[],
1523 z_erofs_next_pcluster_t owned_head)
1524 {
1525 z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1526 z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1527
1528 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
1529
1530 WRITE_ONCE(*submit_qtail, owned_head);
1531 WRITE_ONCE(*bypass_qtail, &pcl->next);
1532
1533 qtail[JQ_BYPASS] = &pcl->next;
1534 }
1535
1536 static void z_erofs_endio(struct bio *bio)
1537 {
1538 struct z_erofs_decompressqueue *q = bio->bi_private;
1539 blk_status_t err = bio->bi_status;
1540 struct folio_iter fi;
1541
1542 bio_for_each_folio_all(fi, bio) {
1543 struct folio *folio = fi.folio;
1544
1545 DBG_BUGON(folio_test_uptodate(folio));
1546 DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
1547 if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
1548 continue;
1549
1550 if (!err)
1551 folio_mark_uptodate(folio);
1552 folio_unlock(folio);
1553 }
1554 if (err)
1555 q->eio = true;
1556 z_erofs_decompress_kickoff(q, -1);
1557 if (bio->bi_bdev)
1558 bio_put(bio);
1559 }
1560
1561 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1562 struct z_erofs_decompressqueue *fgq,
1563 bool *force_fg, bool readahead)
1564 {
1565 struct super_block *sb = f->inode->i_sb;
1566 struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1567 z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1568 struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1569 z_erofs_next_pcluster_t owned_head = f->owned_head;
1570 /* bio is NULL initially, so no need to initialize last_{index,bdev} */
1571 erofs_off_t last_pa;
1572 unsigned int nr_bios = 0;
1573 struct bio *bio = NULL;
1574 unsigned long pflags;
1575 int memstall = 0;
1576
1577 /* No need to read from device for pclusters in the bypass queue. */
1578 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1579 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
1580
1581 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1582 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1583
1584 /* by default, all need io submission */
1585 q[JQ_SUBMIT]->head = owned_head;
1586
1587 do {
1588 struct erofs_map_dev mdev;
1589 struct z_erofs_pcluster *pcl;
1590 erofs_off_t cur, end;
1591 struct bio_vec bvec;
1592 unsigned int i = 0;
1593 bool bypass = true;
1594
1595 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1596 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1597 owned_head = READ_ONCE(pcl->next);
1598
1599 if (z_erofs_is_inline_pcluster(pcl)) {
1600 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1601 continue;
1602 }
1603
1604 /* no device id here, thus it will always succeed */
1605 mdev = (struct erofs_map_dev) {
1606 .m_pa = erofs_pos(sb, pcl->obj.index),
1607 };
1608 (void)erofs_map_dev(sb, &mdev);
1609
1610 cur = mdev.m_pa;
1611 end = cur + pcl->pclustersize;
1612 do {
1613 bvec.bv_page = NULL;
1614 if (bio && (cur != last_pa ||
1615 bio->bi_bdev != mdev.m_bdev)) {
1616 drain_io:
1617 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1618 erofs_fileio_submit_bio(bio);
1619 else if (erofs_is_fscache_mode(sb))
1620 erofs_fscache_submit_bio(bio);
1621 else
1622 submit_bio(bio);
1623
1624 if (memstall) {
1625 psi_memstall_leave(&pflags);
1626 memstall = 0;
1627 }
1628 bio = NULL;
1629 }
1630
1631 if (!bvec.bv_page) {
1632 z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
1633 if (!bvec.bv_page)
1634 continue;
1635 if (cur + bvec.bv_len > end)
1636 bvec.bv_len = end - cur;
1637 DBG_BUGON(bvec.bv_len < sb->s_blocksize);
1638 }
1639
1640 if (unlikely(PageWorkingset(bvec.bv_page)) &&
1641 !memstall) {
1642 psi_memstall_enter(&pflags);
1643 memstall = 1;
1644 }
1645
1646 if (!bio) {
1647 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1648 bio = erofs_fileio_bio_alloc(&mdev);
1649 else if (erofs_is_fscache_mode(sb))
1650 bio = erofs_fscache_bio_alloc(&mdev);
1651 else
1652 bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1653 REQ_OP_READ, GFP_NOIO);
1654 bio->bi_end_io = z_erofs_endio;
1655 bio->bi_iter.bi_sector = cur >> 9;
1656 bio->bi_private = q[JQ_SUBMIT];
1657 if (readahead)
1658 bio->bi_opf |= REQ_RAHEAD;
1659 ++nr_bios;
1660 }
1661
1662 if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
1663 bvec.bv_offset))
1664 goto drain_io;
1665 last_pa = cur + bvec.bv_len;
1666 bypass = false;
1667 } while ((cur += bvec.bv_len) < end);
1668
1669 if (!bypass)
1670 qtail[JQ_SUBMIT] = &pcl->next;
1671 else
1672 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1673 } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1674
1675 if (bio) {
1676 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1677 erofs_fileio_submit_bio(bio);
1678 else if (erofs_is_fscache_mode(sb))
1679 erofs_fscache_submit_bio(bio);
1680 else
1681 submit_bio(bio);
1682 if (memstall)
1683 psi_memstall_leave(&pflags);
1684 }
1685
1686 /*
1687 * although background decompression is preferred, nothing is pending
1688 * for submission; don't kick off decompression, just free the queue instead.
1689 */
1690 if (!*force_fg && !nr_bios) {
1691 kvfree(q[JQ_SUBMIT]);
1692 return;
1693 }
1694 z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
1695 }
1696
1697 static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1698 unsigned int ra_folios)
1699 {
1700 struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1701 struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
1702 bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
1703 int err;
1704
1705 if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1706 return 0;
1707 z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
1708
1709 /* handle bypass queue (no i/o pclusters) immediately */
1710 err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
1711 if (!force_fg)
1712 return err;
1713
1714 /* wait until all bios are completed */
1715 wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1716
1717 /* handle synchronous decompress queue in the caller context */
1718 return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err;
1719 }
1720
1721 /*
1722 * Since partial uptodate is still unimplemented for now, we have to use
1723 * approximate readmore strategies as a start.
1724 */
1725 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1726 struct readahead_control *rac, bool backmost)
1727 {
1728 struct inode *inode = f->inode;
1729 struct erofs_map_blocks *map = &f->map;
1730 erofs_off_t cur, end, headoffset = f->headoffset;
1731 int err;
1732
1733 if (backmost) {
1734 if (rac)
1735 end = headoffset + readahead_length(rac) - 1;
1736 else
1737 end = headoffset + PAGE_SIZE - 1;
1738 map->m_la = end;
1739 err = z_erofs_map_blocks_iter(inode, map,
1740 EROFS_GET_BLOCKS_READMORE);
1741 if (err)
1742 return;
1743
1744 /* expand ra for the trailing edge if readahead */
1745 if (rac) {
1746 cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1747 readahead_expand(rac, headoffset, cur - headoffset);
1748 return;
1749 }
1750 end = round_up(end, PAGE_SIZE);
1751 } else {
1752 end = round_up(map->m_la, PAGE_SIZE);
1753 if (!map->m_llen)
1754 return;
1755 }
1756
1757 cur = map->m_la + map->m_llen - 1;
1758 while ((cur >= end) && (cur < i_size_read(inode))) {
1759 pgoff_t index = cur >> PAGE_SHIFT;
1760 struct folio *folio;
1761
1762 folio = erofs_grab_folio_nowait(inode->i_mapping, index);
1763 if (!IS_ERR_OR_NULL(folio)) {
1764 if (folio_test_uptodate(folio))
1765 folio_unlock(folio);
1766 else
1767 z_erofs_scan_folio(f, folio, !!rac);
1768 folio_put(folio);
1769 }
1770
1771 if (cur < PAGE_SIZE)
1772 break;
1773 cur = (index << PAGE_SHIFT) - 1;
1774 }
1775 }
1776
1777 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1778 {
1779 struct inode *const inode = folio->mapping->host;
1780 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1781 int err;
1782
1783 trace_erofs_read_folio(folio, false);
1784 f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
1785
1786 z_erofs_pcluster_readmore(&f, NULL, true);
1787 err = z_erofs_scan_folio(&f, folio, false);
1788 z_erofs_pcluster_readmore(&f, NULL, false);
1789 z_erofs_pcluster_end(&f);
1790
1791 /* if some pclusters are ready, need submit them anyway */
1792 err = z_erofs_runqueue(&f, 0) ?: err;
1793 if (err && err != -EINTR)
1794 erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
1795 err, folio->index, EROFS_I(inode)->nid);
1796
1797 erofs_put_metabuf(&f.map.buf);
1798 erofs_release_pages(&f.pagepool);
1799 return err;
1800 }
1801
1802 static void z_erofs_readahead(struct readahead_control *rac)
1803 {
1804 struct inode *const inode = rac->mapping->host;
1805 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1806 struct folio *head = NULL, *folio;
1807 unsigned int nr_folios;
1808 int err;
1809
1810 f.headoffset = readahead_pos(rac);
1811
1812 z_erofs_pcluster_readmore(&f, rac, true);
1813 nr_folios = readahead_count(rac);
1814 trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
1815
1816 while ((folio = readahead_folio(rac))) {
1817 folio->private = head;
1818 head = folio;
1819 }
1820
1821 /* traverse in reverse order for best metadata I/O performance */
1822 while (head) {
1823 folio = head;
1824 head = folio_get_private(folio);
1825
1826 err = z_erofs_scan_folio(&f, folio, true);
1827 if (err && err != -EINTR)
1828 erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
1829 folio->index, EROFS_I(inode)->nid);
1830 }
1831 z_erofs_pcluster_readmore(&f, rac, false);
1832 z_erofs_pcluster_end(&f);
1833
1834 (void)z_erofs_runqueue(&f, nr_folios);
1835 erofs_put_metabuf(&f.map.buf);
1836 erofs_release_pages(&f.pagepool);
1837 }
1838
1839 const struct address_space_operations z_erofs_aops = {
1840 .read_folio = z_erofs_read_folio,
1841 .readahead = z_erofs_readahead,
1842 };
1843