1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2022 Alibaba Cloud
6 */
7 #include "compress.h"
8 #include <linux/psi.h>
9 #include <linux/cpuhotplug.h>
10 #include <trace/events/erofs.h>
11
12 #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
13 #define Z_EROFS_INLINE_BVECS 2
14
/*
 * Keep a dedicated type here in case another tagged
 * pointer is introduced later.
 */
19 typedef void *z_erofs_next_pcluster_t;
20
21 struct z_erofs_bvec {
22 struct page *page;
23 int offset;
24 unsigned int end;
25 };
26
27 #define __Z_EROFS_BVSET(name, total) \
28 struct name { \
29 /* point to the next page which contains the following bvecs */ \
30 struct page *nextpage; \
31 struct z_erofs_bvec bvec[total]; \
32 }
33 __Z_EROFS_BVSET(z_erofs_bvset,);
34 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
35
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
46 struct z_erofs_pcluster {
47 struct erofs_workgroup obj;
48 struct mutex lock;
49
50 /* A: point to next chained pcluster or TAILs */
51 z_erofs_next_pcluster_t next;
52
53 /* L: the maximum decompression size of this round */
54 unsigned int length;
55
56 /* L: total number of bvecs */
57 unsigned int vcnt;
58
59 /* I: pcluster size (compressed size) in bytes */
60 unsigned int pclustersize;
61
62 /* I: page offset of start position of decompression */
63 unsigned short pageofs_out;
64
65 /* I: page offset of inline compressed data */
66 unsigned short pageofs_in;
67
68 union {
/* L: inline a certain number of bvecs for bootstrap */
70 struct z_erofs_bvset_inline bvset;
71
72 /* I: can be used to free the pcluster by RCU. */
73 struct rcu_head rcu;
74 };
75
76 /* I: compression algorithm format */
77 unsigned char algorithmformat;
78
79 /* L: whether partial decompression or not */
80 bool partial;
81
/* L: whether there are multiple pageofs_out bases or not */
83 bool multibases;
84
85 /* L: whether extra buffer allocations are best-effort */
86 bool besteffort;
87
88 /* A: compressed bvecs (can be cached or inplaced pages) */
89 struct z_erofs_bvec compressed_bvecs[];
90 };
91
92 /* the end of a chain of pclusters */
93 #define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
94 #define Z_EROFS_PCLUSTER_NIL (NULL)
95
96 struct z_erofs_decompressqueue {
97 struct super_block *sb;
98 atomic_t pending_bios;
99 z_erofs_next_pcluster_t head;
100
101 union {
102 struct completion done;
103 struct work_struct work;
104 struct kthread_work kthread_work;
105 } u;
106 bool eio, sync;
107 };
108
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
110 {
111 return !pcl->obj.index;
112 }
113
static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
115 {
116 return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
117 }
118
119 #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
121 {
122 return fo->mapping == MNGD_MAPPING(sbi);
123 }
124
125 #define Z_EROFS_ONSTACK_PAGES 32
126
/*
 * Since pclustersize is variable for the big pcluster feature, introduce
 * slab pools for the different pcluster sizes.
 */
131 struct z_erofs_pcluster_slab {
132 struct kmem_cache *slab;
133 unsigned int maxpages;
134 char name[48];
135 };
136
137 #define _PCLP(n) { .maxpages = n }
138
139 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
140 _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
141 _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
142 };
143
144 struct z_erofs_bvec_iter {
145 struct page *bvpage;
146 struct z_erofs_bvset *bvset;
147 unsigned int nr, cur;
148 };
149
static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
151 {
152 if (iter->bvpage)
153 kunmap_local(iter->bvset);
154 return iter->bvpage;
155 }
156
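/* advance to the next bvset page: map it and reset the iterator to its first bvec */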
static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
158 {
159 unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
160 /* have to access nextpage in advance, otherwise it will be unmapped */
161 struct page *nextpage = iter->bvset->nextpage;
162 struct page *oldpage;
163
164 DBG_BUGON(!nextpage);
165 oldpage = z_erofs_bvec_iter_end(iter);
166 iter->bvpage = nextpage;
167 iter->bvset = kmap_local_page(nextpage);
168 iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
169 iter->cur = 0;
170 return oldpage;
171 }
172
static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
174 struct z_erofs_bvset_inline *bvset,
175 unsigned int bootstrap_nr,
176 unsigned int cur)
177 {
178 *iter = (struct z_erofs_bvec_iter) {
179 .nr = bootstrap_nr,
180 .bvset = (struct z_erofs_bvset *)bvset,
181 };
182
183 while (cur > iter->nr) {
184 cur -= iter->nr;
185 z_erofs_bvset_flip(iter);
186 }
187 iter->cur = cur;
188 }
189
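/* append a bvec to the bvset, chaining in a short-lived page once the current set is full */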
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
191 struct z_erofs_bvec *bvec,
192 struct page **candidate_bvpage,
193 struct page **pagepool)
194 {
195 if (iter->cur >= iter->nr) {
196 struct page *nextpage = *candidate_bvpage;
197
198 if (!nextpage) {
199 nextpage = __erofs_allocpage(pagepool, GFP_KERNEL,
200 true);
201 if (!nextpage)
202 return -ENOMEM;
203 set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
204 }
205 DBG_BUGON(iter->bvset->nextpage);
206 iter->bvset->nextpage = nextpage;
207 z_erofs_bvset_flip(iter);
208
209 iter->bvset->nextpage = NULL;
210 *candidate_bvpage = NULL;
211 }
212 iter->bvset->bvec[iter->cur++] = *bvec;
213 return 0;
214 }
215
static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
217 struct z_erofs_bvec *bvec,
218 struct page **old_bvpage)
219 {
220 if (iter->cur == iter->nr)
221 *old_bvpage = z_erofs_bvset_flip(iter);
222 else
223 *old_bvpage = NULL;
224 *bvec = iter->bvset->bvec[iter->cur++];
225 }
226
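/* release all per-size pcluster slab caches */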
static void z_erofs_destroy_pcluster_pool(void)
228 {
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
232 if (!pcluster_pool[i].slab)
233 continue;
234 kmem_cache_destroy(pcluster_pool[i].slab);
235 pcluster_pool[i].slab = NULL;
236 }
237 }
238
static int z_erofs_create_pcluster_pool(void)
240 {
241 struct z_erofs_pcluster_slab *pcs;
242 struct z_erofs_pcluster *a;
243 unsigned int size;
244
245 for (pcs = pcluster_pool;
246 pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
247 size = struct_size(a, compressed_bvecs, pcs->maxpages);
248
249 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
250 pcs->slab = kmem_cache_create(pcs->name, size, 0,
251 SLAB_RECLAIM_ACCOUNT, NULL);
252 if (pcs->slab)
253 continue;
254
255 z_erofs_destroy_pcluster_pool();
256 return -ENOMEM;
257 }
258 return 0;
259 }
260
static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
262 {
263 unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
264 struct z_erofs_pcluster_slab *pcs = pcluster_pool;
265
266 for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
267 struct z_erofs_pcluster *pcl;
268
269 if (nrpages > pcs->maxpages)
270 continue;
271
272 pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
273 if (!pcl)
274 return ERR_PTR(-ENOMEM);
275 pcl->pclustersize = size;
276 return pcl;
277 }
278 return ERR_PTR(-EINVAL);
279 }
280
static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
282 {
283 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
284 int i;
285
286 for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
287 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
288
289 if (pclusterpages > pcs->maxpages)
290 continue;
291
292 kmem_cache_free(pcs->slab, pcl);
293 return;
294 }
295 DBG_BUGON(1);
296 }
297
298 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
299
300 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
301 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
302
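/* tear down all per-CPU decompression kthread workers */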
static void erofs_destroy_percpu_workers(void)
304 {
305 struct kthread_worker *worker;
306 unsigned int cpu;
307
308 for_each_possible_cpu(cpu) {
309 worker = rcu_dereference_protected(
310 z_erofs_pcpu_workers[cpu], 1);
311 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
312 if (worker)
313 kthread_destroy_worker(worker);
314 }
315 kfree(z_erofs_pcpu_workers);
316 }
317
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
319 {
320 struct kthread_worker *worker =
321 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
322
323 if (IS_ERR(worker))
324 return worker;
325 if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
326 sched_set_fifo_low(worker->task);
327 return worker;
328 }
329
static int erofs_init_percpu_workers(void)
331 {
332 struct kthread_worker *worker;
333 unsigned int cpu;
334
335 z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
336 sizeof(struct kthread_worker *), GFP_ATOMIC);
337 if (!z_erofs_pcpu_workers)
338 return -ENOMEM;
339
340 for_each_online_cpu(cpu) { /* could miss cpu{off,on}line? */
341 worker = erofs_init_percpu_worker(cpu);
342 if (!IS_ERR(worker))
343 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
344 }
345 return 0;
346 }
347 #else
static inline void erofs_destroy_percpu_workers(void) {}
static inline int erofs_init_percpu_workers(void) { return 0; }
350 #endif
351
352 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
353 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
354 static enum cpuhp_state erofs_cpuhp_state;
355
static int erofs_cpu_online(unsigned int cpu)
357 {
358 struct kthread_worker *worker, *old;
359
360 worker = erofs_init_percpu_worker(cpu);
361 if (IS_ERR(worker))
362 return PTR_ERR(worker);
363
364 spin_lock(&z_erofs_pcpu_worker_lock);
365 old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
366 lockdep_is_held(&z_erofs_pcpu_worker_lock));
367 if (!old)
368 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
369 spin_unlock(&z_erofs_pcpu_worker_lock);
370 if (old)
371 kthread_destroy_worker(worker);
372 return 0;
373 }
374
static int erofs_cpu_offline(unsigned int cpu)
376 {
377 struct kthread_worker *worker;
378
379 spin_lock(&z_erofs_pcpu_worker_lock);
380 worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
381 lockdep_is_held(&z_erofs_pcpu_worker_lock));
382 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
383 spin_unlock(&z_erofs_pcpu_worker_lock);
384
385 synchronize_rcu();
386 if (worker)
387 kthread_destroy_worker(worker);
388 return 0;
389 }
390
static int erofs_cpu_hotplug_init(void)
392 {
393 int state;
394
395 state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
396 "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
397 if (state < 0)
398 return state;
399
400 erofs_cpuhp_state = state;
401 return 0;
402 }
403
static void erofs_cpu_hotplug_destroy(void)
405 {
406 if (erofs_cpuhp_state)
407 cpuhp_remove_state_nocalls(erofs_cpuhp_state);
408 }
409 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {}
412 #endif
413
void z_erofs_exit_subsystem(void)
415 {
416 erofs_cpu_hotplug_destroy();
417 erofs_destroy_percpu_workers();
418 destroy_workqueue(z_erofs_workqueue);
419 z_erofs_destroy_pcluster_pool();
420 z_erofs_exit_decompressor();
421 }
422
int __init z_erofs_init_subsystem(void)
424 {
425 int err = z_erofs_init_decompressor();
426
427 if (err)
428 goto err_decompressor;
429
430 err = z_erofs_create_pcluster_pool();
431 if (err)
432 goto err_pcluster_pool;
433
434 z_erofs_workqueue = alloc_workqueue("erofs_worker",
435 WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
436 if (!z_erofs_workqueue) {
437 err = -ENOMEM;
438 goto err_workqueue_init;
439 }
440
441 err = erofs_init_percpu_workers();
442 if (err)
443 goto err_pcpu_worker;
444
445 err = erofs_cpu_hotplug_init();
446 if (err < 0)
447 goto err_cpuhp_init;
448 return err;
449
450 err_cpuhp_init:
451 erofs_destroy_percpu_workers();
452 err_pcpu_worker:
453 destroy_workqueue(z_erofs_workqueue);
454 err_workqueue_init:
455 z_erofs_destroy_pcluster_pool();
456 err_pcluster_pool:
457 z_erofs_exit_decompressor();
458 err_decompressor:
459 return err;
460 }
461
462 enum z_erofs_pclustermode {
463 Z_EROFS_PCLUSTER_INFLIGHT,
/*
 * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
 * could be dispatched into the bypass queue later due to up-to-date
 * managed pages. None of the related online pages can be reused for
 * in-place I/O (or bvpage) since the pcluster can be decoded directly
 * without any I/O submission.
 */
470 Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
471 /*
472 * The pcluster was just linked to a decompression chain by us. It can
473 * also be linked with the remaining pclusters, which means if the
474 * processing page is the tail page of a pcluster, this pcluster can
475 * safely use the whole page (since the previous pcluster is within the
476 * same chain) for in-place I/O, as illustrated below:
477 * ___________________________________________________
478 * | tail (partial) page | head (partial) page |
479 * | (of the current pcl) | (of the previous pcl) |
480 * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
481 *
482 * [ (*) the page above can be used as inplace I/O. ]
483 */
484 Z_EROFS_PCLUSTER_FOLLOWED,
485 };
486
487 struct z_erofs_decompress_frontend {
488 struct inode *const inode;
489 struct erofs_map_blocks map;
490 struct z_erofs_bvec_iter biter;
491
492 struct page *pagepool;
493 struct page *candidate_bvpage;
494 struct z_erofs_pcluster *pcl;
495 z_erofs_next_pcluster_t owned_head;
496 enum z_erofs_pclustermode mode;
497
498 erofs_off_t headoffset;
499
/* a cursor used to pick up in-place I/O pages */
501 unsigned int icur;
502 };
503
504 #define DECOMPRESS_FRONTEND_INIT(__i) { \
505 .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
506 .mode = Z_EROFS_PCLUSTER_FOLLOWED }
507
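/* decide whether to allocate managed cache pages for this extent based on the cache strategy */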
static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
509 {
510 unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
511
512 if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
513 return false;
514
515 if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
516 return true;
517
518 if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
519 fe->map.m_la < fe->headoffset)
520 return true;
521
522 return false;
523 }
524
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
526 {
527 struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
528 struct z_erofs_pcluster *pcl = fe->pcl;
529 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
530 bool shouldalloc = z_erofs_should_alloc_cache(fe);
531 bool standalone = true;
/*
 * Optimistic allocation without direct reclaim, since in-place I/O
 * can be used as a fallback under memory pressure.
 */
536 gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
537 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
538 unsigned int i;
539
540 if (i_blocksize(fe->inode) != PAGE_SIZE ||
541 fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
542 return;
543
544 for (i = 0; i < pclusterpages; ++i) {
545 struct page *page, *newpage;
546
547 /* Inaccurate check w/o locking to avoid unneeded lookups */
548 if (READ_ONCE(pcl->compressed_bvecs[i].page))
549 continue;
550
551 page = find_get_page(mc, pcl->obj.index + i);
552 if (!page) {
/* I/O is needed, it's not possible to decompress directly */
554 standalone = false;
555 if (!shouldalloc)
556 continue;
557
/*
 * Try cached I/O if allocation succeeds, or fall back to
 * in-place I/O instead to avoid any direct reclaim.
 */
562 newpage = erofs_allocpage(&fe->pagepool, gfp);
563 if (!newpage)
564 continue;
565 set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
566 }
567 spin_lock(&pcl->obj.lockref.lock);
568 if (!pcl->compressed_bvecs[i].page) {
569 pcl->compressed_bvecs[i].page = page ? page : newpage;
570 spin_unlock(&pcl->obj.lockref.lock);
571 continue;
572 }
573 spin_unlock(&pcl->obj.lockref.lock);
574
575 if (page)
576 put_page(page);
577 else if (newpage)
578 erofs_pagepool_add(&fe->pagepool, newpage);
579 }
580
/*
 * Don't do in-place I/O if all compressed pages are available in the
 * managed cache, since the pcluster can be moved to the bypass queue
 * instead.
 */
585 if (standalone)
586 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
587 }
588
589 /* (erofs_shrinker) disconnect cached encoded data with pclusters */
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
591 struct erofs_workgroup *grp)
592 {
593 struct z_erofs_pcluster *const pcl =
594 container_of(grp, struct z_erofs_pcluster, obj);
595 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
596 struct folio *folio;
597 int i;
598
599 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
600 /* Each cached folio contains one page unless bs > ps is supported */
601 for (i = 0; i < pclusterpages; ++i) {
602 if (pcl->compressed_bvecs[i].page) {
603 folio = page_folio(pcl->compressed_bvecs[i].page);
604 /* Avoid reclaiming or migrating this folio */
605 if (!folio_trylock(folio))
606 return -EBUSY;
607
608 if (!erofs_folio_is_managed(sbi, folio))
609 continue;
610 pcl->compressed_bvecs[i].page = NULL;
611 folio_detach_private(folio);
612 folio_unlock(folio);
613 }
614 }
615 return 0;
616 }
617
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
619 {
620 struct z_erofs_pcluster *pcl = folio_get_private(folio);
621 struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
622 struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
623 bool ret;
624
625 if (!folio_test_private(folio))
626 return true;
627
628 ret = false;
629 spin_lock(&pcl->obj.lockref.lock);
630 if (pcl->obj.lockref.count <= 0) {
631 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
632 for (; bvec < end; ++bvec) {
633 if (bvec->page && page_folio(bvec->page) == folio) {
634 bvec->page = NULL;
635 folio_detach_private(folio);
636 ret = true;
637 break;
638 }
639 }
640 }
641 spin_unlock(&pcl->obj.lockref.lock);
642 return ret;
643 }
644
/*
 * It will be called only on inode eviction. In case there are still some
 * decompression requests in progress, wait and reschedule for a bit here.
 * An extra lock could be introduced instead but it seems unnecessary.
 */
static void z_erofs_cache_invalidate_folio(struct folio *folio,
651 size_t offset, size_t length)
652 {
653 const size_t stop = length + offset;
654
655 /* Check for potential overflow in debug mode */
656 DBG_BUGON(stop > folio_size(folio) || stop < length);
657
658 if (offset == 0 && stop == folio_size(folio))
659 while (!z_erofs_cache_release_folio(folio, 0))
660 cond_resched();
661 }
662
663 static const struct address_space_operations z_erofs_cache_aops = {
664 .release_folio = z_erofs_cache_release_folio,
665 .invalidate_folio = z_erofs_cache_invalidate_folio,
666 };
667
int erofs_init_managed_cache(struct super_block *sb)
669 {
670 struct inode *const inode = new_inode(sb);
671
672 if (!inode)
673 return -ENOMEM;
674
675 set_nlink(inode, 1);
676 inode->i_size = OFFSET_MAX;
677 inode->i_mapping->a_ops = &z_erofs_cache_aops;
678 mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
679 EROFS_SB(sb)->managed_cache = inode;
680 return 0;
681 }
682
/* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
685 struct z_erofs_bvec *bvec, bool exclusive)
686 {
687 struct z_erofs_pcluster *pcl = fe->pcl;
688 int ret;
689
690 if (exclusive) {
/* give priority to in-place I/O to use file pages first */
692 spin_lock(&pcl->obj.lockref.lock);
693 while (fe->icur > 0) {
694 if (pcl->compressed_bvecs[--fe->icur].page)
695 continue;
696 pcl->compressed_bvecs[fe->icur] = *bvec;
697 spin_unlock(&pcl->obj.lockref.lock);
698 return 0;
699 }
700 spin_unlock(&pcl->obj.lockref.lock);
701
702 /* otherwise, check if it can be used as a bvpage */
703 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
704 !fe->candidate_bvpage)
705 fe->candidate_bvpage = bvec->page;
706 }
707 ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
708 &fe->pagepool);
709 fe->pcl->vcnt += (ret >= 0);
710 return ret;
711 }
712
static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
714 {
715 struct z_erofs_pcluster *pcl = f->pcl;
716 z_erofs_next_pcluster_t *owned_head = &f->owned_head;
717
718 /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
719 if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
720 *owned_head) == Z_EROFS_PCLUSTER_NIL) {
721 *owned_head = &pcl->next;
722 /* so we can attach this pcluster to our submission chain. */
723 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
724 return;
725 }
726
727 /* type 2, it belongs to an ongoing chain */
728 f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
729 }
730
static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
732 {
733 struct erofs_map_blocks *map = &fe->map;
734 struct super_block *sb = fe->inode->i_sb;
735 bool ztailpacking = map->m_flags & EROFS_MAP_META;
736 struct z_erofs_pcluster *pcl;
737 struct erofs_workgroup *grp;
738 int err;
739
740 if (!(map->m_flags & EROFS_MAP_ENCODED) ||
741 (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
742 DBG_BUGON(1);
743 return -EFSCORRUPTED;
744 }
745
746 /* no available pcluster, let's allocate one */
747 pcl = z_erofs_alloc_pcluster(map->m_plen);
748 if (IS_ERR(pcl))
749 return PTR_ERR(pcl);
750
751 spin_lock_init(&pcl->obj.lockref.lock);
752 pcl->obj.lockref.count = 1; /* one ref for this request */
753 pcl->algorithmformat = map->m_algorithmformat;
754 pcl->length = 0;
755 pcl->partial = true;
756
757 /* new pclusters should be claimed as type 1, primary and followed */
758 pcl->next = fe->owned_head;
759 pcl->pageofs_out = map->m_la & ~PAGE_MASK;
760 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
761
/*
 * Lock all primary followed works before they become visible to
 * others; mutex_trylock *never* fails for a new pcluster.
 */
766 mutex_init(&pcl->lock);
767 DBG_BUGON(!mutex_trylock(&pcl->lock));
768
769 if (ztailpacking) {
770 pcl->obj.index = 0; /* which indicates ztailpacking */
771 } else {
772 pcl->obj.index = erofs_blknr(sb, map->m_pa);
773
774 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
775 if (IS_ERR(grp)) {
776 err = PTR_ERR(grp);
777 goto err_out;
778 }
779
780 if (grp != &pcl->obj) {
781 fe->pcl = container_of(grp,
782 struct z_erofs_pcluster, obj);
783 err = -EEXIST;
784 goto err_out;
785 }
786 }
787 fe->owned_head = &pcl->next;
788 fe->pcl = pcl;
789 return 0;
790
791 err_out:
792 mutex_unlock(&pcl->lock);
793 z_erofs_free_pcluster(pcl);
794 return err;
795 }
796
static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
798 {
799 struct erofs_map_blocks *map = &fe->map;
800 struct super_block *sb = fe->inode->i_sb;
801 erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
802 struct erofs_workgroup *grp = NULL;
803 int ret;
804
805 DBG_BUGON(fe->pcl);
806
/* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
808 DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
809
810 if (!(map->m_flags & EROFS_MAP_META)) {
811 grp = erofs_find_workgroup(sb, blknr);
812 } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
813 DBG_BUGON(1);
814 return -EFSCORRUPTED;
815 }
816
817 if (grp) {
818 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
819 ret = -EEXIST;
820 } else {
821 ret = z_erofs_register_pcluster(fe);
822 }
823
824 if (ret == -EEXIST) {
825 mutex_lock(&fe->pcl->lock);
826 z_erofs_try_to_claim_pcluster(fe);
827 } else if (ret) {
828 return ret;
829 }
830
831 z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
832 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
833 if (!z_erofs_is_inline_pcluster(fe->pcl)) {
834 /* bind cache first when cached decompression is preferred */
835 z_erofs_bind_cache(fe);
836 } else {
837 void *mptr;
838
839 mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
840 if (IS_ERR(mptr)) {
841 ret = PTR_ERR(mptr);
842 erofs_err(sb, "failed to get inline data %d", ret);
843 return ret;
844 }
845 get_page(map->buf.page);
846 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
847 fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
848 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
849 }
850 /* file-backed inplace I/O pages are traversed in reverse order */
851 fe->icur = z_erofs_pclusterpages(fe->pcl);
852 return 0;
853 }
854
/*
 * Keep in mind that pclusters are never freed while still referenced;
 * even then, they are only freed after an RCU grace period.
 */
static void z_erofs_rcu_callback(struct rcu_head *head)
860 {
861 z_erofs_free_pcluster(container_of(head,
862 struct z_erofs_pcluster, rcu));
863 }
864
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
866 {
867 struct z_erofs_pcluster *const pcl =
868 container_of(grp, struct z_erofs_pcluster, obj);
869
870 call_rcu(&pcl->rcu, z_erofs_rcu_callback);
871 }
872
static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
874 {
875 struct z_erofs_pcluster *pcl = fe->pcl;
876
877 if (!pcl)
878 return;
879
880 z_erofs_bvec_iter_end(&fe->biter);
881 mutex_unlock(&pcl->lock);
882
883 if (fe->candidate_bvpage)
884 fe->candidate_bvpage = NULL;
885
/*
 * Once all pending pages are added, don't hold the pcluster
 * reference any longer if it isn't hosted by ourselves.
 */
890 if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
891 erofs_workgroup_put(&pcl->obj);
892
893 fe->pcl = NULL;
894 }
895
static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
897 unsigned int cur, unsigned int end, erofs_off_t pos)
898 {
899 struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
900 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
901 unsigned int cnt;
902 u8 *src;
903
904 if (!packed_inode)
905 return -EFSCORRUPTED;
906
907 buf.mapping = packed_inode->i_mapping;
908 for (; cur < end; cur += cnt, pos += cnt) {
909 cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
910 src = erofs_bread(&buf, pos, EROFS_KMAP);
911 if (IS_ERR(src)) {
912 erofs_put_metabuf(&buf);
913 return PTR_ERR(src);
914 }
915 memcpy_to_folio(folio, cur, src, cnt);
916 }
917 erofs_put_metabuf(&buf);
918 return 0;
919 }
920
static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
922 struct folio *folio, bool ra)
923 {
924 struct inode *const inode = f->inode;
925 struct erofs_map_blocks *const map = &f->map;
926 const loff_t offset = folio_pos(folio);
927 const unsigned int bs = i_blocksize(inode);
928 unsigned int end = folio_size(folio), split = 0, cur, pgs;
929 bool tight, excl;
930 int err = 0;
931
932 tight = (bs == PAGE_SIZE);
933 erofs_onlinefolio_init(folio);
934 do {
935 if (offset + end - 1 < map->m_la ||
936 offset + end - 1 >= map->m_la + map->m_llen) {
937 z_erofs_pcluster_end(f);
938 map->m_la = offset + end - 1;
939 map->m_llen = 0;
940 err = z_erofs_map_blocks_iter(inode, map, 0);
941 if (err)
942 break;
943 }
944
945 cur = offset > map->m_la ? 0 : map->m_la - offset;
946 pgs = round_down(cur, PAGE_SIZE);
947 /* bump split parts first to avoid several separate cases */
948 ++split;
949
950 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
951 folio_zero_segment(folio, cur, end);
952 tight = false;
953 } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
954 erofs_off_t fpos = offset + cur - map->m_la;
955
956 err = z_erofs_read_fragment(inode->i_sb, folio, cur,
957 cur + min(map->m_llen - fpos, end - cur),
958 EROFS_I(inode)->z_fragmentoff + fpos);
959 if (err)
960 break;
961 tight = false;
962 } else {
963 if (!f->pcl) {
964 err = z_erofs_pcluster_begin(f);
965 if (err)
966 break;
967 f->pcl->besteffort |= !ra;
968 }
969
970 pgs = round_down(end - 1, PAGE_SIZE);
/*
 * Ensure this partial page belongs to this submit chain rather
 * than other concurrent submit chains or the noio (bypass) chain,
 * since those chains are handled asynchronously and thus the page
 * cannot be used for in-place I/O or bvpage (which should be
 * processed in strict order.)
 */
978 tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
979 excl = false;
980 if (cur <= pgs) {
981 excl = (split <= 1) || tight;
982 cur = pgs;
983 }
984
985 err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
986 .page = folio_page(folio, pgs >> PAGE_SHIFT),
987 .offset = offset + pgs - map->m_la,
988 .end = end - pgs, }), excl);
989 if (err)
990 break;
991
992 erofs_onlinefolio_split(folio);
993 if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
994 f->pcl->multibases = true;
995 if (f->pcl->length < offset + end - map->m_la) {
996 f->pcl->length = offset + end - map->m_la;
997 f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
998 }
999 if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
1000 !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
1001 f->pcl->length == map->m_llen)
1002 f->pcl->partial = false;
1003 }
1004 /* shorten the remaining extent to update progress */
1005 map->m_llen = offset + cur - map->m_la;
1006 map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
1007 if (cur <= pgs) {
1008 split = cur < pgs;
1009 tight = (bs == PAGE_SIZE);
1010 }
1011 } while ((end = cur) > 0);
1012 erofs_onlinefolio_end(folio, err);
1013 return err;
1014 }
1015
static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1017 unsigned int readahead_pages)
1018 {
1019 /* auto: enable for read_folio, disable for readahead */
1020 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1021 !readahead_pages)
1022 return true;
1023
1024 if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1025 (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1026 return true;
1027
1028 return false;
1029 }
1030
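/* a page without a mapping that isn't short-lived has been truncated/invalidated */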
static bool z_erofs_page_is_invalidated(struct page *page)
1032 {
1033 return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
1034 }
1035
1036 struct z_erofs_decompress_backend {
1037 struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1038 struct super_block *sb;
1039 struct z_erofs_pcluster *pcl;
1040
1041 /* pages with the longest decompressed length for deduplication */
1042 struct page **decompressed_pages;
1043 /* pages to keep the compressed data */
1044 struct page **compressed_pages;
1045
1046 struct list_head decompressed_secondary_bvecs;
1047 struct page **pagepool;
1048 unsigned int onstack_used, nr_pages;
1049 };
1050
1051 struct z_erofs_bvec_item {
1052 struct z_erofs_bvec bvec;
1053 struct list_head list;
1054 };
1055
static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1057 struct z_erofs_bvec *bvec)
1058 {
1059 struct z_erofs_bvec_item *item;
1060 unsigned int pgnr;
1061
1062 if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1063 (bvec->end == PAGE_SIZE ||
1064 bvec->offset + bvec->end == be->pcl->length)) {
1065 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1066 DBG_BUGON(pgnr >= be->nr_pages);
1067 if (!be->decompressed_pages[pgnr]) {
1068 be->decompressed_pages[pgnr] = bvec->page;
1069 return;
1070 }
1071 }
1072
1073 /* (cold path) one pcluster is requested multiple times */
1074 item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1075 item->bvec = *bvec;
1076 list_add(&item->list, &be->decompressed_secondary_bvecs);
1077 }
1078
static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1080 int err)
1081 {
1082 unsigned int off0 = be->pcl->pageofs_out;
1083 struct list_head *p, *n;
1084
1085 list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1086 struct z_erofs_bvec_item *bvi;
1087 unsigned int end, cur;
1088 void *dst, *src;
1089
1090 bvi = container_of(p, struct z_erofs_bvec_item, list);
1091 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1092 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1093 bvi->bvec.end);
1094 dst = kmap_local_page(bvi->bvec.page);
1095 while (cur < end) {
1096 unsigned int pgnr, scur, len;
1097
1098 pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1099 DBG_BUGON(pgnr >= be->nr_pages);
1100
1101 scur = bvi->bvec.offset + cur -
1102 ((pgnr << PAGE_SHIFT) - off0);
1103 len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1104 if (!be->decompressed_pages[pgnr]) {
1105 err = -EFSCORRUPTED;
1106 cur += len;
1107 continue;
1108 }
1109 src = kmap_local_page(be->decompressed_pages[pgnr]);
1110 memcpy(dst + cur, src + scur, len);
1111 kunmap_local(src);
1112 cur += len;
1113 }
1114 kunmap_local(dst);
1115 erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
1116 list_del(p);
1117 kfree(bvi);
1118 }
1119 }
1120
static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1122 {
1123 struct z_erofs_pcluster *pcl = be->pcl;
1124 struct z_erofs_bvec_iter biter;
1125 struct page *old_bvpage;
1126 int i;
1127
1128 z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1129 for (i = 0; i < pcl->vcnt; ++i) {
1130 struct z_erofs_bvec bvec;
1131
1132 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1133
1134 if (old_bvpage)
1135 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1136
1137 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1138 z_erofs_do_decompressed_bvec(be, &bvec);
1139 }
1140
1141 old_bvpage = z_erofs_bvec_iter_end(&biter);
1142 if (old_bvpage)
1143 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1144 }
1145
static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1147 bool *overlapped)
1148 {
1149 struct z_erofs_pcluster *pcl = be->pcl;
1150 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1151 int i, err = 0;
1152
1153 *overlapped = false;
1154 for (i = 0; i < pclusterpages; ++i) {
1155 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1156 struct page *page = bvec->page;
1157
1158 /* compressed data ought to be valid when decompressing */
1159 if (IS_ERR(page) || !page) {
1160 bvec->page = NULL; /* clear the failure reason */
1161 err = page ? PTR_ERR(page) : -EIO;
1162 continue;
1163 }
1164 be->compressed_pages[i] = page;
1165
1166 if (z_erofs_is_inline_pcluster(pcl) ||
1167 erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
1168 if (!PageUptodate(page))
1169 err = -EIO;
1170 continue;
1171 }
1172
1173 DBG_BUGON(z_erofs_page_is_invalidated(page));
1174 if (z_erofs_is_shortlived_page(page))
1175 continue;
1176 z_erofs_do_decompressed_bvec(be, bvec);
1177 *overlapped = true;
1178 }
1179 return err;
1180 }
1181
static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1183 int err)
1184 {
1185 struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1186 struct z_erofs_pcluster *pcl = be->pcl;
1187 unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1188 const struct z_erofs_decompressor *decomp =
1189 z_erofs_decomp[pcl->algorithmformat];
1190 int i, j, jtop, err2;
1191 struct page *page;
1192 bool overlapped;
1193
1194 mutex_lock(&pcl->lock);
1195 be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1196
1197 /* allocate (de)compressed page arrays if cannot be kept on stack */
1198 be->decompressed_pages = NULL;
1199 be->compressed_pages = NULL;
1200 be->onstack_used = 0;
1201 if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1202 be->decompressed_pages = be->onstack_pages;
1203 be->onstack_used = be->nr_pages;
1204 memset(be->decompressed_pages, 0,
1205 sizeof(struct page *) * be->nr_pages);
1206 }
1207
1208 if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1209 be->compressed_pages = be->onstack_pages + be->onstack_used;
1210
1211 if (!be->decompressed_pages)
1212 be->decompressed_pages =
1213 kvcalloc(be->nr_pages, sizeof(struct page *),
1214 GFP_KERNEL | __GFP_NOFAIL);
1215 if (!be->compressed_pages)
1216 be->compressed_pages =
1217 kvcalloc(pclusterpages, sizeof(struct page *),
1218 GFP_KERNEL | __GFP_NOFAIL);
1219
1220 z_erofs_parse_out_bvecs(be);
1221 err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1222 if (err2)
1223 err = err2;
1224 if (!err)
1225 err = decomp->decompress(&(struct z_erofs_decompress_req) {
1226 .sb = be->sb,
1227 .in = be->compressed_pages,
1228 .out = be->decompressed_pages,
1229 .pageofs_in = pcl->pageofs_in,
1230 .pageofs_out = pcl->pageofs_out,
1231 .inputsize = pcl->pclustersize,
1232 .outputsize = pcl->length,
1233 .alg = pcl->algorithmformat,
1234 .inplace_io = overlapped,
1235 .partial_decoding = pcl->partial,
1236 .fillgaps = pcl->multibases,
1237 .gfp = pcl->besteffort ? GFP_KERNEL :
1238 GFP_NOWAIT | __GFP_NORETRY
1239 }, be->pagepool);
1240
1241 /* must handle all compressed pages before actual file pages */
1242 if (z_erofs_is_inline_pcluster(pcl)) {
1243 page = pcl->compressed_bvecs[0].page;
1244 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1245 put_page(page);
1246 } else {
1247 /* managed folios are still left in compressed_bvecs[] */
1248 for (i = 0; i < pclusterpages; ++i) {
1249 page = be->compressed_pages[i];
1250 if (!page ||
1251 erofs_folio_is_managed(sbi, page_folio(page)))
1252 continue;
1253 (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1254 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1255 }
1256 }
1257 if (be->compressed_pages < be->onstack_pages ||
1258 be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1259 kvfree(be->compressed_pages);
1260
1261 jtop = 0;
1262 z_erofs_fill_other_copies(be, err);
1263 for (i = 0; i < be->nr_pages; ++i) {
1264 page = be->decompressed_pages[i];
1265 if (!page)
1266 continue;
1267
1268 DBG_BUGON(z_erofs_page_is_invalidated(page));
1269 if (!z_erofs_is_shortlived_page(page)) {
1270 erofs_onlinefolio_end(page_folio(page), err);
1271 continue;
1272 }
1273 if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
1274 erofs_pagepool_add(be->pagepool, page);
1275 continue;
1276 }
1277 for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
1278 ;
1279 if (j >= jtop) /* this bounce page is newly detected */
1280 be->decompressed_pages[jtop++] = page;
1281 }
1282 while (jtop)
1283 erofs_pagepool_add(be->pagepool,
1284 be->decompressed_pages[--jtop]);
1285 if (be->decompressed_pages != be->onstack_pages)
1286 kvfree(be->decompressed_pages);
1287
1288 pcl->length = 0;
1289 pcl->partial = true;
1290 pcl->multibases = false;
1291 pcl->besteffort = false;
1292 pcl->bvset.nextpage = NULL;
1293 pcl->vcnt = 0;
1294
1295 /* pcluster lock MUST be taken before the following line */
1296 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1297 mutex_unlock(&pcl->lock);
1298 return err;
1299 }
1300
static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1302 struct page **pagepool)
1303 {
1304 struct z_erofs_decompress_backend be = {
1305 .sb = io->sb,
1306 .pagepool = pagepool,
1307 .decompressed_secondary_bvecs =
1308 LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1309 };
1310 z_erofs_next_pcluster_t owned = io->head;
1311 int err = io->eio ? -EIO : 0;
1312
1313 while (owned != Z_EROFS_PCLUSTER_TAIL) {
1314 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1315
1316 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1317 owned = READ_ONCE(be.pcl->next);
1318
1319 err = z_erofs_decompress_pcluster(&be, err) ?: err;
1320 if (z_erofs_is_inline_pcluster(be.pcl))
1321 z_erofs_free_pcluster(be.pcl);
1322 else
1323 erofs_workgroup_put(&be.pcl->obj);
1324 }
1325 return err;
1326 }
1327
static void z_erofs_decompressqueue_work(struct work_struct *work)
1329 {
1330 struct z_erofs_decompressqueue *bgq =
1331 container_of(work, struct z_erofs_decompressqueue, u.work);
1332 struct page *pagepool = NULL;
1333
1334 DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1335 z_erofs_decompress_queue(bgq, &pagepool);
1336 erofs_release_pages(&pagepool);
1337 kvfree(bgq);
1338 }
1339
1340 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1342 {
1343 z_erofs_decompressqueue_work((struct work_struct *)work);
1344 }
1345 #endif
1346
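/* account for submitted/completed bios and kick off decompression once none are pending */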
static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1348 int bios)
1349 {
1350 struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1351
1352 /* wake up the caller thread for sync decompression */
1353 if (io->sync) {
1354 if (!atomic_add_return(bios, &io->pending_bios))
1355 complete(&io->u.done);
1356 return;
1357 }
1358
1359 if (atomic_add_return(bios, &io->pending_bios))
1360 return;
1361 /* Use (kthread_)work and sync decompression for atomic contexts only */
1362 if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1363 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1364 struct kthread_worker *worker;
1365
1366 rcu_read_lock();
1367 worker = rcu_dereference(
1368 z_erofs_pcpu_workers[raw_smp_processor_id()]);
1369 if (!worker) {
1370 INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1371 queue_work(z_erofs_workqueue, &io->u.work);
1372 } else {
1373 kthread_queue_work(worker, &io->u.kthread_work);
1374 }
1375 rcu_read_unlock();
1376 #else
1377 queue_work(z_erofs_workqueue, &io->u.work);
1378 #endif
1379 /* enable sync decompression for readahead */
1380 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1381 sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1382 return;
1383 }
1384 z_erofs_decompressqueue_work(&io->u.work);
1385 }
1386
static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1388 struct z_erofs_decompress_frontend *f,
1389 struct z_erofs_pcluster *pcl,
1390 unsigned int nr,
1391 struct address_space *mc)
1392 {
1393 gfp_t gfp = mapping_gfp_mask(mc);
1394 bool tocache = false;
1395 struct z_erofs_bvec zbv;
1396 struct address_space *mapping;
1397 struct folio *folio;
1398 struct page *page;
1399 int bs = i_blocksize(f->inode);
1400
1401 /* Except for inplace folios, the entire folio can be used for I/Os */
1402 bvec->bv_offset = 0;
1403 bvec->bv_len = PAGE_SIZE;
1404 repeat:
1405 spin_lock(&pcl->obj.lockref.lock);
1406 zbv = pcl->compressed_bvecs[nr];
1407 spin_unlock(&pcl->obj.lockref.lock);
1408 if (!zbv.page)
1409 goto out_allocfolio;
1410
1411 bvec->bv_page = zbv.page;
1412 DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
1413
1414 folio = page_folio(zbv.page);
1415 /*
1416 * Handle preallocated cached folios. We tried to allocate such folios
1417 * without triggering direct reclaim. If allocation failed, inplace
1418 * file-backed folios will be used instead.
1419 */
1420 if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
1421 tocache = true;
1422 goto out_tocache;
1423 }
1424
1425 mapping = READ_ONCE(folio->mapping);
/*
 * File-backed folios for in-place I/Os are all locked and stable,
 * therefore it is impossible for `mapping` to be NULL.
 */
1430 if (mapping && mapping != mc) {
1431 if (zbv.offset < 0)
1432 bvec->bv_offset = round_up(-zbv.offset, bs);
1433 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
1434 return;
1435 }
1436
1437 folio_lock(folio);
1438 if (likely(folio->mapping == mc)) {
1439 /*
1440 * The cached folio is still in managed cache but without
1441 * a valid `->private` pcluster hint. Let's reconnect them.
1442 */
1443 if (!folio_test_private(folio)) {
1444 folio_attach_private(folio, pcl);
1445 /* compressed_bvecs[] already takes a ref before */
1446 folio_put(folio);
1447 }
1448 if (likely(folio->private == pcl)) {
1449 /* don't submit cache I/Os again if already uptodate */
1450 if (folio_test_uptodate(folio)) {
1451 folio_unlock(folio);
1452 bvec->bv_page = NULL;
1453 }
1454 return;
1455 }
1456 /*
1457 * Already linked with another pcluster, which only appears in
1458 * crafted images by fuzzers for now. But handle this anyway.
1459 */
1460 tocache = false; /* use temporary short-lived pages */
1461 } else {
1462 DBG_BUGON(1); /* referenced managed folios can't be truncated */
1463 tocache = true;
1464 }
1465 folio_unlock(folio);
1466 folio_put(folio);
1467 out_allocfolio:
1468 page = __erofs_allocpage(&f->pagepool, gfp, true);
1469 spin_lock(&pcl->obj.lockref.lock);
1470 if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) {
1471 if (page)
1472 erofs_pagepool_add(&f->pagepool, page);
1473 spin_unlock(&pcl->obj.lockref.lock);
1474 cond_resched();
1475 goto repeat;
1476 }
1477 pcl->compressed_bvecs[nr].page = page ? page : ERR_PTR(-ENOMEM);
1478 spin_unlock(&pcl->obj.lockref.lock);
1479 bvec->bv_page = page;
1480 if (!page)
1481 return;
1482 folio = page_folio(page);
1483 out_tocache:
1484 if (!tocache || bs != PAGE_SIZE ||
1485 filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) {
1486 /* turn into a temporary shortlived folio (1 ref) */
1487 folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
1488 return;
1489 }
1490 folio_attach_private(folio, pcl);
1491 /* drop a refcount added by allocpage (then 2 refs in total here) */
1492 folio_put(folio);
1493 }
1494
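/* set up a background decompression queue, falling back to the on-stack foreground queue if allocation fails */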
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
1496 struct z_erofs_decompressqueue *fgq, bool *fg)
1497 {
1498 struct z_erofs_decompressqueue *q;
1499
1500 if (fg && !*fg) {
1501 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1502 if (!q) {
1503 *fg = true;
1504 goto fg_out;
1505 }
1506 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1507 kthread_init_work(&q->u.kthread_work,
1508 z_erofs_decompressqueue_kthread_work);
1509 #else
1510 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1511 #endif
1512 } else {
1513 fg_out:
1514 q = fgq;
1515 init_completion(&fgq->u.done);
1516 atomic_set(&fgq->pending_bios, 0);
1517 q->eio = false;
1518 q->sync = true;
1519 }
1520 q->sb = sb;
1521 q->head = Z_EROFS_PCLUSTER_TAIL;
1522 return q;
1523 }
1524
1525 /* define decompression jobqueue types */
1526 enum {
1527 JQ_BYPASS,
1528 JQ_SUBMIT,
1529 NR_JOBQUEUES,
1530 };
1531
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1533 z_erofs_next_pcluster_t qtail[],
1534 z_erofs_next_pcluster_t owned_head)
1535 {
1536 z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1537 z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1538
1539 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
1540
1541 WRITE_ONCE(*submit_qtail, owned_head);
1542 WRITE_ONCE(*bypass_qtail, &pcl->next);
1543
1544 qtail[JQ_BYPASS] = &pcl->next;
1545 }
1546
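/* read completion handler: mark managed cache folios uptodate and unlock them */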
static void z_erofs_endio(struct bio *bio)
1548 {
1549 struct z_erofs_decompressqueue *q = bio->bi_private;
1550 blk_status_t err = bio->bi_status;
1551 struct folio_iter fi;
1552
1553 bio_for_each_folio_all(fi, bio) {
1554 struct folio *folio = fi.folio;
1555
1556 DBG_BUGON(folio_test_uptodate(folio));
1557 DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
1558 if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
1559 continue;
1560
1561 if (!err)
1562 folio_mark_uptodate(folio);
1563 folio_unlock(folio);
1564 }
1565 if (err)
1566 q->eio = true;
1567 z_erofs_decompress_kickoff(q, -1);
1568 if (bio->bi_bdev)
1569 bio_put(bio);
1570 }
1571
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1573 struct z_erofs_decompressqueue *fgq,
1574 bool *force_fg, bool readahead)
1575 {
1576 struct super_block *sb = f->inode->i_sb;
1577 struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1578 z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1579 struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1580 z_erofs_next_pcluster_t owned_head = f->owned_head;
1581 /* bio is NULL initially, so no need to initialize last_{index,bdev} */
1582 erofs_off_t last_pa;
1583 unsigned int nr_bios = 0;
1584 struct bio *bio = NULL;
1585 unsigned long pflags;
1586 int memstall = 0;
1587
1588 /* No need to read from device for pclusters in the bypass queue. */
1589 q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1590 q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
1591
1592 qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1593 qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1594
1595 /* by default, all need io submission */
1596 q[JQ_SUBMIT]->head = owned_head;
1597
1598 do {
1599 struct erofs_map_dev mdev;
1600 struct z_erofs_pcluster *pcl;
1601 erofs_off_t cur, end;
1602 struct bio_vec bvec;
1603 unsigned int i = 0;
1604 bool bypass = true;
1605
1606 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1607 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1608 owned_head = READ_ONCE(pcl->next);
1609
1610 if (z_erofs_is_inline_pcluster(pcl)) {
1611 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1612 continue;
1613 }
1614
1615 /* no device id here, thus it will always succeed */
1616 mdev = (struct erofs_map_dev) {
1617 .m_pa = erofs_pos(sb, pcl->obj.index),
1618 };
1619 (void)erofs_map_dev(sb, &mdev);
1620
1621 cur = mdev.m_pa;
1622 end = cur + pcl->pclustersize;
1623 do {
1624 bvec.bv_page = NULL;
1625 if (bio && (cur != last_pa ||
1626 bio->bi_bdev != mdev.m_bdev)) {
1627 drain_io:
1628 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1629 erofs_fileio_submit_bio(bio);
1630 else if (erofs_is_fscache_mode(sb))
1631 erofs_fscache_submit_bio(bio);
1632 else
1633 submit_bio(bio);
1634
1635 if (memstall) {
1636 psi_memstall_leave(&pflags);
1637 memstall = 0;
1638 }
1639 bio = NULL;
1640 }
1641
1642 if (!bvec.bv_page) {
1643 z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
1644 if (!bvec.bv_page)
1645 continue;
1646 if (cur + bvec.bv_len > end)
1647 bvec.bv_len = end - cur;
1648 DBG_BUGON(bvec.bv_len < sb->s_blocksize);
1649 }
1650
1651 if (unlikely(PageWorkingset(bvec.bv_page)) &&
1652 !memstall) {
1653 psi_memstall_enter(&pflags);
1654 memstall = 1;
1655 }
1656
1657 if (!bio) {
1658 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1659 bio = erofs_fileio_bio_alloc(&mdev);
1660 else if (erofs_is_fscache_mode(sb))
1661 bio = erofs_fscache_bio_alloc(&mdev);
1662 else
1663 bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1664 REQ_OP_READ, GFP_NOIO);
1665 bio->bi_end_io = z_erofs_endio;
1666 bio->bi_iter.bi_sector = cur >> 9;
1667 bio->bi_private = q[JQ_SUBMIT];
1668 if (readahead)
1669 bio->bi_opf |= REQ_RAHEAD;
1670 ++nr_bios;
1671 }
1672
1673 if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
1674 bvec.bv_offset))
1675 goto drain_io;
1676 last_pa = cur + bvec.bv_len;
1677 bypass = false;
1678 } while ((cur += bvec.bv_len) < end);
1679
1680 if (!bypass)
1681 qtail[JQ_SUBMIT] = &pcl->next;
1682 else
1683 move_to_bypass_jobqueue(pcl, qtail, owned_head);
1684 } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1685
1686 if (bio) {
1687 if (erofs_is_fileio_mode(EROFS_SB(sb)))
1688 erofs_fileio_submit_bio(bio);
1689 else if (erofs_is_fscache_mode(sb))
1690 erofs_fscache_submit_bio(bio);
1691 else
1692 submit_bio(bio);
1693 if (memstall)
1694 psi_memstall_leave(&pflags);
1695 }
1696
/*
 * Although background decompression is preferred, nothing is pending
 * for submission; drop the queue directly instead of issuing
 * decompression.
 */
1701 if (!*force_fg && !nr_bios) {
1702 kvfree(q[JQ_SUBMIT]);
1703 return;
1704 }
1705 z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
1706 }
1707
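/* submit all chained pclusters and, for sync decompression, decompress in the caller context */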
static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1709 unsigned int ra_folios)
1710 {
1711 struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1712 struct erofs_sb_info *sbi = EROFS_I_SB(f->inode);
1713 bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios);
1714 int err;
1715
1716 if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1717 return 0;
1718 z_erofs_submit_queue(f, io, &force_fg, !!ra_folios);
1719
1720 /* handle bypass queue (no i/o pclusters) immediately */
1721 err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
1722 if (!force_fg)
1723 return err;
1724
1725 /* wait until all bios are completed */
1726 wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1727
1728 /* handle synchronous decompress queue in the caller context */
1729 return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err;
1730 }
1731
/*
 * Since partial uptodate is still unimplemented, we have to use
 * approximate readmore strategies as a start.
 */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1737 struct readahead_control *rac, bool backmost)
1738 {
1739 struct inode *inode = f->inode;
1740 struct erofs_map_blocks *map = &f->map;
1741 erofs_off_t cur, end, headoffset = f->headoffset;
1742 int err;
1743
1744 if (backmost) {
1745 if (rac)
1746 end = headoffset + readahead_length(rac) - 1;
1747 else
1748 end = headoffset + PAGE_SIZE - 1;
1749 map->m_la = end;
1750 err = z_erofs_map_blocks_iter(inode, map,
1751 EROFS_GET_BLOCKS_READMORE);
1752 if (err)
1753 return;
1754
1755 /* expand ra for the trailing edge if readahead */
1756 if (rac) {
1757 cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1758 readahead_expand(rac, headoffset, cur - headoffset);
1759 return;
1760 }
1761 end = round_up(end, PAGE_SIZE);
1762 } else {
1763 end = round_up(map->m_la, PAGE_SIZE);
1764 if (!map->m_llen)
1765 return;
1766 }
1767
1768 cur = map->m_la + map->m_llen - 1;
1769 while ((cur >= end) && (cur < i_size_read(inode))) {
1770 pgoff_t index = cur >> PAGE_SHIFT;
1771 struct folio *folio;
1772
1773 folio = erofs_grab_folio_nowait(inode->i_mapping, index);
1774 if (!IS_ERR_OR_NULL(folio)) {
1775 if (folio_test_uptodate(folio))
1776 folio_unlock(folio);
1777 else
1778 z_erofs_scan_folio(f, folio, !!rac);
1779 folio_put(folio);
1780 }
1781
1782 if (cur < PAGE_SIZE)
1783 break;
1784 cur = (index << PAGE_SHIFT) - 1;
1785 }
1786 }
1787
static int z_erofs_read_folio(struct file *file, struct folio *folio)
1789 {
1790 struct inode *const inode = folio->mapping->host;
1791 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1792 int err;
1793
1794 trace_erofs_read_folio(folio, false);
1795 f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
1796
1797 z_erofs_pcluster_readmore(&f, NULL, true);
1798 err = z_erofs_scan_folio(&f, folio, false);
1799 z_erofs_pcluster_readmore(&f, NULL, false);
1800 z_erofs_pcluster_end(&f);
1801
/* if some pclusters are ready, they need to be submitted anyway */
1803 err = z_erofs_runqueue(&f, 0) ?: err;
1804 if (err && err != -EINTR)
1805 erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
1806 err, folio->index, EROFS_I(inode)->nid);
1807
1808 erofs_put_metabuf(&f.map.buf);
1809 erofs_release_pages(&f.pagepool);
1810 return err;
1811 }
1812
static void z_erofs_readahead(struct readahead_control *rac)
1814 {
1815 struct inode *const inode = rac->mapping->host;
1816 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1817 struct folio *head = NULL, *folio;
1818 unsigned int nr_folios;
1819 int err;
1820
1821 f.headoffset = readahead_pos(rac);
1822
1823 z_erofs_pcluster_readmore(&f, rac, true);
1824 nr_folios = readahead_count(rac);
1825 trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
1826
1827 while ((folio = readahead_folio(rac))) {
1828 folio->private = head;
1829 head = folio;
1830 }
1831
1832 /* traverse in reverse order for best metadata I/O performance */
1833 while (head) {
1834 folio = head;
1835 head = folio_get_private(folio);
1836
1837 err = z_erofs_scan_folio(&f, folio, true);
1838 if (err && err != -EINTR)
1839 erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
1840 folio->index, EROFS_I(inode)->nid);
1841 }
1842 z_erofs_pcluster_readmore(&f, rac, false);
1843 z_erofs_pcluster_end(&f);
1844
1845 (void)z_erofs_runqueue(&f, nr_folios);
1846 erofs_put_metabuf(&f.map.buf);
1847 erofs_release_pages(&f.pagepool);
1848 }
1849
1850 const struct address_space_operations z_erofs_aops = {
1851 .read_folio = z_erofs_read_folio,
1852 .readahead = z_erofs_readahead,
1853 };
1854