xref: /linux/fs/f2fs/compress.c (revision 6d61a53dd6f55405ebcaea6ee38d1ab5a8856c2c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17 
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22 
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25 
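/*
 * rpages/cpages arrays are normally sized for the superblock's configured
 * cluster size, so they are served from a per-superblock slab
 * (sbi->page_array_slab); anything larger falls back to a plain zeroed
 * allocation.  page_array_free() below mirrors this choice on release.
 */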
26 static void *page_array_alloc(struct inode *inode, int nr)
27 {
28 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29 	unsigned int size = sizeof(struct page *) * nr;
30 
31 	if (likely(size <= sbi->page_array_slab_size))
32 		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33 					GFP_F2FS_ZERO, false, sbi);
34 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
35 }
36 
37 static void page_array_free(struct inode *inode, void *pages, int nr)
38 {
39 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40 	unsigned int size = sizeof(struct page *) * nr;
41 
42 	if (!pages)
43 		return;
44 
45 	if (likely(size <= sbi->page_array_slab_size))
46 		kmem_cache_free(sbi->page_array_slab, pages);
47 	else
48 		kfree(pages);
49 }
50 
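/*
 * Per-algorithm hooks.  init/destroy_decompress_ctx are only needed by
 * algorithms with per-cluster decompression state (currently zstd), and
 * is_level_valid only by algorithms that accept a compress level.
 */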
51 struct f2fs_compress_ops {
52 	int (*init_compress_ctx)(struct compress_ctx *cc);
53 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 	int (*compress_pages)(struct compress_ctx *cc);
55 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
57 	int (*decompress_pages)(struct decompress_io_ctx *dic);
58 	bool (*is_level_valid)(int level);
59 };
60 
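/*
 * Cluster geometry helpers.  As a worked example, with the default
 * log_cluster_size of 2 (a 4-page cluster), page index 5 maps to
 * offset_in_cluster() == 1 and cluster_idx() == 1, and that cluster's
 * start_idx_of_cluster() == 4.
 */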
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
62 {
63 	return index & (cc->cluster_size - 1);
64 }
65 
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
67 {
68 	return index >> cc->log_cluster_size;
69 }
70 
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
72 {
73 	return cc->cluster_idx << cc->log_cluster_size;
74 }
75 
76 bool f2fs_is_compressed_page(struct page *page)
77 {
78 	if (!PagePrivate(page))
79 		return false;
80 	if (!page_private(page))
81 		return false;
82 	if (page_private_nonpointer(page))
83 		return false;
84 
85 	f2fs_bug_on(F2FS_M_SB(page->mapping),
86 		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
87 	return true;
88 }
89 
90 static void f2fs_set_compressed_page(struct page *page,
91 		struct inode *inode, pgoff_t index, void *data)
92 {
93 	struct folio *folio = page_folio(page);
94 
95 	folio_attach_private(folio, (void *)data);
96 
97 	/* i_crypto_info and iv index */
98 	folio->index = index;
99 	folio->mapping = inode->i_mapping;
100 }
101 
102 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
103 {
104 	int i;
105 
106 	for (i = 0; i < len; i++) {
107 		if (!cc->rpages[i])
108 			continue;
109 		if (unlock)
110 			unlock_page(cc->rpages[i]);
111 		else
112 			put_page(cc->rpages[i]);
113 	}
114 }
115 
116 static void f2fs_put_rpages(struct compress_ctx *cc)
117 {
118 	f2fs_drop_rpages(cc, cc->cluster_size, false);
119 }
120 
121 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
122 {
123 	f2fs_drop_rpages(cc, len, true);
124 }
125 
126 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
127 		struct writeback_control *wbc, bool redirty, int unlock)
128 {
129 	unsigned int i;
130 
131 	for (i = 0; i < cc->cluster_size; i++) {
132 		if (!cc->rpages[i])
133 			continue;
134 		if (redirty)
135 			redirty_page_for_writepage(wbc, cc->rpages[i]);
136 		f2fs_put_page(cc->rpages[i], unlock);
137 	}
138 }
139 
140 struct page *f2fs_compress_control_page(struct page *page)
141 {
142 	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
143 }
144 
145 int f2fs_init_compress_ctx(struct compress_ctx *cc)
146 {
147 	if (cc->rpages)
148 		return 0;
149 
150 	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
151 	return cc->rpages ? 0 : -ENOMEM;
152 }
153 
154 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
155 {
156 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
157 	cc->rpages = NULL;
158 	cc->nr_rpages = 0;
159 	cc->nr_cpages = 0;
160 	cc->valid_nr_cpages = 0;
161 	if (!reuse)
162 		cc->cluster_idx = NULL_CLUSTER;
163 }
164 
165 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
166 {
167 	unsigned int cluster_ofs;
168 
169 	if (!f2fs_cluster_can_merge_page(cc, folio->index))
170 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
171 
172 	cluster_ofs = offset_in_cluster(cc, folio->index);
173 	cc->rpages[cluster_ofs] = folio_page(folio, 0);
174 	cc->nr_rpages++;
175 	cc->cluster_idx = cluster_idx(cc, folio->index);
176 }
177 
178 #ifdef CONFIG_F2FS_FS_LZO
179 static int lzo_init_compress_ctx(struct compress_ctx *cc)
180 {
181 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
182 				LZO1X_MEM_COMPRESS, GFP_NOFS);
183 	if (!cc->private)
184 		return -ENOMEM;
185 
186 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
187 	return 0;
188 }
189 
190 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
191 {
192 	kvfree(cc->private);
193 	cc->private = NULL;
194 }
195 
196 static int lzo_compress_pages(struct compress_ctx *cc)
197 {
198 	int ret;
199 
200 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
201 					&cc->clen, cc->private);
202 	if (ret != LZO_E_OK) {
203 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
204 				"lzo compress failed, ret:%d", ret);
205 		return -EIO;
206 	}
207 	return 0;
208 }
209 
210 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
211 {
212 	int ret;
213 
214 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
215 						dic->rbuf, &dic->rlen);
216 	if (ret != LZO_E_OK) {
217 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
218 				"lzo decompress failed, ret:%d", ret);
219 		return -EIO;
220 	}
221 
222 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
223 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
224 				"lzo invalid rlen:%zu, expected:%lu",
225 				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
226 		return -EIO;
227 	}
228 	return 0;
229 }
230 
231 static const struct f2fs_compress_ops f2fs_lzo_ops = {
232 	.init_compress_ctx	= lzo_init_compress_ctx,
233 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
234 	.compress_pages		= lzo_compress_pages,
235 	.decompress_pages	= lzo_decompress_pages,
236 };
237 #endif
238 
239 #ifdef CONFIG_F2FS_FS_LZ4
240 static int lz4_init_compress_ctx(struct compress_ctx *cc)
241 {
242 	unsigned int size = LZ4_MEM_COMPRESS;
243 
244 #ifdef CONFIG_F2FS_FS_LZ4HC
245 	if (F2FS_I(cc->inode)->i_compress_level)
246 		size = LZ4HC_MEM_COMPRESS;
247 #endif
248 
249 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
250 	if (!cc->private)
251 		return -ENOMEM;
252 
253 	/*
254 	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
255 	 * the worst compress case, because the lz4 compressor handles the
256 	 * output budget itself.
257 	 */
258 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
259 	return 0;
260 }
261 
262 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
263 {
264 	kvfree(cc->private);
265 	cc->private = NULL;
266 }
267 
268 static int lz4_compress_pages(struct compress_ctx *cc)
269 {
270 	int len = -EINVAL;
271 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
272 
273 	if (!level)
274 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
275 						cc->clen, cc->private);
276 #ifdef CONFIG_F2FS_FS_LZ4HC
277 	else
278 		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
279 					cc->clen, level, cc->private);
280 #endif
281 	if (len < 0)
282 		return len;
283 	if (!len)
284 		return -EAGAIN;
285 
286 	cc->clen = len;
287 	return 0;
288 }
289 
290 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
291 {
292 	int ret;
293 
294 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
295 						dic->clen, dic->rlen);
296 	if (ret < 0) {
297 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
298 				"lz4 decompress failed, ret:%d", ret);
299 		return -EIO;
300 	}
301 
302 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
303 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
304 				"lz4 invalid ret:%d, expected:%lu",
305 				ret, PAGE_SIZE << dic->log_cluster_size);
306 		return -EIO;
307 	}
308 	return 0;
309 }
310 
311 static bool lz4_is_level_valid(int lvl)
312 {
313 #ifdef CONFIG_F2FS_FS_LZ4HC
314 	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
315 #else
316 	return lvl == 0;
317 #endif
318 }
319 
320 static const struct f2fs_compress_ops f2fs_lz4_ops = {
321 	.init_compress_ctx	= lz4_init_compress_ctx,
322 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
323 	.compress_pages		= lz4_compress_pages,
324 	.decompress_pages	= lz4_decompress_pages,
325 	.is_level_valid		= lz4_is_level_valid,
326 };
327 #endif
328 
329 #ifdef CONFIG_F2FS_FS_ZSTD
330 static int zstd_init_compress_ctx(struct compress_ctx *cc)
331 {
332 	zstd_parameters params;
333 	zstd_cstream *stream;
334 	void *workspace;
335 	unsigned int workspace_size;
336 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
337 
338 	/* Keep this default for backward compatibility */
339 	if (!level)
340 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
341 
342 	params = zstd_get_params(level, cc->rlen);
343 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
344 
345 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
346 					workspace_size, GFP_NOFS);
347 	if (!workspace)
348 		return -ENOMEM;
349 
350 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
351 	if (!stream) {
352 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
353 				"%s zstd_init_cstream failed", __func__);
354 		kvfree(workspace);
355 		return -EIO;
356 	}
357 
358 	cc->private = workspace;
359 	cc->private2 = stream;
360 
361 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
362 	return 0;
363 }
364 
365 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
366 {
367 	kvfree(cc->private);
368 	cc->private = NULL;
369 	cc->private2 = NULL;
370 }
371 
372 static int zstd_compress_pages(struct compress_ctx *cc)
373 {
374 	zstd_cstream *stream = cc->private2;
375 	zstd_in_buffer inbuf;
376 	zstd_out_buffer outbuf;
377 	int src_size = cc->rlen;
378 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
379 	int ret;
380 
381 	inbuf.pos = 0;
382 	inbuf.src = cc->rbuf;
383 	inbuf.size = src_size;
384 
385 	outbuf.pos = 0;
386 	outbuf.dst = cc->cbuf->cdata;
387 	outbuf.size = dst_size;
388 
389 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
390 	if (zstd_is_error(ret)) {
391 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
392 				"%s zstd_compress_stream failed, ret: %d",
393 				__func__, zstd_get_error_code(ret));
394 		return -EIO;
395 	}
396 
397 	ret = zstd_end_stream(stream, &outbuf);
398 	if (zstd_is_error(ret)) {
399 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
400 				"%s zstd_end_stream returned %d",
401 				__func__, zstd_get_error_code(ret));
402 		return -EIO;
403 	}
404 
405 	/*
406 	 * Compressed data remains in the intermediate buffer because there
407 	 * is no more space left in cbuf->cdata.
408 	 */
409 	if (ret)
410 		return -EAGAIN;
411 
412 	cc->clen = outbuf.pos;
413 	return 0;
414 }
415 
416 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
417 {
418 	zstd_dstream *stream;
419 	void *workspace;
420 	unsigned int workspace_size;
421 	unsigned int max_window_size =
422 			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
423 
424 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
425 
426 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
427 					workspace_size, GFP_NOFS);
428 	if (!workspace)
429 		return -ENOMEM;
430 
431 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
432 	if (!stream) {
433 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
434 				"%s zstd_init_dstream failed", __func__);
435 		kvfree(workspace);
436 		return -EIO;
437 	}
438 
439 	dic->private = workspace;
440 	dic->private2 = stream;
441 
442 	return 0;
443 }
444 
445 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
446 {
447 	kvfree(dic->private);
448 	dic->private = NULL;
449 	dic->private2 = NULL;
450 }
451 
452 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
453 {
454 	zstd_dstream *stream = dic->private2;
455 	zstd_in_buffer inbuf;
456 	zstd_out_buffer outbuf;
457 	int ret;
458 
459 	inbuf.pos = 0;
460 	inbuf.src = dic->cbuf->cdata;
461 	inbuf.size = dic->clen;
462 
463 	outbuf.pos = 0;
464 	outbuf.dst = dic->rbuf;
465 	outbuf.size = dic->rlen;
466 
467 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
468 	if (zstd_is_error(ret)) {
469 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
470 				"%s zstd_decompress_stream failed, ret: %d",
471 				__func__, zstd_get_error_code(ret));
472 		return -EIO;
473 	}
474 
475 	if (dic->rlen != outbuf.pos) {
476 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
477 				"%s ZSTD invalid rlen:%zu, expected:%lu",
478 				__func__, dic->rlen,
479 				PAGE_SIZE << dic->log_cluster_size);
480 		return -EIO;
481 	}
482 
483 	return 0;
484 }
485 
486 static bool zstd_is_level_valid(int lvl)
487 {
488 	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
489 }
490 
491 static const struct f2fs_compress_ops f2fs_zstd_ops = {
492 	.init_compress_ctx	= zstd_init_compress_ctx,
493 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
494 	.compress_pages		= zstd_compress_pages,
495 	.init_decompress_ctx	= zstd_init_decompress_ctx,
496 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
497 	.decompress_pages	= zstd_decompress_pages,
498 	.is_level_valid		= zstd_is_level_valid,
499 };
500 #endif
501 
502 #ifdef CONFIG_F2FS_FS_LZO
503 #ifdef CONFIG_F2FS_FS_LZORLE
504 static int lzorle_compress_pages(struct compress_ctx *cc)
505 {
506 	int ret;
507 
508 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
509 					&cc->clen, cc->private);
510 	if (ret != LZO_E_OK) {
511 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
512 				"lzo-rle compress failed, ret:%d", ret);
513 		return -EIO;
514 	}
515 	return 0;
516 }
517 
518 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
519 	.init_compress_ctx	= lzo_init_compress_ctx,
520 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
521 	.compress_pages		= lzorle_compress_pages,
522 	.decompress_pages	= lzo_decompress_pages,
523 };
524 #endif
525 #endif
526 
527 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
528 #ifdef CONFIG_F2FS_FS_LZO
529 	&f2fs_lzo_ops,
530 #else
531 	NULL,
532 #endif
533 #ifdef CONFIG_F2FS_FS_LZ4
534 	&f2fs_lz4_ops,
535 #else
536 	NULL,
537 #endif
538 #ifdef CONFIG_F2FS_FS_ZSTD
539 	&f2fs_zstd_ops,
540 #else
541 	NULL,
542 #endif
543 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
544 	&f2fs_lzorle_ops,
545 #else
546 	NULL,
547 #endif
548 };
549 
550 bool f2fs_is_compress_backend_ready(struct inode *inode)
551 {
552 	if (!f2fs_compressed_file(inode))
553 		return true;
554 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
555 }
556 
557 bool f2fs_is_compress_level_valid(int alg, int lvl)
558 {
559 	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
560 
561 	if (cops->is_level_valid)
562 		return cops->is_level_valid(lvl);
563 
564 	return lvl == 0;
565 }
566 
567 static mempool_t *compress_page_pool;
568 static unsigned int num_compress_pages = 512;
569 module_param(num_compress_pages, uint, 0444);
570 MODULE_PARM_DESC(num_compress_pages,
571 		"Number of intermediate compress pages to preallocate");
572 
573 int __init f2fs_init_compress_mempool(void)
574 {
575 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
576 	return compress_page_pool ? 0 : -ENOMEM;
577 }
578 
579 void f2fs_destroy_compress_mempool(void)
580 {
581 	mempool_destroy(compress_page_pool);
582 }
583 
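/*
 * mempool_alloc() with GFP_NOFS can sleep (__GFP_DIRECT_RECLAIM is set),
 * so in process context it never returns NULL once the pool exists; that
 * is why there is no NULL check below.
 */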
584 static struct page *f2fs_compress_alloc_page(void)
585 {
586 	struct page *page;
587 
588 	page = mempool_alloc(compress_page_pool, GFP_NOFS);
589 	lock_page(page);
590 
591 	return page;
592 }
593 
594 static void f2fs_compress_free_page(struct page *page)
595 {
596 	if (!page)
597 		return;
598 	detach_page_private(page);
599 	page->mapping = NULL;
600 	unlock_page(page);
601 	mempool_free(page, compress_page_pool);
602 }
603 
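/*
 * vm_map_ram() can fail transiently when vmap address space is exhausted
 * by lazily-freed mappings; vm_unmap_aliases() purges those, so a short
 * retry loop is usually enough to obtain a mapping.
 */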
604 #define MAX_VMAP_RETRIES	3
605 
606 static void *f2fs_vmap(struct page **pages, unsigned int count)
607 {
608 	int i;
609 	void *buf = NULL;
610 
611 	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
612 		buf = vm_map_ram(pages, count, -1);
613 		if (buf)
614 			break;
615 		vm_unmap_aliases();
616 	}
617 	return buf;
618 }
619 
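/*
 * Compress one cluster: map rpages/cpages into two contiguous buffers,
 * run the per-algorithm hook, and accept the result only if it saves at
 * least one full page after the compress header (clen, checksum and
 * reserved words) is accounted for.  Surplus cpages beyond the compressed
 * size are returned to the mempool before writeback.
 */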
620 static int f2fs_compress_pages(struct compress_ctx *cc)
621 {
622 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
623 	const struct f2fs_compress_ops *cops =
624 				f2fs_cops[fi->i_compress_algorithm];
625 	unsigned int max_len, new_nr_cpages;
626 	u32 chksum = 0;
627 	int i, ret;
628 
629 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
630 				cc->cluster_size, fi->i_compress_algorithm);
631 
632 	if (cops->init_compress_ctx) {
633 		ret = cops->init_compress_ctx(cc);
634 		if (ret)
635 			goto out;
636 	}
637 
638 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
639 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
640 	cc->valid_nr_cpages = cc->nr_cpages;
641 
642 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
643 	if (!cc->cpages) {
644 		ret = -ENOMEM;
645 		goto destroy_compress_ctx;
646 	}
647 
648 	for (i = 0; i < cc->nr_cpages; i++)
649 		cc->cpages[i] = f2fs_compress_alloc_page();
650 
651 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
652 	if (!cc->rbuf) {
653 		ret = -ENOMEM;
654 		goto out_free_cpages;
655 	}
656 
657 	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
658 	if (!cc->cbuf) {
659 		ret = -ENOMEM;
660 		goto out_vunmap_rbuf;
661 	}
662 
663 	ret = cops->compress_pages(cc);
664 	if (ret)
665 		goto out_vunmap_cbuf;
666 
667 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
668 
669 	if (cc->clen > max_len) {
670 		ret = -EAGAIN;
671 		goto out_vunmap_cbuf;
672 	}
673 
674 	cc->cbuf->clen = cpu_to_le32(cc->clen);
675 
676 	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
677 		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
678 					cc->cbuf->cdata, cc->clen);
679 	cc->cbuf->chksum = cpu_to_le32(chksum);
680 
681 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
682 		cc->cbuf->reserved[i] = cpu_to_le32(0);
683 
684 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
685 
686 	/* zero out any unused part of the last page */
687 	memset(&cc->cbuf->cdata[cc->clen], 0,
688 			(new_nr_cpages * PAGE_SIZE) -
689 			(cc->clen + COMPRESS_HEADER_SIZE));
690 
691 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
692 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
693 
694 	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
695 		f2fs_compress_free_page(cc->cpages[i]);
696 		cc->cpages[i] = NULL;
697 	}
698 
699 	if (cops->destroy_compress_ctx)
700 		cops->destroy_compress_ctx(cc);
701 
702 	cc->valid_nr_cpages = new_nr_cpages;
703 
704 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
705 							cc->clen, ret);
706 	return 0;
707 
708 out_vunmap_cbuf:
709 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
710 out_vunmap_rbuf:
711 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
712 out_free_cpages:
713 	for (i = 0; i < cc->nr_cpages; i++) {
714 		if (cc->cpages[i])
715 			f2fs_compress_free_page(cc->cpages[i]);
716 	}
717 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
718 	cc->cpages = NULL;
719 destroy_compress_ctx:
720 	if (cops->destroy_compress_ctx)
721 		cops->destroy_compress_ctx(cc);
722 out:
723 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
724 							cc->clen, ret);
725 	return ret;
726 }
727 
728 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
729 		bool pre_alloc);
730 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
731 		bool bypass_destroy_callback, bool pre_alloc);
732 
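/*
 * Decompress one cluster.  Called once all of its compressed pages have
 * completed I/O (see f2fs_end_read_compressed_page()); validates clen
 * against the number of compressed pages, runs the per-algorithm hook,
 * and optionally verifies the stored checksum.
 */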
733 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
734 {
735 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
736 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
737 	const struct f2fs_compress_ops *cops =
738 			f2fs_cops[fi->i_compress_algorithm];
739 	bool bypass_callback = false;
740 	int ret;
741 
742 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
743 				dic->cluster_size, fi->i_compress_algorithm);
744 
745 	if (dic->failed) {
746 		ret = -EIO;
747 		goto out_end_io;
748 	}
749 
750 	ret = f2fs_prepare_decomp_mem(dic, false);
751 	if (ret) {
752 		bypass_callback = true;
753 		goto out_release;
754 	}
755 
756 	dic->clen = le32_to_cpu(dic->cbuf->clen);
757 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
758 
759 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
760 		ret = -EFSCORRUPTED;
761 
762 		/* Avoid f2fs_commit_super in irq context */
763 		if (!in_task)
764 			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
765 		else
766 			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
767 		goto out_release;
768 	}
769 
770 	ret = cops->decompress_pages(dic);
771 
772 	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
773 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
774 		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
775 
776 		if (provided != calculated) {
777 			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
778 				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
779 				f2fs_info_ratelimited(sbi,
780 					"checksum invalid, nid = %lu, %x vs %x",
781 					dic->inode->i_ino,
782 					provided, calculated);
783 			}
784 			set_sbi_flag(sbi, SBI_NEED_FSCK);
785 		}
786 	}
787 
788 out_release:
789 	f2fs_release_decomp_mem(dic, bypass_callback, false);
790 
791 out_end_io:
792 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
793 							dic->clen, ret);
794 	f2fs_decompress_end_io(dic, ret, in_task);
795 }
796 
797 /*
798  * This is called when a page of a compressed cluster has been read from disk
799  * (or failed to be read from disk).  It checks whether this page was the last
800  * page being waited on in the cluster, and if so, it decompresses the cluster
801  * (or in the case of a failure, cleans up without actually decompressing).
802  */
803 void f2fs_end_read_compressed_page(struct page *page, bool failed,
804 		block_t blkaddr, bool in_task)
805 {
806 	struct decompress_io_ctx *dic =
807 			(struct decompress_io_ctx *)page_private(page);
808 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
809 
810 	dec_page_count(sbi, F2FS_RD_DATA);
811 
812 	if (failed)
813 		WRITE_ONCE(dic->failed, true);
814 	else if (blkaddr && in_task)
815 		f2fs_cache_compressed_page(sbi, page,
816 					dic->inode->i_ino, blkaddr);
817 
818 	if (atomic_dec_and_test(&dic->remaining_pages))
819 		f2fs_decompress_cluster(dic, in_task);
820 }
821 
822 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
823 {
824 	if (cc->cluster_idx == NULL_CLUSTER)
825 		return true;
826 	return cc->cluster_idx == cluster_idx(cc, index);
827 }
828 
829 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
830 {
831 	return cc->nr_rpages == 0;
832 }
833 
834 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
835 {
836 	return cc->cluster_size == cc->nr_rpages;
837 }
838 
839 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
840 {
841 	if (f2fs_cluster_is_empty(cc))
842 		return true;
843 	return is_page_in_cluster(cc, index);
844 }
845 
846 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
847 				int index, int nr_pages, bool uptodate)
848 {
849 	unsigned long pgidx = page_folio(pages[index])->index;
850 	int i = uptodate ? 0 : 1;
851 
852 	/*
853 	 * When uptodate is set to true, check whether all pages in the
854 	 * cluster are uptodate.
855 	 */
856 	if (uptodate && (pgidx % cc->cluster_size))
857 		return false;
858 
859 	if (nr_pages - index < cc->cluster_size)
860 		return false;
861 
862 	for (; i < cc->cluster_size; i++) {
863 		struct folio *folio = page_folio(pages[index + i]);
864 
865 		if (folio->index != pgidx + i)
866 			return false;
867 		if (uptodate && !folio_test_uptodate(folio))
868 			return false;
869 	}
870 
871 	return true;
872 }
873 
874 static bool cluster_has_invalid_data(struct compress_ctx *cc)
875 {
876 	loff_t i_size = i_size_read(cc->inode);
877 	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
878 	int i;
879 
880 	for (i = 0; i < cc->cluster_size; i++) {
881 		struct page *page = cc->rpages[i];
882 
883 		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
884 
885 		/* beyond EOF */
886 		if (page_folio(page)->index >= nr_pages)
887 			return true;
888 	}
889 	return false;
890 }
891 
892 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
893 {
894 #ifdef CONFIG_F2FS_CHECK_FS
895 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
896 	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
897 	int cluster_end = 0;
898 	unsigned int count;
899 	int i;
900 	char *reason = "";
901 
902 	if (dn->data_blkaddr != COMPRESS_ADDR)
903 		return false;
904 
905 	/* [..., COMPR_ADDR, ...] */
906 	if (dn->ofs_in_node % cluster_size) {
907 		reason = "[*|C|*|*]";
908 		goto out;
909 	}
910 
911 	for (i = 1, count = 1; i < cluster_size; i++, count++) {
912 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
913 							dn->ofs_in_node + i);
914 
915 		/* [COMPR_ADDR, ..., COMPR_ADDR] */
916 		if (blkaddr == COMPRESS_ADDR) {
917 			reason = "[C|*|C|*]";
918 			goto out;
919 		}
920 		if (!__is_valid_data_blkaddr(blkaddr)) {
921 			if (!cluster_end)
922 				cluster_end = i;
923 			continue;
924 		}
925 		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
926 		if (cluster_end) {
927 			reason = "[C|N|N|V]";
928 			goto out;
929 		}
930 	}
931 
932 	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
933 		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
934 
935 	return false;
936 out:
937 	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
938 			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
939 	set_sbi_flag(sbi, SBI_NEED_FSCK);
940 	return true;
941 #else
942 	return false;
943 #endif
944 }
945 
946 static int __f2fs_get_cluster_blocks(struct inode *inode,
947 					struct dnode_of_data *dn)
948 {
949 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
950 	int count, i;
951 
952 	for (i = 0, count = 0; i < cluster_size; i++) {
953 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
954 							dn->ofs_in_node + i);
955 
956 		if (__is_valid_data_blkaddr(blkaddr))
957 			count++;
958 	}
959 
960 	return count;
961 }
962 
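/*
 * Return value depends on 'type': CLUSTER_COMPR_BLKS counts the header
 * slot plus the compressed blocks of a compressed cluster,
 * CLUSTER_IS_COMPR returns 1 for a compressed cluster, and
 * CLUSTER_RAW_BLKS counts the valid raw blocks of a non-compressed
 * cluster.  0 means not found or not of the requested kind; a negative
 * value is an error.
 */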
963 static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
964 				enum cluster_check_type type)
965 {
966 	struct dnode_of_data dn;
967 	unsigned int start_idx = cluster_idx <<
968 				F2FS_I(inode)->i_log_cluster_size;
969 	int ret;
970 
971 	set_new_dnode(&dn, inode, NULL, NULL, 0);
972 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
973 	if (ret) {
974 		if (ret == -ENOENT)
975 			ret = 0;
976 		goto fail;
977 	}
978 
979 	if (f2fs_sanity_check_cluster(&dn)) {
980 		ret = -EFSCORRUPTED;
981 		goto fail;
982 	}
983 
984 	if (dn.data_blkaddr == COMPRESS_ADDR) {
985 		if (type == CLUSTER_COMPR_BLKS)
986 			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
987 		else if (type == CLUSTER_IS_COMPR)
988 			ret = 1;
989 	} else if (type == CLUSTER_RAW_BLKS) {
990 		ret = __f2fs_get_cluster_blocks(inode, &dn);
991 	}
992 fail:
993 	f2fs_put_dnode(&dn);
994 	return ret;
995 }
996 
997 /* return # of compressed blocks in compressed cluster */
998 static int f2fs_compressed_blocks(struct compress_ctx *cc)
999 {
1000 	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
1001 		CLUSTER_COMPR_BLKS);
1002 }
1003 
1004 /* return # of raw blocks in non-compressed cluster */
1005 static int f2fs_decompressed_blocks(struct inode *inode,
1006 				unsigned int cluster_idx)
1007 {
1008 	return __f2fs_cluster_blocks(inode, cluster_idx,
1009 		CLUSTER_RAW_BLKS);
1010 }
1011 
1012 /* return whether the cluster is a compressed one or not */
1013 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1014 {
1015 	return __f2fs_cluster_blocks(inode,
1016 		index >> F2FS_I(inode)->i_log_cluster_size,
1017 		CLUSTER_IS_COMPR);
1018 }
1019 
1020 /* return whether the cluster contains non-raw blocks or not */
1021 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
1022 {
1023 	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
1024 
1025 	return f2fs_decompressed_blocks(inode, cluster_idx) !=
1026 		F2FS_I(inode)->i_cluster_size;
1027 }
1028 
1029 static bool cluster_may_compress(struct compress_ctx *cc)
1030 {
1031 	if (!f2fs_need_compress_data(cc->inode))
1032 		return false;
1033 	if (f2fs_is_atomic_file(cc->inode))
1034 		return false;
1035 	if (!f2fs_cluster_is_full(cc))
1036 		return false;
1037 	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1038 		return false;
1039 	return !cluster_has_invalid_data(cc);
1040 }
1041 
1042 static void set_cluster_writeback(struct compress_ctx *cc)
1043 {
1044 	int i;
1045 
1046 	for (i = 0; i < cc->cluster_size; i++) {
1047 		if (cc->rpages[i])
1048 			set_page_writeback(cc->rpages[i]);
1049 	}
1050 }
1051 
1052 static void cancel_cluster_writeback(struct compress_ctx *cc,
1053 			struct compress_io_ctx *cic, int submitted)
1054 {
1055 	int i;
1056 
1057 	/* Wait for submitted IOs. */
1058 	if (submitted > 1) {
1059 		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
1060 		while (atomic_read(&cic->pending_pages) !=
1061 					(cc->valid_nr_cpages - submitted + 1))
1062 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1063 	}
1064 
1065 	/* Cancel writeback and stay locked. */
1066 	for (i = 0; i < cc->cluster_size; i++) {
1067 		if (i < submitted) {
1068 			inode_inc_dirty_pages(cc->inode);
1069 			lock_page(cc->rpages[i]);
1070 		}
1071 		clear_page_private_gcing(cc->rpages[i]);
1072 		if (folio_test_writeback(page_folio(cc->rpages[i])))
1073 			end_page_writeback(cc->rpages[i]);
1074 	}
1075 }
1076 
1077 static void set_cluster_dirty(struct compress_ctx *cc)
1078 {
1079 	int i;
1080 
1081 	for (i = 0; i < cc->cluster_size; i++)
1082 		if (cc->rpages[i]) {
1083 			set_page_dirty(cc->rpages[i]);
1084 			set_page_private_gcing(cc->rpages[i]);
1085 		}
1086 }
1087 
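/*
 * Get and lock every pagecache page of the cluster ahead of a partial
 * overwrite, reading in any page that is not yet uptodate.  If a page is
 * truncated while its lock was dropped, everything is released and the
 * whole sequence is retried.  On success, returns cluster_size with the
 * page array handed back through *fsdata.
 */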
1088 static int prepare_compress_overwrite(struct compress_ctx *cc,
1089 		struct page **pagep, pgoff_t index, void **fsdata)
1090 {
1091 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1092 	struct address_space *mapping = cc->inode->i_mapping;
1093 	struct page *page;
1094 	sector_t last_block_in_bio;
1095 	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1096 	pgoff_t start_idx = start_idx_of_cluster(cc);
1097 	int i, ret;
1098 
1099 retry:
1100 	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1101 	if (ret <= 0)
1102 		return ret;
1103 
1104 	ret = f2fs_init_compress_ctx(cc);
1105 	if (ret)
1106 		return ret;
1107 
1108 	/* keep page reference to avoid page reclaim */
1109 	for (i = 0; i < cc->cluster_size; i++) {
1110 		page = f2fs_pagecache_get_page(mapping, start_idx + i,
1111 							fgp_flag, GFP_NOFS);
1112 		if (!page) {
1113 			ret = -ENOMEM;
1114 			goto unlock_pages;
1115 		}
1116 
1117 		if (PageUptodate(page))
1118 			f2fs_put_page(page, 1);
1119 		else
1120 			f2fs_compress_ctx_add_page(cc, page_folio(page));
1121 	}
1122 
1123 	if (!f2fs_cluster_is_empty(cc)) {
1124 		struct bio *bio = NULL;
1125 
1126 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1127 					&last_block_in_bio, NULL, true);
1128 		f2fs_put_rpages(cc);
1129 		f2fs_destroy_compress_ctx(cc, true);
1130 		if (ret)
1131 			goto out;
1132 		if (bio)
1133 			f2fs_submit_read_bio(sbi, bio, DATA);
1134 
1135 		ret = f2fs_init_compress_ctx(cc);
1136 		if (ret)
1137 			goto out;
1138 	}
1139 
1140 	for (i = 0; i < cc->cluster_size; i++) {
1141 		f2fs_bug_on(sbi, cc->rpages[i]);
1142 
1143 		page = find_lock_page(mapping, start_idx + i);
1144 		if (!page) {
1145 			/* page can be truncated */
1146 			goto release_and_retry;
1147 		}
1148 
1149 		f2fs_wait_on_page_writeback(page, DATA, true, true);
1150 		f2fs_compress_ctx_add_page(cc, page_folio(page));
1151 
1152 		if (!PageUptodate(page)) {
1153 release_and_retry:
1154 			f2fs_put_rpages(cc);
1155 			f2fs_unlock_rpages(cc, i + 1);
1156 			f2fs_destroy_compress_ctx(cc, true);
1157 			goto retry;
1158 		}
1159 	}
1160 
1161 	if (likely(!ret)) {
1162 		*fsdata = cc->rpages;
1163 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1164 		return cc->cluster_size;
1165 	}
1166 
1167 unlock_pages:
1168 	f2fs_put_rpages(cc);
1169 	f2fs_unlock_rpages(cc, i);
1170 	f2fs_destroy_compress_ctx(cc, true);
1171 out:
1172 	return ret;
1173 }
1174 
1175 int f2fs_prepare_compress_overwrite(struct inode *inode,
1176 		struct page **pagep, pgoff_t index, void **fsdata)
1177 {
1178 	struct compress_ctx cc = {
1179 		.inode = inode,
1180 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1181 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1182 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1183 		.rpages = NULL,
1184 		.nr_rpages = 0,
1185 	};
1186 
1187 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1188 }
1189 
1190 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1191 					pgoff_t index, unsigned int copied)
1192 {
1194 	struct compress_ctx cc = {
1195 		.inode = inode,
1196 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1197 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1198 		.rpages = fsdata,
1199 	};
1200 	struct folio *folio = page_folio(cc.rpages[0]);
1201 	bool first_index = (index == folio->index);
1202 
1203 	if (copied)
1204 		set_cluster_dirty(&cc);
1205 
1206 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1207 	f2fs_destroy_compress_ctx(&cc, false);
1208 
1209 	return first_index;
1210 }
1211 
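/*
 * A compressed cluster cannot be truncated in place at a sub-cluster
 * boundary, so load it through the overwrite path, zero each page from
 * the tail down to 'from', and mark the cluster dirty so it is rewritten.
 * Normal (raw) clusters go through f2fs_do_truncate_blocks() directly.
 */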
1212 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1213 {
1214 	void *fsdata = NULL;
1215 	struct page *pagep;
1216 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1217 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1218 							log_cluster_size;
1219 	int err;
1220 
1221 	err = f2fs_is_compressed_cluster(inode, start_idx);
1222 	if (err < 0)
1223 		return err;
1224 
1225 	/* truncate normal cluster */
1226 	if (!err)
1227 		return f2fs_do_truncate_blocks(inode, from, lock);
1228 
1229 	/* truncate compressed cluster */
1230 	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1231 						start_idx, &fsdata);
1232 
1233 	/* should not be a normal cluster */
1234 	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1235 
1236 	if (err <= 0)
1237 		return err;
1238 
1239 	if (err > 0) {
1240 		struct page **rpages = fsdata;
1241 		int cluster_size = F2FS_I(inode)->i_cluster_size;
1242 		int i;
1243 
1244 		for (i = cluster_size - 1; i >= 0; i--) {
1245 			struct folio *folio = page_folio(rpages[i]);
1246 			loff_t start = (loff_t)folio->index << PAGE_SHIFT;
1247 
1248 			if (from <= start) {
1249 				folio_zero_segment(folio, 0, folio_size(folio));
1250 			} else {
1251 				folio_zero_segment(folio, from - start,
1252 						folio_size(folio));
1253 				break;
1254 			}
1255 		}
1256 
1257 		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1258 	}
1259 	return 0;
1260 }
1261 
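/*
 * Write one compressed cluster out of place: dnode slot 0 is stamped with
 * COMPRESS_ADDR as the cluster header, slots 1..valid_nr_cpages receive
 * the compressed pages, and the remaining slots are released to NEW_ADDR.
 * On -EAGAIN the caller falls back to writing the raw pages.
 */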
1262 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1263 					int *submitted,
1264 					struct writeback_control *wbc,
1265 					enum iostat_type io_type)
1266 {
1267 	struct inode *inode = cc->inode;
1268 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1269 	struct f2fs_inode_info *fi = F2FS_I(inode);
1270 	struct f2fs_io_info fio = {
1271 		.sbi = sbi,
1272 		.ino = cc->inode->i_ino,
1273 		.type = DATA,
1274 		.op = REQ_OP_WRITE,
1275 		.op_flags = wbc_to_write_flags(wbc),
1276 		.old_blkaddr = NEW_ADDR,
1277 		.page = NULL,
1278 		.encrypted_page = NULL,
1279 		.compressed_page = NULL,
1280 		.io_type = io_type,
1281 		.io_wbc = wbc,
1282 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1283 									1 : 0,
1284 	};
1285 	struct folio *folio;
1286 	struct dnode_of_data dn;
1287 	struct node_info ni;
1288 	struct compress_io_ctx *cic;
1289 	pgoff_t start_idx = start_idx_of_cluster(cc);
1290 	unsigned int last_index = cc->cluster_size - 1;
1291 	loff_t psize;
1292 	int i, err;
1293 	bool quota_inode = IS_NOQUOTA(inode);
1294 
1295 	/* bypass data pages so that pending kworker jobs can proceed */
1296 	if (unlikely(f2fs_cp_error(sbi))) {
1297 		mapping_set_error(inode->i_mapping, -EIO);
1298 		goto out_free;
1299 	}
1300 
1301 	if (quota_inode) {
1302 		/*
1303 		 * We need to wait for node_write to avoid block allocation during
1304 		 * checkpoint. This can only happen to quota writes, which can
1305 		 * otherwise race with checkpoint's discard processing.
1306 		 */
1307 		f2fs_down_read(&sbi->node_write);
1308 	} else if (!f2fs_trylock_op(sbi)) {
1309 		goto out_free;
1310 	}
1311 
1312 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1313 
1314 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1315 	if (err)
1316 		goto out_unlock_op;
1317 
1318 	for (i = 0; i < cc->cluster_size; i++) {
1319 		if (data_blkaddr(dn.inode, dn.node_page,
1320 					dn.ofs_in_node + i) == NULL_ADDR)
1321 			goto out_put_dnode;
1322 	}
1323 
1324 	folio = page_folio(cc->rpages[last_index]);
1325 	psize = folio_pos(folio) + folio_size(folio);
1326 
1327 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1328 	if (err)
1329 		goto out_put_dnode;
1330 
1331 	fio.version = ni.version;
1332 
1333 	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1334 	if (!cic)
1335 		goto out_put_dnode;
1336 
1337 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1338 	cic->inode = inode;
1339 	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1340 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1341 	if (!cic->rpages)
1342 		goto out_put_cic;
1343 
1344 	cic->nr_rpages = cc->cluster_size;
1345 
1346 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1347 		f2fs_set_compressed_page(cc->cpages[i], inode,
1348 				page_folio(cc->rpages[i + 1])->index, cic);
1349 		fio.compressed_page = cc->cpages[i];
1350 
1351 		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1352 						dn.ofs_in_node + i + 1);
1353 
1354 		/* wait for GCed page writeback via META_MAPPING */
1355 		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1356 
1357 		if (fio.encrypted) {
1358 			fio.page = cc->rpages[i + 1];
1359 			err = f2fs_encrypt_one_page(&fio);
1360 			if (err)
1361 				goto out_destroy_crypt;
1362 			cc->cpages[i] = fio.encrypted_page;
1363 		}
1364 	}
1365 
1366 	set_cluster_writeback(cc);
1367 
1368 	for (i = 0; i < cc->cluster_size; i++)
1369 		cic->rpages[i] = cc->rpages[i];
1370 
1371 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1372 		block_t blkaddr;
1373 
1374 		blkaddr = f2fs_data_blkaddr(&dn);
1375 		fio.page = cc->rpages[i];
1376 		fio.old_blkaddr = blkaddr;
1377 
1378 		/* cluster header */
1379 		if (i == 0) {
1380 			if (blkaddr == COMPRESS_ADDR)
1381 				fio.compr_blocks++;
1382 			if (__is_valid_data_blkaddr(blkaddr))
1383 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1384 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1385 			goto unlock_continue;
1386 		}
1387 
1388 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1389 			fio.compr_blocks++;
1390 
1391 		if (i > cc->valid_nr_cpages) {
1392 			if (__is_valid_data_blkaddr(blkaddr)) {
1393 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1394 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1395 			}
1396 			goto unlock_continue;
1397 		}
1398 
1399 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1400 
1401 		if (fio.encrypted)
1402 			fio.encrypted_page = cc->cpages[i - 1];
1403 		else
1404 			fio.compressed_page = cc->cpages[i - 1];
1405 
1406 		cc->cpages[i - 1] = NULL;
1407 		fio.submitted = 0;
1408 		f2fs_outplace_write_data(&dn, &fio);
1409 		if (unlikely(!fio.submitted)) {
1410 			cancel_cluster_writeback(cc, cic, i);
1411 
1412 			/* To call fscrypt_finalize_bounce_page */
1413 			i = cc->valid_nr_cpages;
1414 			*submitted = 0;
1415 			goto out_destroy_crypt;
1416 		}
1417 		(*submitted)++;
1418 unlock_continue:
1419 		inode_dec_dirty_pages(cc->inode);
1420 		unlock_page(fio.page);
1421 	}
1422 
1423 	if (fio.compr_blocks)
1424 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1425 	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1426 	add_compr_block_stat(inode, cc->valid_nr_cpages);
1427 
1428 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1429 
1430 	f2fs_put_dnode(&dn);
1431 	if (quota_inode)
1432 		f2fs_up_read(&sbi->node_write);
1433 	else
1434 		f2fs_unlock_op(sbi);
1435 
1436 	spin_lock(&fi->i_size_lock);
1437 	if (fi->last_disk_size < psize)
1438 		fi->last_disk_size = psize;
1439 	spin_unlock(&fi->i_size_lock);
1440 
1441 	f2fs_put_rpages(cc);
1442 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1443 	cc->cpages = NULL;
1444 	f2fs_destroy_compress_ctx(cc, false);
1445 	return 0;
1446 
1447 out_destroy_crypt:
1448 	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1449 
1450 	for (--i; i >= 0; i--) {
1451 		if (!cc->cpages[i])
1452 			continue;
1453 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1454 	}
1455 out_put_cic:
1456 	kmem_cache_free(cic_entry_slab, cic);
1457 out_put_dnode:
1458 	f2fs_put_dnode(&dn);
1459 out_unlock_op:
1460 	if (quota_inode)
1461 		f2fs_up_read(&sbi->node_write);
1462 	else
1463 		f2fs_unlock_op(sbi);
1464 out_free:
1465 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1466 		f2fs_compress_free_page(cc->cpages[i]);
1467 		cc->cpages[i] = NULL;
1468 	}
1469 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1470 	cc->cpages = NULL;
1471 	return -EAGAIN;
1472 }
1473 
1474 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1475 {
1476 	struct f2fs_sb_info *sbi = bio->bi_private;
1477 	struct compress_io_ctx *cic =
1478 			(struct compress_io_ctx *)page_private(page);
1479 	enum count_type type = WB_DATA_TYPE(page,
1480 				f2fs_is_compressed_page(page));
1481 	int i;
1482 
1483 	if (unlikely(bio->bi_status))
1484 		mapping_set_error(cic->inode->i_mapping, -EIO);
1485 
1486 	f2fs_compress_free_page(page);
1487 
1488 	dec_page_count(sbi, type);
1489 
1490 	if (atomic_dec_return(&cic->pending_pages))
1491 		return;
1492 
1493 	for (i = 0; i < cic->nr_rpages; i++) {
1494 		WARN_ON(!cic->rpages[i]);
1495 		clear_page_private_gcing(cic->rpages[i]);
1496 		end_page_writeback(cic->rpages[i]);
1497 	}
1498 
1499 	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1500 	kmem_cache_free(cic_entry_slab, cic);
1501 }
1502 
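/*
 * Fallback path when a cluster is not compressed (or compression was
 * abandoned): every dirty page of the cluster is written as a normal
 * data page.  If the cluster used to be compressed on disk, the rewrite
 * runs under f2fs_lock_op() so the cluster metadata stays consistent.
 */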
1503 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1504 					int *submitted_p,
1505 					struct writeback_control *wbc,
1506 					enum iostat_type io_type)
1507 {
1508 	struct address_space *mapping = cc->inode->i_mapping;
1509 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1510 	int submitted, compr_blocks, i;
1511 	int ret = 0;
1512 
1513 	compr_blocks = f2fs_compressed_blocks(cc);
1514 
1515 	for (i = 0; i < cc->cluster_size; i++) {
1516 		if (!cc->rpages[i])
1517 			continue;
1518 
1519 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1520 		unlock_page(cc->rpages[i]);
1521 	}
1522 
1523 	if (compr_blocks < 0)
1524 		return compr_blocks;
1525 
1526 	/* overwrite compressed cluster w/ normal cluster */
1527 	if (compr_blocks > 0)
1528 		f2fs_lock_op(sbi);
1529 
1530 	for (i = 0; i < cc->cluster_size; i++) {
1531 		if (!cc->rpages[i])
1532 			continue;
1533 retry_write:
1534 		lock_page(cc->rpages[i]);
1535 
1536 		if (cc->rpages[i]->mapping != mapping) {
1537 continue_unlock:
1538 			unlock_page(cc->rpages[i]);
1539 			continue;
1540 		}
1541 
1542 		if (!PageDirty(cc->rpages[i]))
1543 			goto continue_unlock;
1544 
1545 		if (folio_test_writeback(page_folio(cc->rpages[i]))) {
1546 			if (wbc->sync_mode == WB_SYNC_NONE)
1547 				goto continue_unlock;
1548 			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1549 		}
1550 
1551 		if (!clear_page_dirty_for_io(cc->rpages[i]))
1552 			goto continue_unlock;
1553 
1554 		submitted = 0;
1555 		ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
1556 						&submitted,
1557 						NULL, NULL, wbc, io_type,
1558 						compr_blocks, false);
1559 		if (ret) {
1560 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
1561 				unlock_page(cc->rpages[i]);
1562 				ret = 0;
1563 			} else if (ret == -EAGAIN) {
1564 				ret = 0;
1565 				/*
1566 				 * For a quota file, just redirty the remaining
1567 				 * pages to avoid a deadlock caused by a cluster
1568 				 * update race with foreground operations.
1569 				 */
1570 				if (IS_NOQUOTA(cc->inode))
1571 					goto out;
1572 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1573 				goto retry_write;
1574 			}
1575 			goto out;
1576 		}
1577 
1578 		*submitted_p += submitted;
1579 	}
1580 
1581 out:
1582 	if (compr_blocks > 0)
1583 		f2fs_unlock_op(sbi);
1584 
1585 	f2fs_balance_fs(sbi, true);
1586 	return ret;
1587 }
1588 
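/*
 * Entry point for writing back one cluster: try to compress it first and,
 * if compression fails or does not save space (-EAGAIN), fall back to
 * f2fs_write_raw_pages() above.
 */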
1589 int f2fs_write_multi_pages(struct compress_ctx *cc,
1590 					int *submitted,
1591 					struct writeback_control *wbc,
1592 					enum iostat_type io_type)
1593 {
1594 	int err;
1595 
1596 	*submitted = 0;
1597 	if (cluster_may_compress(cc)) {
1598 		err = f2fs_compress_pages(cc);
1599 		if (err == -EAGAIN) {
1600 			add_compr_block_stat(cc->inode, cc->cluster_size);
1601 			goto write;
1602 		} else if (err) {
1603 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1604 			goto destroy_out;
1605 		}
1606 
1607 		err = f2fs_write_compressed_pages(cc, submitted,
1608 							wbc, io_type);
1609 		if (!err)
1610 			return 0;
1611 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1612 	}
1613 write:
1614 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1615 
1616 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1617 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1618 destroy_out:
1619 	f2fs_destroy_compress_ctx(cc, false);
1620 	return err;
1621 }
1622 
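/*
 * In the normal mode, decompression buffers are allocated up front when
 * the dic is created (pre_alloc == true); in low-memory mode the
 * allocation is deferred to decompression time to shorten the buffers'
 * lifetime, hence the XOR below.
 */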
1623 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1624 		bool pre_alloc)
1625 {
1626 	return pre_alloc ^ f2fs_low_mem_mode(sbi);
1627 }
1628 
1629 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1630 		bool pre_alloc)
1631 {
1632 	const struct f2fs_compress_ops *cops =
1633 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1634 	int i;
1635 
1636 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1637 		return 0;
1638 
1639 	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1640 	if (!dic->tpages)
1641 		return -ENOMEM;
1642 
1643 	for (i = 0; i < dic->cluster_size; i++) {
1644 		if (dic->rpages[i]) {
1645 			dic->tpages[i] = dic->rpages[i];
1646 			continue;
1647 		}
1648 
1649 		dic->tpages[i] = f2fs_compress_alloc_page();
1650 	}
1651 
1652 	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1653 	if (!dic->rbuf)
1654 		return -ENOMEM;
1655 
1656 	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1657 	if (!dic->cbuf)
1658 		return -ENOMEM;
1659 
1660 	if (cops->init_decompress_ctx)
1661 		return cops->init_decompress_ctx(dic);
1662 
1663 	return 0;
1664 }
1665 
1666 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1667 		bool bypass_destroy_callback, bool pre_alloc)
1668 {
1669 	const struct f2fs_compress_ops *cops =
1670 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1671 
1672 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1673 		return;
1674 
1675 	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1676 		cops->destroy_decompress_ctx(dic);
1677 
1678 	if (dic->cbuf)
1679 		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1680 
1681 	if (dic->rbuf)
1682 		vm_unmap_ram(dic->rbuf, dic->cluster_size);
1683 }
1684 
1685 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1686 		bool bypass_destroy_callback);
1687 
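/*
 * Set up a decompress_io_ctx for one cluster read: it copies the caller's
 * rpages, allocates the compressed pages that will receive the on-disk
 * data, and (in the normal mode) pre-allocates the decompression buffers.
 * The initial reference is dropped by f2fs_decompress_end_io().
 */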
1688 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1689 {
1690 	struct decompress_io_ctx *dic;
1691 	pgoff_t start_idx = start_idx_of_cluster(cc);
1692 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1693 	int i, ret;
1694 
1695 	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1696 	if (!dic)
1697 		return ERR_PTR(-ENOMEM);
1698 
1699 	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1700 	if (!dic->rpages) {
1701 		kmem_cache_free(dic_entry_slab, dic);
1702 		return ERR_PTR(-ENOMEM);
1703 	}
1704 
1705 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1706 	dic->inode = cc->inode;
1707 	atomic_set(&dic->remaining_pages, cc->nr_cpages);
1708 	dic->cluster_idx = cc->cluster_idx;
1709 	dic->cluster_size = cc->cluster_size;
1710 	dic->log_cluster_size = cc->log_cluster_size;
1711 	dic->nr_cpages = cc->nr_cpages;
1712 	refcount_set(&dic->refcnt, 1);
1713 	dic->failed = false;
1714 	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1715 
1716 	for (i = 0; i < dic->cluster_size; i++)
1717 		dic->rpages[i] = cc->rpages[i];
1718 	dic->nr_rpages = cc->cluster_size;
1719 
1720 	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1721 	if (!dic->cpages) {
1722 		ret = -ENOMEM;
1723 		goto out_free;
1724 	}
1725 
1726 	for (i = 0; i < dic->nr_cpages; i++) {
1727 		struct page *page;
1728 
1729 		page = f2fs_compress_alloc_page();
1730 		f2fs_set_compressed_page(page, cc->inode,
1731 					start_idx + i + 1, dic);
1732 		dic->cpages[i] = page;
1733 	}
1734 
1735 	ret = f2fs_prepare_decomp_mem(dic, true);
1736 	if (ret)
1737 		goto out_free;
1738 
1739 	return dic;
1740 
1741 out_free:
1742 	f2fs_free_dic(dic, true);
1743 	return ERR_PTR(ret);
1744 }
1745 
1746 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1747 		bool bypass_destroy_callback)
1748 {
1749 	int i;
1750 
1751 	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1752 
1753 	if (dic->tpages) {
1754 		for (i = 0; i < dic->cluster_size; i++) {
1755 			if (dic->rpages[i])
1756 				continue;
1757 			if (!dic->tpages[i])
1758 				continue;
1759 			f2fs_compress_free_page(dic->tpages[i]);
1760 		}
1761 		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1762 	}
1763 
1764 	if (dic->cpages) {
1765 		for (i = 0; i < dic->nr_cpages; i++) {
1766 			if (!dic->cpages[i])
1767 				continue;
1768 			f2fs_compress_free_page(dic->cpages[i]);
1769 		}
1770 		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1771 	}
1772 
1773 	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1774 	kmem_cache_free(dic_entry_slab, dic);
1775 }
1776 
1777 static void f2fs_late_free_dic(struct work_struct *work)
1778 {
1779 	struct decompress_io_ctx *dic =
1780 		container_of(work, struct decompress_io_ctx, free_work);
1781 
1782 	f2fs_free_dic(dic, false);
1783 }
1784 
1785 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1786 {
1787 	if (refcount_dec_and_test(&dic->refcnt)) {
1788 		if (in_task) {
1789 			f2fs_free_dic(dic, false);
1790 		} else {
1791 			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1792 			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1793 					&dic->free_work);
1794 		}
1795 	}
1796 }
1797 
1798 static void f2fs_verify_cluster(struct work_struct *work)
1799 {
1800 	struct decompress_io_ctx *dic =
1801 		container_of(work, struct decompress_io_ctx, verity_work);
1802 	int i;
1803 
1804 	/* Verify, update, and unlock the decompressed pages. */
1805 	for (i = 0; i < dic->cluster_size; i++) {
1806 		struct page *rpage = dic->rpages[i];
1807 
1808 		if (!rpage)
1809 			continue;
1810 
1811 		if (fsverity_verify_page(rpage))
1812 			SetPageUptodate(rpage);
1813 		else
1814 			ClearPageUptodate(rpage);
1815 		unlock_page(rpage);
1816 	}
1817 
1818 	f2fs_put_dic(dic, true);
1819 }
1820 
1821 /*
1822  * This is called when a compressed cluster has been decompressed
1823  * (or failed to be read and/or decompressed).
1824  */
1825 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1826 				bool in_task)
1827 {
1828 	int i;
1829 
1830 	if (!failed && dic->need_verity) {
1831 		/*
1832 		 * Note that to avoid deadlocks, the verity work can't be done
1833 		 * on the decompression workqueue.  This is because verifying
1834 		 * the data pages can involve reading metadata pages from the
1835 		 * file, and these metadata pages may be compressed.
1836 		 */
1837 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1838 		fsverity_enqueue_verify_work(&dic->verity_work);
1839 		return;
1840 	}
1841 
1842 	/* Update and unlock the cluster's pagecache pages. */
1843 	for (i = 0; i < dic->cluster_size; i++) {
1844 		struct page *rpage = dic->rpages[i];
1845 
1846 		if (!rpage)
1847 			continue;
1848 
1849 		if (failed)
1850 			ClearPageUptodate(rpage);
1851 		else
1852 			SetPageUptodate(rpage);
1853 		unlock_page(rpage);
1854 	}
1855 
1856 	/*
1857 	 * Release the reference to the decompress_io_ctx that was being held
1858 	 * for I/O completion.
1859 	 */
1860 	f2fs_put_dic(dic, in_task);
1861 }
1862 
1863 /*
1864  * Put a reference to a compressed page's decompress_io_ctx.
1865  *
1866  * This is called when the page is no longer needed and can be freed.
1867  */
1868 void f2fs_put_page_dic(struct page *page, bool in_task)
1869 {
1870 	struct decompress_io_ctx *dic =
1871 			(struct decompress_io_ctx *)page_private(page);
1872 
1873 	f2fs_put_dic(dic, in_task);
1874 }
1875 
1876 /*
1877  * check whether cluster blocks are contiguous, and add extent cache entry
1878  * only if cluster blocks are logically and physically contiguous.
1879  */
1880 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
1881 						unsigned int ofs_in_node)
1882 {
1883 	bool compressed = data_blkaddr(dn->inode, dn->node_page,
1884 					ofs_in_node) == COMPRESS_ADDR;
1885 	int i = compressed ? 1 : 0;
1886 	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1887 							ofs_in_node + i);
1888 
1889 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1890 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1891 							ofs_in_node + i);
1892 
1893 		if (!__is_valid_data_blkaddr(blkaddr))
1894 			break;
1895 		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1896 			return 0;
1897 	}
1898 
1899 	return compressed ? i - 1 : i;
1900 }
1901 
1902 const struct address_space_operations f2fs_compress_aops = {
1903 	.release_folio = f2fs_release_folio,
1904 	.invalidate_folio = f2fs_invalidate_folio,
1905 	.migrate_folio	= filemap_migrate_folio,
1906 };
1907 
1908 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1909 {
1910 	return sbi->compress_inode->i_mapping;
1911 }
1912 
1913 void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
1914 				block_t blkaddr, unsigned int len)
1915 {
1916 	if (!sbi->compress_inode)
1917 		return;
1918 	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
1919 }
1920 
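/*
 * With the COMPRESS_CACHE mount option, on-disk compressed pages are
 * cached in a dedicated inode's mapping, keyed by block address, so a
 * later read of the same cluster can skip the device I/O (see
 * f2fs_load_compressed_page() below).
 */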
1921 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1922 						nid_t ino, block_t blkaddr)
1923 {
1924 	struct page *cpage;
1925 	int ret;
1926 
1927 	if (!test_opt(sbi, COMPRESS_CACHE))
1928 		return;
1929 
1930 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1931 		return;
1932 
1933 	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1934 		return;
1935 
1936 	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1937 	if (cpage) {
1938 		f2fs_put_page(cpage, 0);
1939 		return;
1940 	}
1941 
1942 	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1943 	if (!cpage)
1944 		return;
1945 
1946 	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1947 						blkaddr, GFP_NOFS);
1948 	if (ret) {
1949 		f2fs_put_page(cpage, 0);
1950 		return;
1951 	}
1952 
1953 	set_page_private_data(cpage, ino);
1954 
1955 	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1956 	SetPageUptodate(cpage);
1957 	f2fs_put_page(cpage, 1);
1958 }
1959 
1960 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1961 								block_t blkaddr)
1962 {
1963 	struct page *cpage;
1964 	bool hit = false;
1965 
1966 	if (!test_opt(sbi, COMPRESS_CACHE))
1967 		return false;
1968 
1969 	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1970 				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1971 	if (cpage) {
1972 		if (PageUptodate(cpage)) {
1973 			atomic_inc(&sbi->compress_page_hit);
1974 			memcpy(page_address(page),
1975 				page_address(cpage), PAGE_SIZE);
1976 			hit = true;
1977 		}
1978 		f2fs_put_page(cpage, 1);
1979 	}
1980 
1981 	return hit;
1982 }
1983 
1984 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1985 {
1986 	struct address_space *mapping = COMPRESS_MAPPING(sbi);
1987 	struct folio_batch fbatch;
1988 	pgoff_t index = 0;
1989 	pgoff_t end = MAX_BLKADDR(sbi);
1990 
1991 	if (!mapping->nrpages)
1992 		return;
1993 
1994 	folio_batch_init(&fbatch);
1995 
1996 	do {
1997 		unsigned int nr, i;
1998 
1999 		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
2000 		if (!nr)
2001 			break;
2002 
2003 		for (i = 0; i < nr; i++) {
2004 			struct folio *folio = fbatch.folios[i];
2005 
2006 			folio_lock(folio);
2007 			if (folio->mapping != mapping) {
2008 				folio_unlock(folio);
2009 				continue;
2010 			}
2011 
2012 			if (ino != get_page_private_data(&folio->page)) {
2013 				folio_unlock(folio);
2014 				continue;
2015 			}
2016 
2017 			generic_error_remove_folio(mapping, folio);
2018 			folio_unlock(folio);
2019 		}
2020 		folio_batch_release(&fbatch);
2021 		cond_resched();
2022 	} while (index < end);
2023 }
2024 
2025 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
2026 {
2027 	struct inode *inode;
2028 
2029 	if (!test_opt(sbi, COMPRESS_CACHE))
2030 		return 0;
2031 
2032 	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
2033 	if (IS_ERR(inode))
2034 		return PTR_ERR(inode);
2035 	sbi->compress_inode = inode;
2036 
2037 	sbi->compress_percent = COMPRESS_PERCENT;
2038 	sbi->compress_watermark = COMPRESS_WATERMARK;
2039 
2040 	atomic_set(&sbi->compress_page_hit, 0);
2041 
2042 	return 0;
2043 }
2044 
2045 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
2046 {
2047 	if (!sbi->compress_inode)
2048 		return;
2049 	iput(sbi->compress_inode);
2050 	sbi->compress_inode = NULL;
2051 }
2052 
2053 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
2054 {
2055 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2056 	char slab_name[35];
2057 
2058 	if (!f2fs_sb_has_compression(sbi))
2059 		return 0;
2060 
2061 	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
2062 
2063 	sbi->page_array_slab_size = sizeof(struct page *) <<
2064 					F2FS_OPTION(sbi).compress_log_size;
2065 
2066 	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
2067 					sbi->page_array_slab_size);
2068 	return sbi->page_array_slab ? 0 : -ENOMEM;
2069 }
2070 
2071 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
2072 {
2073 	kmem_cache_destroy(sbi->page_array_slab);
2074 }
2075 
2076 int __init f2fs_init_compress_cache(void)
2077 {
2078 	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2079 					sizeof(struct compress_io_ctx));
2080 	if (!cic_entry_slab)
2081 		return -ENOMEM;
2082 	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2083 					sizeof(struct decompress_io_ctx));
2084 	if (!dic_entry_slab)
2085 		goto free_cic;
2086 	return 0;
2087 free_cic:
2088 	kmem_cache_destroy(cic_entry_slab);
2089 	return -ENOMEM;
2090 }
2091 
2092 void f2fs_destroy_compress_cache(void)
2093 {
2094 	kmem_cache_destroy(dic_entry_slab);
2095 	kmem_cache_destroy(cic_entry_slab);
2096 }
2097