xref: /linux/fs/f2fs/compress.c (revision dd9a41bc61cc62d38306465ed62373b98df0049e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/writeback.h>
11 #include <linux/backing-dev.h>
12 #include <linux/lzo.h>
13 #include <linux/lz4.h>
14 #include <linux/zstd.h>
15 
16 #include "f2fs.h"
17 #include "node.h"
18 #include <trace/events/f2fs.h>
19 
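/*
 * Per-algorithm backend operations.  compress_pages/decompress_pages are
 * mandatory; the ctx init/destroy hooks are optional and only used by
 * backends that need a preallocated workspace (LZO/LZ4 on the compress
 * side, ZSTD in both directions).
 */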
20 struct f2fs_compress_ops {
21 	int (*init_compress_ctx)(struct compress_ctx *cc);
22 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
23 	int (*compress_pages)(struct compress_ctx *cc);
24 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
25 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
26 	int (*decompress_pages)(struct decompress_io_ctx *dic);
27 };
28 
29 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
30 {
31 	return index & (cc->cluster_size - 1);
32 }
33 
34 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
35 {
36 	return index >> cc->log_cluster_size;
37 }
38 
39 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
40 {
41 	return cc->cluster_idx << cc->log_cluster_size;
42 }
43 
44 bool f2fs_is_compressed_page(struct page *page)
45 {
46 	if (!PagePrivate(page))
47 		return false;
48 	if (!page_private(page))
49 		return false;
50 	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
51 		return false;
52 	f2fs_bug_on(F2FS_M_SB(page->mapping),
53 		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
54 	return true;
55 }
56 
57 static void f2fs_set_compressed_page(struct page *page,
58 		struct inode *inode, pgoff_t index, void *data)
59 {
60 	SetPagePrivate(page);
61 	set_page_private(page, (unsigned long)data);
62 
63 	/* i_crypto_info and iv index */
64 	page->index = index;
65 	page->mapping = inode->i_mapping;
66 }
67 
68 static void f2fs_put_compressed_page(struct page *page)
69 {
70 	set_page_private(page, (unsigned long)NULL);
71 	ClearPagePrivate(page);
72 	page->mapping = NULL;
73 	unlock_page(page);
74 	put_page(page);
75 }
76 
77 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
78 {
79 	int i;
80 
81 	for (i = 0; i < len; i++) {
82 		if (!cc->rpages[i])
83 			continue;
84 		if (unlock)
85 			unlock_page(cc->rpages[i]);
86 		else
87 			put_page(cc->rpages[i]);
88 	}
89 }
90 
91 static void f2fs_put_rpages(struct compress_ctx *cc)
92 {
93 	f2fs_drop_rpages(cc, cc->cluster_size, false);
94 }
95 
96 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
97 {
98 	f2fs_drop_rpages(cc, len, true);
99 }
100 
101 static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
102 				struct address_space *mapping,
103 				pgoff_t start, int len)
104 {
105 	int i;
106 
107 	for (i = 0; i < len; i++) {
108 		struct page *page = find_get_page(mapping, start + i);
109 
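		/*
		 * Drop both the reference taken by find_get_page() above and
		 * the one held since f2fs_pagecache_get_page() in
		 * prepare_compress_overwrite().
		 */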
110 		put_page(page);
111 		put_page(page);
112 	}
113 }
114 
115 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
116 		struct writeback_control *wbc, bool redirty, int unlock)
117 {
118 	unsigned int i;
119 
120 	for (i = 0; i < cc->cluster_size; i++) {
121 		if (!cc->rpages[i])
122 			continue;
123 		if (redirty)
124 			redirty_page_for_writepage(wbc, cc->rpages[i]);
125 		f2fs_put_page(cc->rpages[i], unlock);
126 	}
127 }
128 
129 struct page *f2fs_compress_control_page(struct page *page)
130 {
131 	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
132 }
133 
134 int f2fs_init_compress_ctx(struct compress_ctx *cc)
135 {
136 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
137 
138 	if (cc->nr_rpages)
139 		return 0;
140 
141 	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
142 					cc->log_cluster_size, GFP_NOFS);
143 	return cc->rpages ? 0 : -ENOMEM;
144 }
145 
146 void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
147 {
148 	kfree(cc->rpages);
149 	cc->rpages = NULL;
150 	cc->nr_rpages = 0;
151 	cc->nr_cpages = 0;
152 	cc->cluster_idx = NULL_CLUSTER;
153 }
154 
155 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
156 {
157 	unsigned int cluster_ofs;
158 
159 	if (!f2fs_cluster_can_merge_page(cc, page->index))
160 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
161 
162 	cluster_ofs = offset_in_cluster(cc, page->index);
163 	cc->rpages[cluster_ofs] = page;
164 	cc->nr_rpages++;
165 	cc->cluster_idx = cluster_idx(cc, page->index);
166 }
167 
168 #ifdef CONFIG_F2FS_FS_LZO
169 static int lzo_init_compress_ctx(struct compress_ctx *cc)
170 {
171 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
172 				LZO1X_MEM_COMPRESS, GFP_NOFS);
173 	if (!cc->private)
174 		return -ENOMEM;
175 
176 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
177 	return 0;
178 }
179 
180 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
181 {
182 	kvfree(cc->private);
183 	cc->private = NULL;
184 }
185 
186 static int lzo_compress_pages(struct compress_ctx *cc)
187 {
188 	int ret;
189 
190 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
191 					&cc->clen, cc->private);
192 	if (ret != LZO_E_OK) {
193 		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
194 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
195 		return -EIO;
196 	}
197 	return 0;
198 }
199 
200 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
201 {
202 	int ret;
203 
204 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
205 						dic->rbuf, &dic->rlen);
206 	if (ret != LZO_E_OK) {
207 		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
208 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
209 		return -EIO;
210 	}
211 
212 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
213 		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
214 					"expected:%lu\n", KERN_ERR,
215 					F2FS_I_SB(dic->inode)->sb->s_id,
216 					dic->rlen,
217 					PAGE_SIZE << dic->log_cluster_size);
218 		return -EIO;
219 	}
220 	return 0;
221 }
222 
223 static const struct f2fs_compress_ops f2fs_lzo_ops = {
224 	.init_compress_ctx	= lzo_init_compress_ctx,
225 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
226 	.compress_pages		= lzo_compress_pages,
227 	.decompress_pages	= lzo_decompress_pages,
228 };
229 #endif
230 
231 #ifdef CONFIG_F2FS_FS_LZ4
232 static int lz4_init_compress_ctx(struct compress_ctx *cc)
233 {
234 	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
235 				LZ4_MEM_COMPRESS, GFP_NOFS);
236 	if (!cc->private)
237 		return -ENOMEM;
238 
239 	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
240 	return 0;
241 }
242 
243 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
244 {
245 	kvfree(cc->private);
246 	cc->private = NULL;
247 }
248 
249 static int lz4_compress_pages(struct compress_ctx *cc)
250 {
251 	int len;
252 
253 	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
254 						cc->clen, cc->private);
255 	if (!len) {
256 		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
257 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
258 		return -EIO;
259 	}
260 	cc->clen = len;
261 	return 0;
262 }
263 
264 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
265 {
266 	int ret;
267 
268 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
269 						dic->clen, dic->rlen);
270 	if (ret < 0) {
271 		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
272 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
273 		return -EIO;
274 	}
275 
276 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
277 		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
278 					"expected:%lu\n", KERN_ERR,
279 					F2FS_I_SB(dic->inode)->sb->s_id,
280 					dic->rlen,
281 					PAGE_SIZE << dic->log_cluster_size);
282 		return -EIO;
283 	}
284 	return 0;
285 }
286 
287 static const struct f2fs_compress_ops f2fs_lz4_ops = {
288 	.init_compress_ctx	= lz4_init_compress_ctx,
289 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
290 	.compress_pages		= lz4_compress_pages,
291 	.decompress_pages	= lz4_decompress_pages,
292 };
293 #endif
294 
295 #ifdef CONFIG_F2FS_FS_ZSTD
296 #define F2FS_ZSTD_DEFAULT_CLEVEL	1
297 
298 static int zstd_init_compress_ctx(struct compress_ctx *cc)
299 {
300 	ZSTD_parameters params;
301 	ZSTD_CStream *stream;
302 	void *workspace;
303 	unsigned int workspace_size;
304 
305 	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
306 	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
307 
308 	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
309 					workspace_size, GFP_NOFS);
310 	if (!workspace)
311 		return -ENOMEM;
312 
313 	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
314 	if (!stream) {
315 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
316 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
317 				__func__);
318 		kvfree(workspace);
319 		return -EIO;
320 	}
321 
322 	cc->private = workspace;
323 	cc->private2 = stream;
324 
325 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
326 	return 0;
327 }
328 
329 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
330 {
331 	kvfree(cc->private);
332 	cc->private = NULL;
333 	cc->private2 = NULL;
334 }
335 
336 static int zstd_compress_pages(struct compress_ctx *cc)
337 {
338 	ZSTD_CStream *stream = cc->private2;
339 	ZSTD_inBuffer inbuf;
340 	ZSTD_outBuffer outbuf;
341 	int src_size = cc->rlen;
342 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
343 	int ret;
344 
345 	inbuf.pos = 0;
346 	inbuf.src = cc->rbuf;
347 	inbuf.size = src_size;
348 
349 	outbuf.pos = 0;
350 	outbuf.dst = cc->cbuf->cdata;
351 	outbuf.size = dst_size;
352 
353 	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
354 	if (ZSTD_isError(ret)) {
355 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
356 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
357 				__func__, ZSTD_getErrorCode(ret));
358 		return -EIO;
359 	}
360 
361 	ret = ZSTD_endStream(stream, &outbuf);
362 	if (ZSTD_isError(ret)) {
363 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
364 				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
365 				__func__, ZSTD_getErrorCode(ret));
366 		return -EIO;
367 	}
368 
369 	cc->clen = outbuf.pos;
370 	return 0;
371 }
372 
373 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
374 {
375 	ZSTD_DStream *stream;
376 	void *workspace;
377 	unsigned int workspace_size;
378 
379 	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
380 
381 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
382 					workspace_size, GFP_NOFS);
383 	if (!workspace)
384 		return -ENOMEM;
385 
386 	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
387 					workspace, workspace_size);
388 	if (!stream) {
389 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
390 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
391 				__func__);
392 		kvfree(workspace);
393 		return -EIO;
394 	}
395 
396 	dic->private = workspace;
397 	dic->private2 = stream;
398 
399 	return 0;
400 }
401 
402 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
403 {
404 	kvfree(dic->private);
405 	dic->private = NULL;
406 	dic->private2 = NULL;
407 }
408 
409 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
410 {
411 	ZSTD_DStream *stream = dic->private2;
412 	ZSTD_inBuffer inbuf;
413 	ZSTD_outBuffer outbuf;
414 	int ret;
415 
416 	inbuf.pos = 0;
417 	inbuf.src = dic->cbuf->cdata;
418 	inbuf.size = dic->clen;
419 
420 	outbuf.pos = 0;
421 	outbuf.dst = dic->rbuf;
422 	outbuf.size = dic->rlen;
423 
424 	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
425 	if (ZSTD_isError(ret)) {
426 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
427 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
428 				__func__, ZSTD_getErrorCode(ret));
429 		return -EIO;
430 	}
431 
432 	if (dic->rlen != outbuf.pos) {
433 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
434 				"expected:%lu\n", KERN_ERR,
435 				F2FS_I_SB(dic->inode)->sb->s_id,
436 				__func__, dic->rlen,
437 				PAGE_SIZE << dic->log_cluster_size);
438 		return -EIO;
439 	}
440 
441 	return 0;
442 }
443 
444 static const struct f2fs_compress_ops f2fs_zstd_ops = {
445 	.init_compress_ctx	= zstd_init_compress_ctx,
446 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
447 	.compress_pages		= zstd_compress_pages,
448 	.init_decompress_ctx	= zstd_init_decompress_ctx,
449 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
450 	.decompress_pages	= zstd_decompress_pages,
451 };
452 #endif
453 
454 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
455 #ifdef CONFIG_F2FS_FS_LZO
456 	&f2fs_lzo_ops,
457 #else
458 	NULL,
459 #endif
460 #ifdef CONFIG_F2FS_FS_LZ4
461 	&f2fs_lz4_ops,
462 #else
463 	NULL,
464 #endif
465 #ifdef CONFIG_F2FS_FS_ZSTD
466 	&f2fs_zstd_ops,
467 #else
468 	NULL,
469 #endif
470 };
471 
472 bool f2fs_is_compress_backend_ready(struct inode *inode)
473 {
474 	if (!f2fs_compressed_file(inode))
475 		return true;
476 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
477 }
478 
479 static struct page *f2fs_grab_page(void)
480 {
481 	struct page *page;
482 
483 	page = alloc_page(GFP_NOFS);
484 	if (!page)
485 		return NULL;
486 	lock_page(page);
487 	return page;
488 }
489 
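/*
 * Compress one cluster: allocate worst-case destination pages, vmap the
 * source (rpages) and destination (cpages) into contiguous buffers, run the
 * backend, then free the destination pages the compressed result does not
 * need.  Returns -EAGAIN if the result would not save at least one block,
 * in which case the cluster is written out uncompressed.
 */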
490 static int f2fs_compress_pages(struct compress_ctx *cc)
491 {
492 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
493 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
494 	const struct f2fs_compress_ops *cops =
495 				f2fs_cops[fi->i_compress_algorithm];
496 	unsigned int max_len, nr_cpages;
497 	int i, ret;
498 
499 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
500 				cc->cluster_size, fi->i_compress_algorithm);
501 
502 	if (cops->init_compress_ctx) {
503 		ret = cops->init_compress_ctx(cc);
504 		if (ret)
505 			goto out;
506 	}
507 
508 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
509 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
510 
511 	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
512 					cc->nr_cpages, GFP_NOFS);
513 	if (!cc->cpages) {
514 		ret = -ENOMEM;
515 		goto destroy_compress_ctx;
516 	}
517 
518 	for (i = 0; i < cc->nr_cpages; i++) {
519 		cc->cpages[i] = f2fs_grab_page();
520 		if (!cc->cpages[i]) {
521 			ret = -ENOMEM;
522 			goto out_free_cpages;
523 		}
524 	}
525 
526 	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
527 	if (!cc->rbuf) {
528 		ret = -ENOMEM;
529 		goto out_free_cpages;
530 	}
531 
532 	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
533 	if (!cc->cbuf) {
534 		ret = -ENOMEM;
535 		goto out_vunmap_rbuf;
536 	}
537 
538 	ret = cops->compress_pages(cc);
539 	if (ret)
540 		goto out_vunmap_cbuf;
541 
542 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
543 
544 	if (cc->clen > max_len) {
545 		ret = -EAGAIN;
546 		goto out_vunmap_cbuf;
547 	}
548 
549 	cc->cbuf->clen = cpu_to_le32(cc->clen);
550 
551 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
552 		cc->cbuf->reserved[i] = cpu_to_le32(0);
553 
554 	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
555 
556 	/* zero out any unused part of the last page */
557 	memset(&cc->cbuf->cdata[cc->clen], 0,
558 	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
559 
560 	vunmap(cc->cbuf);
561 	vunmap(cc->rbuf);
562 
563 	for (i = nr_cpages; i < cc->nr_cpages; i++) {
564 		f2fs_put_compressed_page(cc->cpages[i]);
565 		cc->cpages[i] = NULL;
566 	}
567 
568 	if (cops->destroy_compress_ctx)
569 		cops->destroy_compress_ctx(cc);
570 
571 	cc->nr_cpages = nr_cpages;
572 
573 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
574 							cc->clen, ret);
575 	return 0;
576 
577 out_vunmap_cbuf:
578 	vunmap(cc->cbuf);
579 out_vunmap_rbuf:
580 	vunmap(cc->rbuf);
581 out_free_cpages:
582 	for (i = 0; i < cc->nr_cpages; i++) {
583 		if (cc->cpages[i])
584 			f2fs_put_compressed_page(cc->cpages[i]);
585 	}
586 	kfree(cc->cpages);
587 	cc->cpages = NULL;
588 destroy_compress_ctx:
589 	if (cops->destroy_compress_ctx)
590 		cops->destroy_compress_ctx(cc);
591 out:
592 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
593 							cc->clen, ret);
594 	return ret;
595 }
596 
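/*
 * Read end-io handler for one compressed page.  Only the caller that drops
 * the last reference on the decompress context performs the actual
 * decompression into the target pages; with fs-verity enabled, page
 * verification and freeing of the context are deferred to the verity work
 * instead.
 */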
597 void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
598 {
599 	struct decompress_io_ctx *dic =
600 			(struct decompress_io_ctx *)page_private(page);
601 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
602 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
603 	const struct f2fs_compress_ops *cops =
604 			f2fs_cops[fi->i_compress_algorithm];
605 	int ret;
606 
607 	dec_page_count(sbi, F2FS_RD_DATA);
608 
609 	if (bio->bi_status || PageError(page))
610 		dic->failed = true;
611 
612 	if (refcount_dec_not_one(&dic->ref))
613 		return;
614 
615 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
616 				dic->cluster_size, fi->i_compress_algorithm);
617 
618 	/* submit partial compressed pages */
619 	if (dic->failed) {
620 		ret = -EIO;
621 		goto out_free_dic;
622 	}
623 
624 	if (cops->init_decompress_ctx) {
625 		ret = cops->init_decompress_ctx(dic);
626 		if (ret)
627 			goto out_free_dic;
628 	}
629 
630 	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
631 	if (!dic->rbuf) {
632 		ret = -ENOMEM;
633 		goto destroy_decompress_ctx;
634 	}
635 
636 	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
637 	if (!dic->cbuf) {
638 		ret = -ENOMEM;
639 		goto out_vunmap_rbuf;
640 	}
641 
642 	dic->clen = le32_to_cpu(dic->cbuf->clen);
643 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
644 
645 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
646 		ret = -EFSCORRUPTED;
647 		goto out_vunmap_cbuf;
648 	}
649 
650 	ret = cops->decompress_pages(dic);
651 
652 out_vunmap_cbuf:
653 	vunmap(dic->cbuf);
654 out_vunmap_rbuf:
655 	vunmap(dic->rbuf);
656 destroy_decompress_ctx:
657 	if (cops->destroy_decompress_ctx)
658 		cops->destroy_decompress_ctx(dic);
659 out_free_dic:
660 	if (verity)
661 		refcount_set(&dic->ref, dic->nr_cpages);
662 	if (!verity)
663 		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
664 								ret, false);
665 
666 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
667 							dic->clen, ret);
668 	if (!verity)
669 		f2fs_free_dic(dic);
670 }
671 
672 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
673 {
674 	if (cc->cluster_idx == NULL_CLUSTER)
675 		return true;
676 	return cc->cluster_idx == cluster_idx(cc, index);
677 }
678 
679 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
680 {
681 	return cc->nr_rpages == 0;
682 }
683 
684 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
685 {
686 	return cc->cluster_size == cc->nr_rpages;
687 }
688 
689 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
690 {
691 	if (f2fs_cluster_is_empty(cc))
692 		return true;
693 	return is_page_in_cluster(cc, index);
694 }
695 
696 static bool __cluster_may_compress(struct compress_ctx *cc)
697 {
698 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
699 	loff_t i_size = i_size_read(cc->inode);
700 	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
701 	int i;
702 
703 	for (i = 0; i < cc->cluster_size; i++) {
704 		struct page *page = cc->rpages[i];
705 
706 		f2fs_bug_on(sbi, !page);
707 
708 		if (unlikely(f2fs_cp_error(sbi)))
709 			return false;
710 		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
711 			return false;
712 
713 		/* beyond EOF */
714 		if (page->index >= nr_pages)
715 			return false;
716 	}
717 	return true;
718 }
719 
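/*
 * Returns 0 if the cluster is not stored in compressed format, otherwise
 * 1 (for the COMPRESS_ADDR header slot) plus the number of valid data
 * blocks (compr == true) or non-NULL blocks (compr == false) in the cluster.
 */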
720 static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
721 {
722 	struct dnode_of_data dn;
723 	int ret;
724 
725 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
726 	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
727 							LOOKUP_NODE);
728 	if (ret) {
729 		if (ret == -ENOENT)
730 			ret = 0;
731 		goto fail;
732 	}
733 
734 	if (dn.data_blkaddr == COMPRESS_ADDR) {
735 		int i;
736 
737 		ret = 1;
738 		for (i = 1; i < cc->cluster_size; i++) {
739 			block_t blkaddr;
740 
741 			blkaddr = data_blkaddr(dn.inode,
742 					dn.node_page, dn.ofs_in_node + i);
743 			if (compr) {
744 				if (__is_valid_data_blkaddr(blkaddr))
745 					ret++;
746 			} else {
747 				if (blkaddr != NULL_ADDR)
748 					ret++;
749 			}
750 		}
751 	}
752 fail:
753 	f2fs_put_dnode(&dn);
754 	return ret;
755 }
756 
757 /* return # of compressed blocks in compressed cluster */
758 static int f2fs_compressed_blocks(struct compress_ctx *cc)
759 {
760 	return __f2fs_cluster_blocks(cc, true);
761 }
762 
763 /* return # of valid blocks in compressed cluster */
764 static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
765 {
766 	return __f2fs_cluster_blocks(cc, false);
767 }
768 
769 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
770 {
771 	struct compress_ctx cc = {
772 		.inode = inode,
773 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
774 		.cluster_size = F2FS_I(inode)->i_cluster_size,
775 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
776 	};
777 
778 	return f2fs_cluster_blocks(&cc, false);
779 }
780 
781 static bool cluster_may_compress(struct compress_ctx *cc)
782 {
783 	if (!f2fs_compressed_file(cc->inode))
784 		return false;
785 	if (f2fs_is_atomic_file(cc->inode))
786 		return false;
787 	if (f2fs_is_mmap_file(cc->inode))
788 		return false;
789 	if (!f2fs_cluster_is_full(cc))
790 		return false;
791 	return __cluster_may_compress(cc);
792 }
793 
794 static void set_cluster_writeback(struct compress_ctx *cc)
795 {
796 	int i;
797 
798 	for (i = 0; i < cc->cluster_size; i++) {
799 		if (cc->rpages[i])
800 			set_page_writeback(cc->rpages[i]);
801 	}
802 }
803 
804 static void set_cluster_dirty(struct compress_ctx *cc)
805 {
806 	int i;
807 
808 	for (i = 0; i < cc->cluster_size; i++)
809 		if (cc->rpages[i])
810 			set_page_dirty(cc->rpages[i]);
811 }
812 
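/*
 * Bring every page of the cluster up to date and locked before a partial
 * overwrite: read (and decompress) the existing cluster if needed, then
 * reserve blocks for clusters that are only partially allocated.
 */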
813 static int prepare_compress_overwrite(struct compress_ctx *cc,
814 		struct page **pagep, pgoff_t index, void **fsdata)
815 {
816 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
817 	struct address_space *mapping = cc->inode->i_mapping;
818 	struct page *page;
819 	struct dnode_of_data dn;
820 	sector_t last_block_in_bio;
821 	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
822 	pgoff_t start_idx = start_idx_of_cluster(cc);
823 	int i, ret;
824 	bool prealloc;
825 
826 retry:
827 	ret = f2fs_cluster_blocks(cc, false);
828 	if (ret <= 0)
829 		return ret;
830 
831 	/* compressed case */
832 	prealloc = (ret < cc->cluster_size);
833 
834 	ret = f2fs_init_compress_ctx(cc);
835 	if (ret)
836 		return ret;
837 
838 	/* keep page reference to avoid page reclaim */
839 	for (i = 0; i < cc->cluster_size; i++) {
840 		page = f2fs_pagecache_get_page(mapping, start_idx + i,
841 							fgp_flag, GFP_NOFS);
842 		if (!page) {
843 			ret = -ENOMEM;
844 			goto unlock_pages;
845 		}
846 
847 		if (PageUptodate(page))
848 			unlock_page(page);
849 		else
850 			f2fs_compress_ctx_add_page(cc, page);
851 	}
852 
853 	if (!f2fs_cluster_is_empty(cc)) {
854 		struct bio *bio = NULL;
855 
856 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
857 					&last_block_in_bio, false, true);
858 		f2fs_destroy_compress_ctx(cc);
859 		if (ret)
860 			goto release_pages;
861 		if (bio)
862 			f2fs_submit_bio(sbi, bio, DATA);
863 
864 		ret = f2fs_init_compress_ctx(cc);
865 		if (ret)
866 			goto release_pages;
867 	}
868 
869 	for (i = 0; i < cc->cluster_size; i++) {
870 		f2fs_bug_on(sbi, cc->rpages[i]);
871 
872 		page = find_lock_page(mapping, start_idx + i);
873 		f2fs_bug_on(sbi, !page);
874 
875 		f2fs_wait_on_page_writeback(page, DATA, true, true);
876 
877 		f2fs_compress_ctx_add_page(cc, page);
878 		f2fs_put_page(page, 0);
879 
880 		if (!PageUptodate(page)) {
881 			f2fs_unlock_rpages(cc, i + 1);
882 			f2fs_put_rpages_mapping(cc, mapping, start_idx,
883 					cc->cluster_size);
884 			f2fs_destroy_compress_ctx(cc);
885 			goto retry;
886 		}
887 	}
888 
889 	if (prealloc) {
890 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
891 
892 		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
893 
894 		for (i = cc->cluster_size - 1; i > 0; i--) {
895 			ret = f2fs_get_block(&dn, start_idx + i);
896 			if (ret) {
897 				i = cc->cluster_size;
898 				break;
899 			}
900 
901 			if (dn.data_blkaddr != NEW_ADDR)
902 				break;
903 		}
904 
905 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
906 	}
907 
908 	if (likely(!ret)) {
909 		*fsdata = cc->rpages;
910 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
911 		return cc->cluster_size;
912 	}
913 
914 unlock_pages:
915 	f2fs_unlock_rpages(cc, i);
916 release_pages:
917 	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
918 	f2fs_destroy_compress_ctx(cc);
919 	return ret;
920 }
921 
922 int f2fs_prepare_compress_overwrite(struct inode *inode,
923 		struct page **pagep, pgoff_t index, void **fsdata)
924 {
925 	struct compress_ctx cc = {
926 		.inode = inode,
927 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
928 		.cluster_size = F2FS_I(inode)->i_cluster_size,
929 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
930 		.rpages = NULL,
931 		.nr_rpages = 0,
932 	};
933 
934 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
935 }
936 
937 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
938 					pgoff_t index, unsigned copied)
940 {
941 	struct compress_ctx cc = {
942 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
943 		.cluster_size = F2FS_I(inode)->i_cluster_size,
944 		.rpages = fsdata,
945 	};
946 	bool first_index = (index == cc.rpages[0]->index);
947 
948 	if (copied)
949 		set_cluster_dirty(&cc);
950 
951 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
952 	f2fs_destroy_compress_ctx(&cc);
953 
954 	return first_index;
955 }
956 
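/*
 * Write back one cluster in compressed form: the first block address is set
 * to COMPRESS_ADDR, the compressed payload is written to the next nr_cpages
 * blocks, and any remaining blocks of the cluster are invalidated and left
 * reserved as NEW_ADDR.  Returns -EAGAIN so the caller falls back to raw
 * page writes when the cluster cannot be written compressed here.
 */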
957 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
958 					int *submitted,
959 					struct writeback_control *wbc,
960 					enum iostat_type io_type)
961 {
962 	struct inode *inode = cc->inode;
963 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
964 	struct f2fs_inode_info *fi = F2FS_I(inode);
965 	struct f2fs_io_info fio = {
966 		.sbi = sbi,
967 		.ino = cc->inode->i_ino,
968 		.type = DATA,
969 		.op = REQ_OP_WRITE,
970 		.op_flags = wbc_to_write_flags(wbc),
971 		.old_blkaddr = NEW_ADDR,
972 		.page = NULL,
973 		.encrypted_page = NULL,
974 		.compressed_page = NULL,
975 		.submitted = false,
976 		.io_type = io_type,
977 		.io_wbc = wbc,
978 		.encrypted = f2fs_encrypted_file(cc->inode),
979 	};
980 	struct dnode_of_data dn;
981 	struct node_info ni;
982 	struct compress_io_ctx *cic;
983 	pgoff_t start_idx = start_idx_of_cluster(cc);
984 	unsigned int last_index = cc->cluster_size - 1;
985 	loff_t psize;
986 	int i, err;
987 
988 	if (!f2fs_trylock_op(sbi))
989 		return -EAGAIN;
990 
991 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
992 
993 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
994 	if (err)
995 		goto out_unlock_op;
996 
997 	for (i = 0; i < cc->cluster_size; i++) {
998 		if (data_blkaddr(dn.inode, dn.node_page,
999 					dn.ofs_in_node + i) == NULL_ADDR)
1000 			goto out_put_dnode;
1001 	}
1002 
1003 	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1004 
1005 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1006 	if (err)
1007 		goto out_put_dnode;
1008 
1009 	fio.version = ni.version;
1010 
1011 	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
1012 	if (!cic)
1013 		goto out_put_dnode;
1014 
1015 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1016 	cic->inode = inode;
1017 	refcount_set(&cic->ref, cc->nr_cpages);
1018 	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
1019 			cc->log_cluster_size, GFP_NOFS);
1020 	if (!cic->rpages)
1021 		goto out_put_cic;
1022 
1023 	cic->nr_rpages = cc->cluster_size;
1024 
1025 	for (i = 0; i < cc->nr_cpages; i++) {
1026 		f2fs_set_compressed_page(cc->cpages[i], inode,
1027 					cc->rpages[i + 1]->index, cic);
1028 		fio.compressed_page = cc->cpages[i];
1029 		if (fio.encrypted) {
1030 			fio.page = cc->rpages[i + 1];
1031 			err = f2fs_encrypt_one_page(&fio);
1032 			if (err)
1033 				goto out_destroy_crypt;
1034 			cc->cpages[i] = fio.encrypted_page;
1035 		}
1036 	}
1037 
1038 	set_cluster_writeback(cc);
1039 
1040 	for (i = 0; i < cc->cluster_size; i++)
1041 		cic->rpages[i] = cc->rpages[i];
1042 
1043 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1044 		block_t blkaddr;
1045 
1046 		blkaddr = f2fs_data_blkaddr(&dn);
1047 		fio.page = cc->rpages[i];
1048 		fio.old_blkaddr = blkaddr;
1049 
1050 		/* cluster header */
1051 		if (i == 0) {
1052 			if (blkaddr == COMPRESS_ADDR)
1053 				fio.compr_blocks++;
1054 			if (__is_valid_data_blkaddr(blkaddr))
1055 				f2fs_invalidate_blocks(sbi, blkaddr);
1056 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1057 			goto unlock_continue;
1058 		}
1059 
1060 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1061 			fio.compr_blocks++;
1062 
1063 		if (i > cc->nr_cpages) {
1064 			if (__is_valid_data_blkaddr(blkaddr)) {
1065 				f2fs_invalidate_blocks(sbi, blkaddr);
1066 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1067 			}
1068 			goto unlock_continue;
1069 		}
1070 
1071 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1072 
1073 		if (fio.encrypted)
1074 			fio.encrypted_page = cc->cpages[i - 1];
1075 		else
1076 			fio.compressed_page = cc->cpages[i - 1];
1077 
1078 		cc->cpages[i - 1] = NULL;
1079 		f2fs_outplace_write_data(&dn, &fio);
1080 		(*submitted)++;
1081 unlock_continue:
1082 		inode_dec_dirty_pages(cc->inode);
1083 		unlock_page(fio.page);
1084 	}
1085 
1086 	if (fio.compr_blocks)
1087 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1088 	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
1089 
1090 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1091 	if (cc->cluster_idx == 0)
1092 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1093 
1094 	f2fs_put_dnode(&dn);
1095 	f2fs_unlock_op(sbi);
1096 
1097 	spin_lock(&fi->i_size_lock);
1098 	if (fi->last_disk_size < psize)
1099 		fi->last_disk_size = psize;
1100 	spin_unlock(&fi->i_size_lock);
1101 
1102 	f2fs_put_rpages(cc);
1103 	f2fs_destroy_compress_ctx(cc);
1104 	return 0;
1105 
1106 out_destroy_crypt:
1107 	kfree(cic->rpages);
1108 
1109 	for (--i; i >= 0; i--)
1110 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1111 	for (i = 0; i < cc->nr_cpages; i++) {
1112 		if (!cc->cpages[i])
1113 			continue;
1114 		f2fs_put_page(cc->cpages[i], 1);
1115 	}
1116 out_put_cic:
1117 	kfree(cic);
1118 out_put_dnode:
1119 	f2fs_put_dnode(&dn);
1120 out_unlock_op:
1121 	f2fs_unlock_op(sbi);
1122 	return -EAGAIN;
1123 }
1124 
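/*
 * Write end-io for a compressed page: once the last page of the cluster
 * completes, end writeback on all raw pages and free the write context.
 */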
1125 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1126 {
1127 	struct f2fs_sb_info *sbi = bio->bi_private;
1128 	struct compress_io_ctx *cic =
1129 			(struct compress_io_ctx *)page_private(page);
1130 	int i;
1131 
1132 	if (unlikely(bio->bi_status))
1133 		mapping_set_error(cic->inode->i_mapping, -EIO);
1134 
1135 	f2fs_put_compressed_page(page);
1136 
1137 	dec_page_count(sbi, F2FS_WB_DATA);
1138 
1139 	if (refcount_dec_not_one(&cic->ref))
1140 		return;
1141 
1142 	for (i = 0; i < cic->nr_rpages; i++) {
1143 		WARN_ON(!cic->rpages[i]);
1144 		clear_cold_data(cic->rpages[i]);
1145 		end_page_writeback(cic->rpages[i]);
1146 	}
1147 
1148 	kfree(cic->rpages);
1149 	kfree(cic);
1150 }
1151 
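/*
 * Fallback path: write the cluster's dirty pages one by one without
 * compression, redirtying the remaining pages on error.
 */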
1152 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1153 					int *submitted,
1154 					struct writeback_control *wbc,
1155 					enum iostat_type io_type)
1156 {
1157 	struct address_space *mapping = cc->inode->i_mapping;
1158 	int _submitted, compr_blocks, ret;
1159 	int i = -1, err = 0;
1160 
1161 	compr_blocks = f2fs_compressed_blocks(cc);
1162 	if (compr_blocks < 0) {
1163 		err = compr_blocks;
1164 		goto out_err;
1165 	}
1166 
1167 	for (i = 0; i < cc->cluster_size; i++) {
1168 		if (!cc->rpages[i])
1169 			continue;
1170 retry_write:
1171 		if (cc->rpages[i]->mapping != mapping) {
1172 			unlock_page(cc->rpages[i]);
1173 			continue;
1174 		}
1175 
1176 		BUG_ON(!PageLocked(cc->rpages[i]));
1177 
1178 		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1179 						NULL, NULL, wbc, io_type,
1180 						compr_blocks);
1181 		if (ret) {
1182 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
1183 				unlock_page(cc->rpages[i]);
1184 				ret = 0;
1185 			} else if (ret == -EAGAIN) {
1186 				/*
1187 				 * for quota file, just redirty left pages to
1188 				 * avoid deadlock caused by cluster update race
1189 				 * from foreground operation.
1190 				 */
1191 				if (IS_NOQUOTA(cc->inode)) {
1192 					err = 0;
1193 					goto out_err;
1194 				}
1195 				ret = 0;
1196 				cond_resched();
1197 				congestion_wait(BLK_RW_ASYNC,
1198 						DEFAULT_IO_TIMEOUT);
1199 				lock_page(cc->rpages[i]);
1200 				clear_page_dirty_for_io(cc->rpages[i]);
1201 				goto retry_write;
1202 			}
1203 			err = ret;
1204 			goto out_err;
1205 		}
1206 
1207 		*submitted += _submitted;
1208 	}
1209 	return 0;
1210 out_err:
1211 	for (++i; i < cc->cluster_size; i++) {
1212 		if (!cc->rpages[i])
1213 			continue;
1214 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1215 		unlock_page(cc->rpages[i]);
1216 	}
1217 	return err;
1218 }
1219 
1220 int f2fs_write_multi_pages(struct compress_ctx *cc,
1221 					int *submitted,
1222 					struct writeback_control *wbc,
1223 					enum iostat_type io_type)
1224 {
1225 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
1226 	const struct f2fs_compress_ops *cops =
1227 			f2fs_cops[fi->i_compress_algorithm];
1228 	int err;
1229 
1230 	*submitted = 0;
1231 	if (cluster_may_compress(cc)) {
1232 		err = f2fs_compress_pages(cc);
1233 		if (err == -EAGAIN) {
1234 			goto write;
1235 		} else if (err) {
1236 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1237 			goto destroy_out;
1238 		}
1239 
1240 		err = f2fs_write_compressed_pages(cc, submitted,
1241 							wbc, io_type);
1242 		cops->destroy_compress_ctx(cc);
1243 		if (!err)
1244 			return 0;
1245 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1246 	}
1247 write:
1248 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1249 
1250 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1251 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1252 destroy_out:
1253 	f2fs_destroy_compress_ctx(cc);
1254 	return err;
1255 }
1256 
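/*
 * Build the decompress context for a cluster read: cpages hold the on-disk
 * compressed data, tpages are the decompression targets (the caller's
 * rpages where available, temporary pages for holes in the request).
 */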
1257 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1258 {
1259 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1260 	struct decompress_io_ctx *dic;
1261 	pgoff_t start_idx = start_idx_of_cluster(cc);
1262 	int i;
1263 
1264 	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
1265 	if (!dic)
1266 		return ERR_PTR(-ENOMEM);
1267 
1268 	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
1269 			cc->log_cluster_size, GFP_NOFS);
1270 	if (!dic->rpages) {
1271 		kfree(dic);
1272 		return ERR_PTR(-ENOMEM);
1273 	}
1274 
1275 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1276 	dic->inode = cc->inode;
1277 	refcount_set(&dic->ref, cc->nr_cpages);
1278 	dic->cluster_idx = cc->cluster_idx;
1279 	dic->cluster_size = cc->cluster_size;
1280 	dic->log_cluster_size = cc->log_cluster_size;
1281 	dic->nr_cpages = cc->nr_cpages;
1282 	dic->failed = false;
1283 
1284 	for (i = 0; i < dic->cluster_size; i++)
1285 		dic->rpages[i] = cc->rpages[i];
1286 	dic->nr_rpages = cc->cluster_size;
1287 
1288 	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
1289 					dic->nr_cpages, GFP_NOFS);
1290 	if (!dic->cpages)
1291 		goto out_free;
1292 
1293 	for (i = 0; i < dic->nr_cpages; i++) {
1294 		struct page *page;
1295 
1296 		page = f2fs_grab_page();
1297 		if (!page)
1298 			goto out_free;
1299 
1300 		f2fs_set_compressed_page(page, cc->inode,
1301 					start_idx + i + 1, dic);
1302 		dic->cpages[i] = page;
1303 	}
1304 
1305 	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
1306 					dic->cluster_size, GFP_NOFS);
1307 	if (!dic->tpages)
1308 		goto out_free;
1309 
1310 	for (i = 0; i < dic->cluster_size; i++) {
1311 		if (cc->rpages[i]) {
1312 			dic->tpages[i] = cc->rpages[i];
1313 			continue;
1314 		}
1315 
1316 		dic->tpages[i] = f2fs_grab_page();
1317 		if (!dic->tpages[i])
1318 			goto out_free;
1319 	}
1320 
1321 	return dic;
1322 
1323 out_free:
1324 	f2fs_free_dic(dic);
1325 	return ERR_PTR(-ENOMEM);
1326 }
1327 
1328 void f2fs_free_dic(struct decompress_io_ctx *dic)
1329 {
1330 	int i;
1331 
1332 	if (dic->tpages) {
1333 		for (i = 0; i < dic->cluster_size; i++) {
1334 			if (dic->rpages[i])
1335 				continue;
1336 			if (!dic->tpages[i])
1337 				continue;
1338 			unlock_page(dic->tpages[i]);
1339 			put_page(dic->tpages[i]);
1340 		}
1341 		kfree(dic->tpages);
1342 	}
1343 
1344 	if (dic->cpages) {
1345 		for (i = 0; i < dic->nr_cpages; i++) {
1346 			if (!dic->cpages[i])
1347 				continue;
1348 			f2fs_put_compressed_page(dic->cpages[i]);
1349 		}
1350 		kfree(dic->cpages);
1351 	}
1352 
1353 	kfree(dic->rpages);
1354 	kfree(dic);
1355 }
1356 
1357 void f2fs_decompress_end_io(struct page **rpages,
1358 			unsigned int cluster_size, bool err, bool verity)
1359 {
1360 	int i;
1361 
1362 	for (i = 0; i < cluster_size; i++) {
1363 		struct page *rpage = rpages[i];
1364 
1365 		if (!rpage)
1366 			continue;
1367 
1368 		if (err || PageError(rpage))
1369 			goto clear_uptodate;
1370 
1371 		if (!verity || fsverity_verify_page(rpage)) {
1372 			SetPageUptodate(rpage);
1373 			goto unlock;
1374 		}
1375 clear_uptodate:
1376 		ClearPageUptodate(rpage);
1377 		ClearPageError(rpage);
1378 unlock:
1379 		unlock_page(rpage);
1380 	}
1381 }
1382