xref: /linux/fs/f2fs/compress.c (revision 0974f486f3dde9df1ad979d4ff341dc9c2d545f5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17 
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22 
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25 
26 static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
27 {
28 	unsigned int size = sizeof(struct page *) * nr;
29 
30 	if (likely(size <= sbi->page_array_slab_size))
31 		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
32 					GFP_F2FS_ZERO, false, sbi);
33 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
34 }
35 
36 static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
37 {
38 	unsigned int size = sizeof(struct page *) * nr;
39 
40 	if (!pages)
41 		return;
42 
43 	if (likely(size <= sbi->page_array_slab_size))
44 		kmem_cache_free(sbi->page_array_slab, pages);
45 	else
46 		kfree(pages);
47 }
48 
49 struct f2fs_compress_ops {
50 	int (*init_compress_ctx)(struct compress_ctx *cc);
51 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
52 	int (*compress_pages)(struct compress_ctx *cc);
53 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
54 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
55 	int (*decompress_pages)(struct decompress_io_ctx *dic);
56 	bool (*is_level_valid)(int level);
57 };
58 
59 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
60 {
61 	return index & (cc->cluster_size - 1);
62 }
63 
64 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
65 {
66 	return index >> cc->log_cluster_size;
67 }
68 
69 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
70 {
71 	return cc->cluster_idx << cc->log_cluster_size;
72 }
73 
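/*
 * Illustrative sketch (not part of this file): how the three helpers above
 * carve the page-index space into clusters, assuming log_cluster_size == 2,
 * i.e. a 4-page cluster:
 *
 *	cc->cluster_size = 1 << cc->log_cluster_size;	4 pages per cluster
 *	offset_in_cluster(cc, 7);	7 & (4 - 1) == 3, last slot in cluster
 *	cluster_idx(cc, 7);		7 >> 2 == 1, the second cluster
 *	start_idx_of_cluster(cc);	1 << 2 == 4, once cc->cluster_idx == 1
 */
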
74 bool f2fs_is_compressed_page(struct folio *folio)
75 {
76 	if (!folio->private)
77 		return false;
78 	if (folio_test_f2fs_nonpointer(folio))
79 		return false;
80 
81 	f2fs_bug_on(F2FS_F_SB(folio),
82 		*((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC);
83 	return true;
84 }
85 
86 static void f2fs_set_compressed_page(struct page *page,
87 		struct inode *inode, pgoff_t index, void *data)
88 {
89 	struct folio *folio = page_folio(page);
90 
91 	folio_attach_private(folio, (void *)data);
92 
93 	/* i_crypto_info and iv index */
94 	folio->index = index;
95 	folio->mapping = inode->i_mapping;
96 }
97 
98 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
99 {
100 	int i;
101 
102 	for (i = 0; i < len; i++) {
103 		if (!cc->rpages[i])
104 			continue;
105 		if (unlock)
106 			unlock_page(cc->rpages[i]);
107 		else
108 			put_page(cc->rpages[i]);
109 	}
110 }
111 
112 static void f2fs_put_rpages(struct compress_ctx *cc)
113 {
114 	f2fs_drop_rpages(cc, cc->cluster_size, false);
115 }
116 
117 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
118 {
119 	f2fs_drop_rpages(cc, len, true);
120 }
121 
122 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
123 		struct writeback_control *wbc, bool redirty, int unlock)
124 {
125 	unsigned int i;
126 
127 	for (i = 0; i < cc->cluster_size; i++) {
128 		if (!cc->rpages[i])
129 			continue;
130 		if (redirty)
131 			redirty_page_for_writepage(wbc, cc->rpages[i]);
132 		f2fs_put_page(cc->rpages[i], unlock);
133 	}
134 }
135 
136 struct folio *f2fs_compress_control_folio(struct folio *folio)
137 {
138 	struct compress_io_ctx *ctx = folio->private;
139 
140 	return page_folio(ctx->rpages[0]);
141 }
142 
143 int f2fs_init_compress_ctx(struct compress_ctx *cc)
144 {
145 	if (cc->rpages)
146 		return 0;
147 
148 	cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
149 	return cc->rpages ? 0 : -ENOMEM;
150 }
151 
152 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
153 {
154 	page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
155 	cc->rpages = NULL;
156 	cc->nr_rpages = 0;
157 	cc->nr_cpages = 0;
158 	cc->valid_nr_cpages = 0;
159 	if (!reuse)
160 		cc->cluster_idx = NULL_CLUSTER;
161 }
162 
163 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
164 {
165 	unsigned int cluster_ofs;
166 
167 	if (!f2fs_cluster_can_merge_page(cc, folio->index))
168 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
169 
170 	cluster_ofs = offset_in_cluster(cc, folio->index);
171 	cc->rpages[cluster_ofs] = folio_page(folio, 0);
172 	cc->nr_rpages++;
173 	cc->cluster_idx = cluster_idx(cc, folio->index);
174 }
175 
176 #ifdef CONFIG_F2FS_FS_LZO
177 static int lzo_init_compress_ctx(struct compress_ctx *cc)
178 {
179 	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
180 					LZO1X_MEM_COMPRESS);
181 	if (!cc->private)
182 		return -ENOMEM;
183 
184 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
185 	return 0;
186 }
187 
188 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
189 {
190 	vfree(cc->private);
191 	cc->private = NULL;
192 }
193 
194 static int lzo_compress_pages(struct compress_ctx *cc)
195 {
196 	int ret;
197 
198 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
199 					&cc->clen, cc->private);
200 	if (ret != LZO_E_OK) {
201 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
202 				"lzo compress failed, ret:%d", ret);
203 		return -EIO;
204 	}
205 	return 0;
206 }
207 
208 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
209 {
210 	int ret;
211 
212 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
213 						dic->rbuf, &dic->rlen);
214 	if (ret != LZO_E_OK) {
215 		f2fs_err_ratelimited(dic->sbi,
216 				"lzo decompress failed, ret:%d", ret);
217 		return -EIO;
218 	}
219 
220 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
221 		f2fs_err_ratelimited(dic->sbi,
222 				"lzo invalid rlen:%zu, expected:%lu",
223 				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
224 		return -EIO;
225 	}
226 	return 0;
227 }
228 
229 static const struct f2fs_compress_ops f2fs_lzo_ops = {
230 	.init_compress_ctx	= lzo_init_compress_ctx,
231 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
232 	.compress_pages		= lzo_compress_pages,
233 	.decompress_pages	= lzo_decompress_pages,
234 };
235 #endif
236 
237 #ifdef CONFIG_F2FS_FS_LZ4
238 static int lz4_init_compress_ctx(struct compress_ctx *cc)
239 {
240 	unsigned int size = LZ4_MEM_COMPRESS;
241 
242 #ifdef CONFIG_F2FS_FS_LZ4HC
243 	if (F2FS_I(cc->inode)->i_compress_level)
244 		size = LZ4HC_MEM_COMPRESS;
245 #endif
246 
247 	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
248 	if (!cc->private)
249 		return -ENOMEM;
250 
251 	/*
252 	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
253 	 * the worst-case compressed size, because the lz4 compressor can
254 	 * handle the output budget properly.
255 	 */
256 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
257 	return 0;
258 }
259 
260 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
261 {
262 	vfree(cc->private);
263 	cc->private = NULL;
264 }
265 
266 static int lz4_compress_pages(struct compress_ctx *cc)
267 {
268 	int len = -EINVAL;
269 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
270 
271 	if (!level)
272 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
273 						cc->clen, cc->private);
274 #ifdef CONFIG_F2FS_FS_LZ4HC
275 	else
276 		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
277 					cc->clen, level, cc->private);
278 #endif
279 	if (len < 0)
280 		return len;
281 	if (!len)
282 		return -EAGAIN;
283 
284 	cc->clen = len;
285 	return 0;
286 }
287 
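/*
 * A hedged note on the -EAGAIN mapping above: LZ4_compress_default() and
 * LZ4_compress_HC() return 0 when the result cannot fit within cc->clen.
 * Because cc->clen was set to rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE in
 * lz4_init_compress_ctx(), a zero return means compression would not save
 * at least one full page, so the caller falls back to the raw write path:
 *
 *	err = cops->compress_pages(cc);
 *	if (err == -EAGAIN)
 *		write the cluster uncompressed (see f2fs_write_multi_pages)
 */
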
288 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
289 {
290 	int ret;
291 
292 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
293 						dic->clen, dic->rlen);
294 	if (ret < 0) {
295 		f2fs_err_ratelimited(dic->sbi,
296 				"lz4 decompress failed, ret:%d", ret);
297 		return -EIO;
298 	}
299 
300 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
301 		f2fs_err_ratelimited(dic->sbi,
302 				"lz4 invalid ret:%d, expected:%lu",
303 				ret, PAGE_SIZE << dic->log_cluster_size);
304 		return -EIO;
305 	}
306 	return 0;
307 }
308 
309 static bool lz4_is_level_valid(int lvl)
310 {
311 #ifdef CONFIG_F2FS_FS_LZ4HC
312 	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
313 #else
314 	return lvl == 0;
315 #endif
316 }
317 
318 static const struct f2fs_compress_ops f2fs_lz4_ops = {
319 	.init_compress_ctx	= lz4_init_compress_ctx,
320 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
321 	.compress_pages		= lz4_compress_pages,
322 	.decompress_pages	= lz4_decompress_pages,
323 	.is_level_valid		= lz4_is_level_valid,
324 };
325 #endif
326 
327 #ifdef CONFIG_F2FS_FS_ZSTD
328 static int zstd_init_compress_ctx(struct compress_ctx *cc)
329 {
330 	zstd_parameters params;
331 	zstd_cstream *stream;
332 	void *workspace;
333 	unsigned int workspace_size;
334 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
335 
336 	/* Keep this for backward compatibility */
337 	if (!level)
338 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
339 
340 	params = zstd_get_params(level, cc->rlen);
341 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
342 
343 	workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
344 	if (!workspace)
345 		return -ENOMEM;
346 
347 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
348 	if (!stream) {
349 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
350 				"%s zstd_init_cstream failed", __func__);
351 		vfree(workspace);
352 		return -EIO;
353 	}
354 
355 	cc->private = workspace;
356 	cc->private2 = stream;
357 
358 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
359 	return 0;
360 }
361 
362 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
363 {
364 	vfree(cc->private);
365 	cc->private = NULL;
366 	cc->private2 = NULL;
367 }
368 
369 static int zstd_compress_pages(struct compress_ctx *cc)
370 {
371 	zstd_cstream *stream = cc->private2;
372 	zstd_in_buffer inbuf;
373 	zstd_out_buffer outbuf;
374 	int src_size = cc->rlen;
375 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
376 	int ret;
377 
378 	inbuf.pos = 0;
379 	inbuf.src = cc->rbuf;
380 	inbuf.size = src_size;
381 
382 	outbuf.pos = 0;
383 	outbuf.dst = cc->cbuf->cdata;
384 	outbuf.size = dst_size;
385 
386 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
387 	if (zstd_is_error(ret)) {
388 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
389 				"%s zstd_compress_stream failed, ret: %d",
390 				__func__, zstd_get_error_code(ret));
391 		return -EIO;
392 	}
393 
394 	ret = zstd_end_stream(stream, &outbuf);
395 	if (zstd_is_error(ret)) {
396 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
397 				"%s zstd_end_stream returned %d",
398 				__func__, zstd_get_error_code(ret));
399 		return -EIO;
400 	}
401 
402 	/*
403 	 * compressed data remains in the intermediate buffer because there
404 	 * is no more space in cbuf.cdata
405 	 */
406 	if (ret)
407 		return -EAGAIN;
408 
409 	cc->clen = outbuf.pos;
410 	return 0;
411 }
412 
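/*
 * A minimal sketch of the streaming pattern used above, with illustrative
 * local names: zstd_end_stream() returns the number of bytes still held in
 * the stream's internal buffer, so a non-zero, non-error return means the
 * output budget was exhausted and the cluster is written uncompressed:
 *
 *	zstd_in_buffer in = { .src = rbuf, .size = rlen, .pos = 0 };
 *	zstd_out_buffer out = { .dst = cbuf, .size = budget, .pos = 0 };
 *
 *	ret = zstd_compress_stream(stream, &out, &in);
 *	ret = zstd_end_stream(stream, &out);
 *	clen = out.pos;		only meaningful when ret == 0
 */
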
413 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
414 {
415 	zstd_dstream *stream;
416 	void *workspace;
417 	unsigned int workspace_size;
418 	unsigned int max_window_size =
419 			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
420 
421 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
422 
423 	workspace = f2fs_vmalloc(dic->sbi, workspace_size);
424 	if (!workspace)
425 		return -ENOMEM;
426 
427 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
428 	if (!stream) {
429 		f2fs_err_ratelimited(dic->sbi,
430 				"%s zstd_init_dstream failed", __func__);
431 		vfree(workspace);
432 		return -EIO;
433 	}
434 
435 	dic->private = workspace;
436 	dic->private2 = stream;
437 
438 	return 0;
439 }
440 
441 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
442 {
443 	vfree(dic->private);
444 	dic->private = NULL;
445 	dic->private2 = NULL;
446 }
447 
448 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
449 {
450 	zstd_dstream *stream = dic->private2;
451 	zstd_in_buffer inbuf;
452 	zstd_out_buffer outbuf;
453 	int ret;
454 
455 	inbuf.pos = 0;
456 	inbuf.src = dic->cbuf->cdata;
457 	inbuf.size = dic->clen;
458 
459 	outbuf.pos = 0;
460 	outbuf.dst = dic->rbuf;
461 	outbuf.size = dic->rlen;
462 
463 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
464 	if (zstd_is_error(ret)) {
465 		f2fs_err_ratelimited(dic->sbi,
466 				"%s zstd_decompress_stream failed, ret: %d",
467 				__func__, zstd_get_error_code(ret));
468 		return -EIO;
469 	}
470 
471 	if (dic->rlen != outbuf.pos) {
472 		f2fs_err_ratelimited(dic->sbi,
473 				"%s ZSTD invalid rlen:%zu, expected:%lu",
474 				__func__, dic->rlen,
475 				PAGE_SIZE << dic->log_cluster_size);
476 		return -EIO;
477 	}
478 
479 	return 0;
480 }
481 
482 static bool zstd_is_level_valid(int lvl)
483 {
484 	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
485 }
486 
487 static const struct f2fs_compress_ops f2fs_zstd_ops = {
488 	.init_compress_ctx	= zstd_init_compress_ctx,
489 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
490 	.compress_pages		= zstd_compress_pages,
491 	.init_decompress_ctx	= zstd_init_decompress_ctx,
492 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
493 	.decompress_pages	= zstd_decompress_pages,
494 	.is_level_valid		= zstd_is_level_valid,
495 };
496 #endif
497 
498 #ifdef CONFIG_F2FS_FS_LZO
499 #ifdef CONFIG_F2FS_FS_LZORLE
500 static int lzorle_compress_pages(struct compress_ctx *cc)
501 {
502 	int ret;
503 
504 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
505 					&cc->clen, cc->private);
506 	if (ret != LZO_E_OK) {
507 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
508 				"lzo-rle compress failed, ret:%d", ret);
509 		return -EIO;
510 	}
511 	return 0;
512 }
513 
514 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
515 	.init_compress_ctx	= lzo_init_compress_ctx,
516 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
517 	.compress_pages		= lzorle_compress_pages,
518 	.decompress_pages	= lzo_decompress_pages,
519 };
520 #endif
521 #endif
522 
523 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
524 #ifdef CONFIG_F2FS_FS_LZO
525 	&f2fs_lzo_ops,
526 #else
527 	NULL,
528 #endif
529 #ifdef CONFIG_F2FS_FS_LZ4
530 	&f2fs_lz4_ops,
531 #else
532 	NULL,
533 #endif
534 #ifdef CONFIG_F2FS_FS_ZSTD
535 	&f2fs_zstd_ops,
536 #else
537 	NULL,
538 #endif
539 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
540 	&f2fs_lzorle_ops,
541 #else
542 	NULL,
543 #endif
544 };
545 
546 bool f2fs_is_compress_backend_ready(struct inode *inode)
547 {
548 	if (!f2fs_compressed_file(inode))
549 		return true;
550 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
551 }
552 
553 bool f2fs_is_compress_level_valid(int alg, int lvl)
554 {
555 	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
556 
557 	if (cops->is_level_valid)
558 		return cops->is_level_valid(lvl);
559 
560 	return lvl == 0;
561 }
562 
563 static mempool_t *compress_page_pool;
564 static int num_compress_pages = 512;
565 module_param(num_compress_pages, uint, 0444);
566 MODULE_PARM_DESC(num_compress_pages,
567 		"Number of intermediate compress pages to preallocate");
568 
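/*
 * A hedged usage note: as a 0444 parameter it is read-only at runtime, so
 * for a built-in f2fs it would be set on the kernel command line, e.g.
 * (assumed syntax) f2fs.num_compress_pages=1024. The parameter only bounds
 * the preallocated reserve; mempool_alloc() below still tries the page
 * allocator first and only dips into the reserve under memory pressure.
 */
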
569 int __init f2fs_init_compress_mempool(void)
570 {
571 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
572 	return compress_page_pool ? 0 : -ENOMEM;
573 }
574 
575 void f2fs_destroy_compress_mempool(void)
576 {
577 	mempool_destroy(compress_page_pool);
578 }
579 
580 static struct page *f2fs_compress_alloc_page(void)
581 {
582 	struct page *page;
583 
584 	page = mempool_alloc(compress_page_pool, GFP_NOFS);
585 	lock_page(page);
586 
587 	return page;
588 }
589 
590 static void f2fs_compress_free_page(struct page *page)
591 {
592 	struct folio *folio;
593 
594 	if (!page)
595 		return;
596 	folio = page_folio(page);
597 	folio_detach_private(folio);
598 	folio->mapping = NULL;
599 	folio_unlock(folio);
600 	mempool_free(page, compress_page_pool);
601 }
602 
603 #define MAX_VMAP_RETRIES	3
604 
605 static void *f2fs_vmap(struct page **pages, unsigned int count)
606 {
607 	int i;
608 	void *buf = NULL;
609 
610 	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
611 		buf = vm_map_ram(pages, count, -1);
612 		if (buf)
613 			break;
614 		vm_unmap_aliases();
615 	}
616 	return buf;
617 }
618 
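/*
 * Illustrative note: vm_map_ram() can fail transiently while lazily freed
 * vmap blocks still hold address space, and vm_unmap_aliases() flushes
 * those stale mappings so that a retry may succeed. Callers treat a NULL
 * return after MAX_VMAP_RETRIES attempts as -ENOMEM:
 *
 *	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 *	if (!cc->rbuf)
 *		return -ENOMEM;
 */
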
619 static int f2fs_compress_pages(struct compress_ctx *cc)
620 {
621 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
622 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
623 	const struct f2fs_compress_ops *cops =
624 				f2fs_cops[fi->i_compress_algorithm];
625 	unsigned int max_len, new_nr_cpages;
626 	u32 chksum = 0;
627 	int i, ret;
628 
629 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
630 				cc->cluster_size, fi->i_compress_algorithm);
631 
632 	if (cops->init_compress_ctx) {
633 		ret = cops->init_compress_ctx(cc);
634 		if (ret)
635 			goto out;
636 	}
637 
638 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
639 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
640 	cc->valid_nr_cpages = cc->nr_cpages;
641 
642 	cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
643 	if (!cc->cpages) {
644 		ret = -ENOMEM;
645 		goto destroy_compress_ctx;
646 	}
647 
648 	for (i = 0; i < cc->nr_cpages; i++)
649 		cc->cpages[i] = f2fs_compress_alloc_page();
650 
651 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
652 	if (!cc->rbuf) {
653 		ret = -ENOMEM;
654 		goto out_free_cpages;
655 	}
656 
657 	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
658 	if (!cc->cbuf) {
659 		ret = -ENOMEM;
660 		goto out_vunmap_rbuf;
661 	}
662 
663 	ret = cops->compress_pages(cc);
664 	if (ret)
665 		goto out_vunmap_cbuf;
666 
667 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
668 
669 	if (cc->clen > max_len) {
670 		ret = -EAGAIN;
671 		goto out_vunmap_cbuf;
672 	}
673 
674 	cc->cbuf->clen = cpu_to_le32(cc->clen);
675 
676 	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
677 		chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
678 	cc->cbuf->chksum = cpu_to_le32(chksum);
679 
680 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
681 		cc->cbuf->reserved[i] = cpu_to_le32(0);
682 
683 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
684 
685 	/* zero out any unused part of the last page */
686 	memset(&cc->cbuf->cdata[cc->clen], 0,
687 			(new_nr_cpages * PAGE_SIZE) -
688 			(cc->clen + COMPRESS_HEADER_SIZE));
689 
690 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
691 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
692 
693 	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
694 		f2fs_compress_free_page(cc->cpages[i]);
695 		cc->cpages[i] = NULL;
696 	}
697 
698 	if (cops->destroy_compress_ctx)
699 		cops->destroy_compress_ctx(cc);
700 
701 	cc->valid_nr_cpages = new_nr_cpages;
702 
703 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
704 							cc->clen, ret);
705 	return 0;
706 
707 out_vunmap_cbuf:
708 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
709 out_vunmap_rbuf:
710 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
711 out_free_cpages:
712 	for (i = 0; i < cc->nr_cpages; i++) {
713 		if (cc->cpages[i])
714 			f2fs_compress_free_page(cc->cpages[i]);
715 	}
716 	page_array_free(sbi, cc->cpages, cc->nr_cpages);
717 	cc->cpages = NULL;
718 destroy_compress_ctx:
719 	if (cops->destroy_compress_ctx)
720 		cops->destroy_compress_ctx(cc);
721 out:
722 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
723 							cc->clen, ret);
724 	return ret;
725 }
726 
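/*
 * Worked example for the max_len check above, assuming 4 KiB pages and a
 * 4-page cluster: rlen is 16 KiB, and max_len is three pages minus the
 * small on-disk header. A cluster is stored compressed only if header plus
 * payload fits into cluster_size - 1 pages, i.e. only if at least one full
 * block is saved; otherwise the -EAGAIN above routes it to the raw path:
 *
 *	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
 *	if (cc->clen > max_len)
 *		return -EAGAIN;
 */
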
727 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
728 		bool pre_alloc);
729 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
730 		bool bypass_destroy_callback, bool pre_alloc);
731 
732 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
733 {
734 	struct f2fs_sb_info *sbi = dic->sbi;
735 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
736 	const struct f2fs_compress_ops *cops =
737 			f2fs_cops[fi->i_compress_algorithm];
738 	bool bypass_callback = false;
739 	int ret;
740 
741 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
742 				dic->cluster_size, fi->i_compress_algorithm);
743 
744 	if (dic->failed) {
745 		ret = -EIO;
746 		goto out_end_io;
747 	}
748 
749 	ret = f2fs_prepare_decomp_mem(dic, false);
750 	if (ret) {
751 		bypass_callback = true;
752 		goto out_release;
753 	}
754 
755 	dic->clen = le32_to_cpu(dic->cbuf->clen);
756 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
757 
758 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
759 		ret = -EFSCORRUPTED;
760 
761 		/* Avoid f2fs_commit_super in irq context */
762 		if (!in_task)
763 			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
764 		else
765 			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
766 		goto out_release;
767 	}
768 
769 	ret = cops->decompress_pages(dic);
770 
771 	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
772 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
773 		u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);
774 
775 		if (provided != calculated) {
776 			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
777 				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
778 				f2fs_info_ratelimited(sbi,
779 					"checksum invalid, nid = %lu, %x vs %x",
780 					dic->inode->i_ino,
781 					provided, calculated);
782 			}
783 			set_sbi_flag(sbi, SBI_NEED_FSCK);
784 		}
785 	}
786 
787 out_release:
788 	f2fs_release_decomp_mem(dic, bypass_callback, false);
789 
790 out_end_io:
791 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
792 							dic->clen, ret);
793 	f2fs_decompress_end_io(dic, ret, in_task);
794 }
795 
796 static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
797 		struct folio *folio, nid_t ino, block_t blkaddr);
798 
799 /*
800  * This is called when a page of a compressed cluster has been read from disk
801  * (or failed to be read from disk).  It checks whether this page was the last
802  * page being waited on in the cluster, and if so, it decompresses the cluster
803  * (or in the case of a failure, cleans up without actually decompressing).
804  */
805 void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
806 		block_t blkaddr, bool in_task)
807 {
808 	struct decompress_io_ctx *dic = folio->private;
809 	struct f2fs_sb_info *sbi = dic->sbi;
810 
811 	dec_page_count(sbi, F2FS_RD_DATA);
812 
813 	if (failed)
814 		WRITE_ONCE(dic->failed, true);
815 	else if (blkaddr && in_task)
816 		f2fs_cache_compressed_page(sbi, folio,
817 					dic->inode->i_ino, blkaddr);
818 
819 	if (atomic_dec_and_test(&dic->remaining_pages))
820 		f2fs_decompress_cluster(dic, in_task);
821 }
822 
823 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
824 {
825 	if (cc->cluster_idx == NULL_CLUSTER)
826 		return true;
827 	return cc->cluster_idx == cluster_idx(cc, index);
828 }
829 
830 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
831 {
832 	return cc->nr_rpages == 0;
833 }
834 
835 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
836 {
837 	return cc->cluster_size == cc->nr_rpages;
838 }
839 
840 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
841 {
842 	if (f2fs_cluster_is_empty(cc))
843 		return true;
844 	return is_page_in_cluster(cc, index);
845 }
846 
847 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
848 				int index, int nr_pages, bool uptodate)
849 {
850 	unsigned long pgidx = page_folio(pages[index])->index;
851 	int i = uptodate ? 0 : 1;
852 
853 	/*
854 	 * when uptodate is true, check whether all pages in the cluster
855 	 * are uptodate.
856 	 */
857 	if (uptodate && (pgidx % cc->cluster_size))
858 		return false;
859 
860 	if (nr_pages - index < cc->cluster_size)
861 		return false;
862 
863 	for (; i < cc->cluster_size; i++) {
864 		struct folio *folio = page_folio(pages[index + i]);
865 
866 		if (folio->index != pgidx + i)
867 			return false;
868 		if (uptodate && !folio_test_uptodate(folio))
869 			return false;
870 	}
871 
872 	return true;
873 }
874 
875 static bool cluster_has_invalid_data(struct compress_ctx *cc)
876 {
877 	loff_t i_size = i_size_read(cc->inode);
878 	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
879 	int i;
880 
881 	for (i = 0; i < cc->cluster_size; i++) {
882 		struct page *page = cc->rpages[i];
883 
884 		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
885 
886 		/* beyond EOF */
887 		if (page_folio(page)->index >= nr_pages)
888 			return true;
889 	}
890 	return false;
891 }
892 
893 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
894 {
895 #ifdef CONFIG_F2FS_CHECK_FS
896 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
897 	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
898 	int cluster_end = 0;
899 	unsigned int count;
900 	int i;
901 	char *reason = "";
902 
903 	if (dn->data_blkaddr != COMPRESS_ADDR)
904 		return false;
905 
906 	/* [..., COMPR_ADDR, ...] */
907 	if (dn->ofs_in_node % cluster_size) {
908 		reason = "[*|C|*|*]";
909 		goto out;
910 	}
911 
912 	for (i = 1, count = 1; i < cluster_size; i++, count++) {
913 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
914 							dn->ofs_in_node + i);
915 
916 		/* [COMPR_ADDR, ..., COMPR_ADDR] */
917 		if (blkaddr == COMPRESS_ADDR) {
918 			reason = "[C|*|C|*]";
919 			goto out;
920 		}
921 		if (!__is_valid_data_blkaddr(blkaddr)) {
922 			if (!cluster_end)
923 				cluster_end = i;
924 			continue;
925 		}
926 		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
927 		if (cluster_end) {
928 			reason = "[C|N|N|V]";
929 			goto out;
930 		}
931 	}
932 
933 	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
934 		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
935 
936 	return false;
937 out:
938 	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
939 			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
940 	set_sbi_flag(sbi, SBI_NEED_FSCK);
941 	return true;
942 #else
943 	return false;
944 #endif
945 }
946 
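/*
 * Illustrative summary of the layouts the sanity check above accepts and
 * rejects (a hedged reading of the checks, with C for COMPRESS_ADDR, V for
 * a valid blkaddr and N for NULL_ADDR/NEW_ADDR):
 *
 *	[C|V|V|V]	ok: header followed by compressed blocks
 *	[C|V|N|N]	ok: holes are allowed after the last valid block
 *	[*|C|*|*]	bad: header not at the cluster start
 *	[C|*|C|*]	bad: second COMPRESS_ADDR inside one cluster
 *	[C|N|N|V]	bad: valid block after a hole
 */
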
947 static int __f2fs_get_cluster_blocks(struct inode *inode,
948 					struct dnode_of_data *dn)
949 {
950 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
951 	int count, i;
952 
953 	for (i = 0, count = 0; i < cluster_size; i++) {
954 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
955 							dn->ofs_in_node + i);
956 
957 		if (__is_valid_data_blkaddr(blkaddr))
958 			count++;
959 	}
960 
961 	return count;
962 }
963 
964 static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
965 				enum cluster_check_type type)
966 {
967 	struct dnode_of_data dn;
968 	unsigned int start_idx = cluster_idx <<
969 				F2FS_I(inode)->i_log_cluster_size;
970 	int ret;
971 
972 	set_new_dnode(&dn, inode, NULL, NULL, 0);
973 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
974 	if (ret) {
975 		if (ret == -ENOENT)
976 			ret = 0;
977 		goto fail;
978 	}
979 
980 	if (f2fs_sanity_check_cluster(&dn)) {
981 		ret = -EFSCORRUPTED;
982 		goto fail;
983 	}
984 
985 	if (dn.data_blkaddr == COMPRESS_ADDR) {
986 		if (type == CLUSTER_COMPR_BLKS)
987 			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
988 		else if (type == CLUSTER_IS_COMPR)
989 			ret = 1;
990 	} else if (type == CLUSTER_RAW_BLKS) {
991 		ret = __f2fs_get_cluster_blocks(inode, &dn);
992 	}
993 fail:
994 	f2fs_put_dnode(&dn);
995 	return ret;
996 }
997 
998 /* return # of compressed blocks in compressed cluster */
999 static int f2fs_compressed_blocks(struct compress_ctx *cc)
1000 {
1001 	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
1002 		CLUSTER_COMPR_BLKS);
1003 }
1004 
1005 /* return # of raw blocks in non-compressed cluster */
1006 static int f2fs_decompressed_blocks(struct inode *inode,
1007 				unsigned int cluster_idx)
1008 {
1009 	return __f2fs_cluster_blocks(inode, cluster_idx,
1010 		CLUSTER_RAW_BLKS);
1011 }
1012 
1013 /* return whether cluster is compressed one or not */
1014 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1015 {
1016 	return __f2fs_cluster_blocks(inode,
1017 		index >> F2FS_I(inode)->i_log_cluster_size,
1018 		CLUSTER_IS_COMPR);
1019 }
1020 
1021 /* return whether cluster contains non raw blocks or not */
1022 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
1023 {
1024 	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
1025 
1026 	return f2fs_decompressed_blocks(inode, cluster_idx) !=
1027 		F2FS_I(inode)->i_cluster_size;
1028 }
1029 
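/*
 * A short sketch of how the wrappers above map onto __f2fs_cluster_blocks()
 * (illustrative, mirroring the code):
 *
 *	f2fs_compressed_blocks(cc);		CLUSTER_COMPR_BLKS: 1 + compressed blocks
 *	f2fs_decompressed_blocks(inode, idx);	CLUSTER_RAW_BLKS: raw block count
 *	f2fs_is_compressed_cluster(inode, pg);	CLUSTER_IS_COMPR: 1 if compressed
 */
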
1030 static bool cluster_may_compress(struct compress_ctx *cc)
1031 {
1032 	if (!f2fs_need_compress_data(cc->inode))
1033 		return false;
1034 	if (f2fs_is_atomic_file(cc->inode))
1035 		return false;
1036 	if (!f2fs_cluster_is_full(cc))
1037 		return false;
1038 	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1039 		return false;
1040 	return !cluster_has_invalid_data(cc);
1041 }
1042 
1043 static void set_cluster_writeback(struct compress_ctx *cc)
1044 {
1045 	int i;
1046 
1047 	for (i = 0; i < cc->cluster_size; i++) {
1048 		if (cc->rpages[i])
1049 			set_page_writeback(cc->rpages[i]);
1050 	}
1051 }
1052 
1053 static void cancel_cluster_writeback(struct compress_ctx *cc,
1054 			struct compress_io_ctx *cic, int submitted)
1055 {
1056 	int i;
1057 
1058 	/* Wait for submitted IOs. */
1059 	if (submitted > 1) {
1060 		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
1061 		while (atomic_read(&cic->pending_pages) !=
1062 					(cc->valid_nr_cpages - submitted + 1))
1063 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1064 	}
1065 
1066 	/* Cancel writeback and stay locked. */
1067 	for (i = 0; i < cc->cluster_size; i++) {
1068 		if (i < submitted) {
1069 			inode_inc_dirty_pages(cc->inode);
1070 			lock_page(cc->rpages[i]);
1071 		}
1072 		clear_page_private_gcing(cc->rpages[i]);
1073 		if (folio_test_writeback(page_folio(cc->rpages[i])))
1074 			end_page_writeback(cc->rpages[i]);
1075 	}
1076 }
1077 
1078 static void set_cluster_dirty(struct compress_ctx *cc)
1079 {
1080 	int i;
1081 
1082 	for (i = 0; i < cc->cluster_size; i++)
1083 		if (cc->rpages[i]) {
1084 			set_page_dirty(cc->rpages[i]);
1085 			set_page_private_gcing(cc->rpages[i]);
1086 		}
1087 }
1088 
1089 static int prepare_compress_overwrite(struct compress_ctx *cc,
1090 		struct page **pagep, pgoff_t index, void **fsdata)
1091 {
1092 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1093 	struct address_space *mapping = cc->inode->i_mapping;
1094 	struct folio *folio;
1095 	sector_t last_block_in_bio;
1096 	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1097 	pgoff_t start_idx = start_idx_of_cluster(cc);
1098 	int i, ret;
1099 
1100 retry:
1101 	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1102 	if (ret <= 0)
1103 		return ret;
1104 
1105 	ret = f2fs_init_compress_ctx(cc);
1106 	if (ret)
1107 		return ret;
1108 
1109 	/* keep folio reference to avoid page reclaim */
1110 	for (i = 0; i < cc->cluster_size; i++) {
1111 		folio = f2fs_filemap_get_folio(mapping, start_idx + i,
1112 				fgp_flag, GFP_NOFS);
1113 		if (IS_ERR(folio)) {
1114 			ret = PTR_ERR(folio);
1115 			goto unlock_pages;
1116 		}
1117 
1118 		if (folio_test_uptodate(folio))
1119 			f2fs_folio_put(folio, true);
1120 		else
1121 			f2fs_compress_ctx_add_page(cc, folio);
1122 	}
1123 
1124 	if (!f2fs_cluster_is_empty(cc)) {
1125 		struct bio *bio = NULL;
1126 
1127 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1128 					&last_block_in_bio, NULL, true);
1129 		f2fs_put_rpages(cc);
1130 		f2fs_destroy_compress_ctx(cc, true);
1131 		if (ret)
1132 			goto out;
1133 		if (bio)
1134 			f2fs_submit_read_bio(sbi, bio, DATA);
1135 
1136 		ret = f2fs_init_compress_ctx(cc);
1137 		if (ret)
1138 			goto out;
1139 	}
1140 
1141 	for (i = 0; i < cc->cluster_size; i++) {
1142 		f2fs_bug_on(sbi, cc->rpages[i]);
1143 
1144 		folio = filemap_lock_folio(mapping, start_idx + i);
1145 		if (IS_ERR(folio)) {
1146 			/* folio could be truncated */
1147 			goto release_and_retry;
1148 		}
1149 
1150 		f2fs_folio_wait_writeback(folio, DATA, true, true);
1151 		f2fs_compress_ctx_add_page(cc, folio);
1152 
1153 		if (!folio_test_uptodate(folio)) {
1154 			f2fs_handle_page_eio(sbi, folio, DATA);
1155 release_and_retry:
1156 			f2fs_put_rpages(cc);
1157 			f2fs_unlock_rpages(cc, i + 1);
1158 			f2fs_destroy_compress_ctx(cc, true);
1159 			goto retry;
1160 		}
1161 	}
1162 
1163 	if (likely(!ret)) {
1164 		*fsdata = cc->rpages;
1165 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1166 		return cc->cluster_size;
1167 	}
1168 
1169 unlock_pages:
1170 	f2fs_put_rpages(cc);
1171 	f2fs_unlock_rpages(cc, i);
1172 	f2fs_destroy_compress_ctx(cc, true);
1173 out:
1174 	return ret;
1175 }
1176 
1177 int f2fs_prepare_compress_overwrite(struct inode *inode,
1178 		struct page **pagep, pgoff_t index, void **fsdata)
1179 {
1180 	struct compress_ctx cc = {
1181 		.inode = inode,
1182 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1183 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1184 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1185 		.rpages = NULL,
1186 		.nr_rpages = 0,
1187 	};
1188 
1189 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1190 }
1191 
1192 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1193 					pgoff_t index, unsigned copied)
1194 
1195 {
1196 	struct compress_ctx cc = {
1197 		.inode = inode,
1198 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1199 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1200 		.rpages = fsdata,
1201 	};
1202 	struct folio *folio = page_folio(cc.rpages[0]);
1203 	bool first_index = (index == folio->index);
1204 
1205 	if (copied)
1206 		set_cluster_dirty(&cc);
1207 
1208 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1209 	f2fs_destroy_compress_ctx(&cc, false);
1210 
1211 	return first_index;
1212 }
1213 
1214 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1215 {
1216 	void *fsdata = NULL;
1217 	struct page *pagep;
1218 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1219 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1220 							log_cluster_size;
1221 	int err;
1222 
1223 	err = f2fs_is_compressed_cluster(inode, start_idx);
1224 	if (err < 0)
1225 		return err;
1226 
1227 	/* truncate normal cluster */
1228 	if (!err)
1229 		return f2fs_do_truncate_blocks(inode, from, lock);
1230 
1231 	/* truncate compressed cluster */
1232 	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1233 						start_idx, &fsdata);
1234 
1235 	/* should not be a normal cluster */
1236 	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1237 
1238 	if (err <= 0)
1239 		return err;
1240 
1241 	if (err > 0) {
1242 		struct page **rpages = fsdata;
1243 		int cluster_size = F2FS_I(inode)->i_cluster_size;
1244 		int i;
1245 
1246 		for (i = cluster_size - 1; i >= 0; i--) {
1247 			struct folio *folio = page_folio(rpages[i]);
1248 			loff_t start = folio->index << PAGE_SHIFT;
1249 
1250 			if (from <= start) {
1251 				folio_zero_segment(folio, 0, folio_size(folio));
1252 			} else {
1253 				folio_zero_segment(folio, from - start,
1254 						folio_size(folio));
1255 				break;
1256 			}
1257 		}
1258 
1259 		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1260 	}
1261 	return 0;
1262 }
1263 
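/*
 * Worked example for the start_idx computation above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and log_cluster_size == 2: truncating from byte offset
 * 26 KiB gives
 *
 *	from >> (12 + 2) == 1;		cluster number
 *	1 << 2 == 4;			first page index of that cluster
 *
 * so start_idx points at the cluster containing `from`, and only that
 * partially truncated cluster needs the zeroing loop above.
 */
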
1264 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1265 					int *submitted,
1266 					struct writeback_control *wbc,
1267 					enum iostat_type io_type)
1268 {
1269 	struct inode *inode = cc->inode;
1270 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1271 	struct f2fs_inode_info *fi = F2FS_I(inode);
1272 	struct f2fs_io_info fio = {
1273 		.sbi = sbi,
1274 		.ino = cc->inode->i_ino,
1275 		.type = DATA,
1276 		.op = REQ_OP_WRITE,
1277 		.op_flags = wbc_to_write_flags(wbc),
1278 		.old_blkaddr = NEW_ADDR,
1279 		.page = NULL,
1280 		.encrypted_page = NULL,
1281 		.compressed_page = NULL,
1282 		.io_type = io_type,
1283 		.io_wbc = wbc,
1284 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1285 									1 : 0,
1286 	};
1287 	struct folio *folio;
1288 	struct dnode_of_data dn;
1289 	struct node_info ni;
1290 	struct compress_io_ctx *cic;
1291 	pgoff_t start_idx = start_idx_of_cluster(cc);
1292 	unsigned int last_index = cc->cluster_size - 1;
1293 	loff_t psize;
1294 	int i, err;
1295 	bool quota_inode = IS_NOQUOTA(inode);
1296 
1297 	/* bypass data pages so that the kworker jobs can proceed */
1298 	if (unlikely(f2fs_cp_error(sbi))) {
1299 		mapping_set_error(inode->i_mapping, -EIO);
1300 		goto out_free;
1301 	}
1302 
1303 	if (quota_inode) {
1304 		/*
1305 		 * We need to wait for node_write to avoid block allocation during
1306 		 * checkpoint. This can only happen for quota writes, which can
1307 		 * cause the discard race condition below.
1308 		 */
1309 		f2fs_down_read(&sbi->node_write);
1310 	} else if (!f2fs_trylock_op(sbi)) {
1311 		goto out_free;
1312 	}
1313 
1314 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1315 
1316 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1317 	if (err)
1318 		goto out_unlock_op;
1319 
1320 	for (i = 0; i < cc->cluster_size; i++) {
1321 		if (data_blkaddr(dn.inode, dn.node_folio,
1322 					dn.ofs_in_node + i) == NULL_ADDR)
1323 			goto out_put_dnode;
1324 	}
1325 
1326 	folio = page_folio(cc->rpages[last_index]);
1327 	psize = folio_pos(folio) + folio_size(folio);
1328 
1329 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1330 	if (err)
1331 		goto out_put_dnode;
1332 
1333 	fio.version = ni.version;
1334 
1335 	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1336 	if (!cic)
1337 		goto out_put_dnode;
1338 
1339 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1340 	cic->inode = inode;
1341 	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1342 	cic->rpages = page_array_alloc(sbi, cc->cluster_size);
1343 	if (!cic->rpages)
1344 		goto out_put_cic;
1345 
1346 	cic->nr_rpages = cc->cluster_size;
1347 
1348 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1349 		f2fs_set_compressed_page(cc->cpages[i], inode,
1350 				page_folio(cc->rpages[i + 1])->index, cic);
1351 		fio.compressed_page = cc->cpages[i];
1352 
1353 		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
1354 						dn.ofs_in_node + i + 1);
1355 
1356 		/* wait for GCed page writeback via META_MAPPING */
1357 		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1358 
1359 		if (fio.encrypted) {
1360 			fio.page = cc->rpages[i + 1];
1361 			err = f2fs_encrypt_one_page(&fio);
1362 			if (err)
1363 				goto out_destroy_crypt;
1364 			cc->cpages[i] = fio.encrypted_page;
1365 		}
1366 	}
1367 
1368 	set_cluster_writeback(cc);
1369 
1370 	for (i = 0; i < cc->cluster_size; i++)
1371 		cic->rpages[i] = cc->rpages[i];
1372 
1373 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1374 		block_t blkaddr;
1375 
1376 		blkaddr = f2fs_data_blkaddr(&dn);
1377 		fio.page = cc->rpages[i];
1378 		fio.old_blkaddr = blkaddr;
1379 
1380 		/* cluster header */
1381 		if (i == 0) {
1382 			if (blkaddr == COMPRESS_ADDR)
1383 				fio.compr_blocks++;
1384 			if (__is_valid_data_blkaddr(blkaddr))
1385 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1386 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1387 			goto unlock_continue;
1388 		}
1389 
1390 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1391 			fio.compr_blocks++;
1392 
1393 		if (i > cc->valid_nr_cpages) {
1394 			if (__is_valid_data_blkaddr(blkaddr)) {
1395 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1396 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1397 			}
1398 			goto unlock_continue;
1399 		}
1400 
1401 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1402 
1403 		if (fio.encrypted)
1404 			fio.encrypted_page = cc->cpages[i - 1];
1405 		else
1406 			fio.compressed_page = cc->cpages[i - 1];
1407 
1408 		cc->cpages[i - 1] = NULL;
1409 		fio.submitted = 0;
1410 		f2fs_outplace_write_data(&dn, &fio);
1411 		if (unlikely(!fio.submitted)) {
1412 			cancel_cluster_writeback(cc, cic, i);
1413 
1414 			/* To call fscrypt_finalize_bounce_page */
1415 			i = cc->valid_nr_cpages;
1416 			*submitted = 0;
1417 			goto out_destroy_crypt;
1418 		}
1419 		(*submitted)++;
1420 unlock_continue:
1421 		inode_dec_dirty_pages(cc->inode);
1422 		folio_unlock(fio.folio);
1423 	}
1424 
1425 	if (fio.compr_blocks)
1426 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1427 	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1428 	add_compr_block_stat(inode, cc->valid_nr_cpages);
1429 
1430 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1431 
1432 	f2fs_put_dnode(&dn);
1433 	if (quota_inode)
1434 		f2fs_up_read(&sbi->node_write);
1435 	else
1436 		f2fs_unlock_op(sbi);
1437 
1438 	spin_lock(&fi->i_size_lock);
1439 	if (fi->last_disk_size < psize)
1440 		fi->last_disk_size = psize;
1441 	spin_unlock(&fi->i_size_lock);
1442 
1443 	f2fs_put_rpages(cc);
1444 	page_array_free(sbi, cc->cpages, cc->nr_cpages);
1445 	cc->cpages = NULL;
1446 	f2fs_destroy_compress_ctx(cc, false);
1447 	return 0;
1448 
1449 out_destroy_crypt:
1450 	page_array_free(sbi, cic->rpages, cc->cluster_size);
1451 
1452 	for (--i; i >= 0; i--) {
1453 		if (!cc->cpages[i])
1454 			continue;
1455 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1456 	}
1457 out_put_cic:
1458 	kmem_cache_free(cic_entry_slab, cic);
1459 out_put_dnode:
1460 	f2fs_put_dnode(&dn);
1461 out_unlock_op:
1462 	if (quota_inode)
1463 		f2fs_up_read(&sbi->node_write);
1464 	else
1465 		f2fs_unlock_op(sbi);
1466 out_free:
1467 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1468 		f2fs_compress_free_page(cc->cpages[i]);
1469 		cc->cpages[i] = NULL;
1470 	}
1471 	page_array_free(sbi, cc->cpages, cc->nr_cpages);
1472 	cc->cpages = NULL;
1473 	return -EAGAIN;
1474 }
1475 
1476 void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
1477 {
1478 	struct page *page = &folio->page;
1479 	struct f2fs_sb_info *sbi = bio->bi_private;
1480 	struct compress_io_ctx *cic = folio->private;
1481 	enum count_type type = WB_DATA_TYPE(folio,
1482 				f2fs_is_compressed_page(folio));
1483 	int i;
1484 
1485 	if (unlikely(bio->bi_status != BLK_STS_OK))
1486 		mapping_set_error(cic->inode->i_mapping, -EIO);
1487 
1488 	f2fs_compress_free_page(page);
1489 
1490 	dec_page_count(sbi, type);
1491 
1492 	if (atomic_dec_return(&cic->pending_pages))
1493 		return;
1494 
1495 	for (i = 0; i < cic->nr_rpages; i++) {
1496 		WARN_ON(!cic->rpages[i]);
1497 		clear_page_private_gcing(cic->rpages[i]);
1498 		end_page_writeback(cic->rpages[i]);
1499 	}
1500 
1501 	page_array_free(sbi, cic->rpages, cic->nr_rpages);
1502 	kmem_cache_free(cic_entry_slab, cic);
1503 }
1504 
1505 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1506 					int *submitted_p,
1507 					struct writeback_control *wbc,
1508 					enum iostat_type io_type)
1509 {
1510 	struct address_space *mapping = cc->inode->i_mapping;
1511 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1512 	int submitted, compr_blocks, i;
1513 	int ret = 0;
1514 
1515 	compr_blocks = f2fs_compressed_blocks(cc);
1516 
1517 	for (i = 0; i < cc->cluster_size; i++) {
1518 		if (!cc->rpages[i])
1519 			continue;
1520 
1521 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1522 		unlock_page(cc->rpages[i]);
1523 	}
1524 
1525 	if (compr_blocks < 0)
1526 		return compr_blocks;
1527 
1528 	/* overwrite compressed cluster w/ normal cluster */
1529 	if (compr_blocks > 0)
1530 		f2fs_lock_op(sbi);
1531 
1532 	for (i = 0; i < cc->cluster_size; i++) {
1533 		struct folio *folio;
1534 
1535 		if (!cc->rpages[i])
1536 			continue;
1537 		folio = page_folio(cc->rpages[i]);
1538 retry_write:
1539 		folio_lock(folio);
1540 
1541 		if (folio->mapping != mapping) {
1542 continue_unlock:
1543 			folio_unlock(folio);
1544 			continue;
1545 		}
1546 
1547 		if (!folio_test_dirty(folio))
1548 			goto continue_unlock;
1549 
1550 		if (folio_test_writeback(folio)) {
1551 			if (wbc->sync_mode == WB_SYNC_NONE)
1552 				goto continue_unlock;
1553 			f2fs_folio_wait_writeback(folio, DATA, true, true);
1554 		}
1555 
1556 		if (!folio_clear_dirty_for_io(folio))
1557 			goto continue_unlock;
1558 
1559 		submitted = 0;
1560 		ret = f2fs_write_single_data_page(folio, &submitted,
1561 						NULL, NULL, wbc, io_type,
1562 						compr_blocks, false);
1563 		if (ret) {
1564 			if (ret == 1) {
1565 				ret = 0;
1566 			} else if (ret == -EAGAIN) {
1567 				ret = 0;
1568 				/*
1569 				 * for quota files, just redirty the remaining
1570 				 * pages to avoid a deadlock caused by a cluster
1571 				 * update race with foreground operations.
1572 				 */
1573 				if (IS_NOQUOTA(cc->inode))
1574 					goto out;
1575 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1576 				goto retry_write;
1577 			}
1578 			goto out;
1579 		}
1580 
1581 		*submitted_p += submitted;
1582 	}
1583 
1584 out:
1585 	if (compr_blocks > 0)
1586 		f2fs_unlock_op(sbi);
1587 
1588 	f2fs_balance_fs(sbi, true);
1589 	return ret;
1590 }
1591 
1592 int f2fs_write_multi_pages(struct compress_ctx *cc,
1593 					int *submitted,
1594 					struct writeback_control *wbc,
1595 					enum iostat_type io_type)
1596 {
1597 	int err;
1598 
1599 	*submitted = 0;
1600 	if (cluster_may_compress(cc)) {
1601 		err = f2fs_compress_pages(cc);
1602 		if (err == -EAGAIN) {
1603 			add_compr_block_stat(cc->inode, cc->cluster_size);
1604 			goto write;
1605 		} else if (err) {
1606 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1607 			goto destroy_out;
1608 		}
1609 
1610 		err = f2fs_write_compressed_pages(cc, submitted,
1611 							wbc, io_type);
1612 		if (!err)
1613 			return 0;
1614 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1615 	}
1616 write:
1617 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1618 
1619 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1620 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1621 destroy_out:
1622 	f2fs_destroy_compress_ctx(cc, false);
1623 	return err;
1624 }
1625 
1626 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1627 		bool pre_alloc)
1628 {
1629 	return pre_alloc ^ f2fs_low_mem_mode(sbi);
1630 }
1631 
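/*
 * The XOR above selects exactly one of the two allocation points, shown
 * exhaustively (a direct reading of the expression, where low_mem is
 * f2fs_low_mem_mode(sbi)):
 *
 *	pre_alloc  low_mem   allocate now?
 *	true       false     yes - normal mode allocates up front
 *	true       true      no  - low-memory mode defers
 *	false      false     no  - already allocated up front
 *	false      true      yes - low-memory mode allocates at decompress time
 */
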
1632 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1633 		bool pre_alloc)
1634 {
1635 	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
1636 	int i;
1637 
1638 	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
1639 		return 0;
1640 
1641 	dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
1642 	if (!dic->tpages)
1643 		return -ENOMEM;
1644 
1645 	for (i = 0; i < dic->cluster_size; i++) {
1646 		if (dic->rpages[i]) {
1647 			dic->tpages[i] = dic->rpages[i];
1648 			continue;
1649 		}
1650 
1651 		dic->tpages[i] = f2fs_compress_alloc_page();
1652 	}
1653 
1654 	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1655 	if (!dic->rbuf)
1656 		return -ENOMEM;
1657 
1658 	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1659 	if (!dic->cbuf)
1660 		return -ENOMEM;
1661 
1662 	if (cops->init_decompress_ctx)
1663 		return cops->init_decompress_ctx(dic);
1664 
1665 	return 0;
1666 }
1667 
1668 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1669 		bool bypass_destroy_callback, bool pre_alloc)
1670 {
1671 	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
1672 
1673 	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
1674 		return;
1675 
1676 	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1677 		cops->destroy_decompress_ctx(dic);
1678 
1679 	if (dic->cbuf)
1680 		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1681 
1682 	if (dic->rbuf)
1683 		vm_unmap_ram(dic->rbuf, dic->cluster_size);
1684 }
1685 
1686 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1687 		bool bypass_destroy_callback);
1688 
1689 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1690 {
1691 	struct decompress_io_ctx *dic;
1692 	pgoff_t start_idx = start_idx_of_cluster(cc);
1693 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1694 	int i, ret;
1695 
1696 	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1697 	if (!dic)
1698 		return ERR_PTR(-ENOMEM);
1699 
1700 	dic->rpages = page_array_alloc(sbi, cc->cluster_size);
1701 	if (!dic->rpages) {
1702 		kmem_cache_free(dic_entry_slab, dic);
1703 		return ERR_PTR(-ENOMEM);
1704 	}
1705 
1706 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1707 	dic->inode = cc->inode;
1708 	dic->sbi = sbi;
1709 	dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
1710 	atomic_set(&dic->remaining_pages, cc->nr_cpages);
1711 	dic->cluster_idx = cc->cluster_idx;
1712 	dic->cluster_size = cc->cluster_size;
1713 	dic->log_cluster_size = cc->log_cluster_size;
1714 	dic->nr_cpages = cc->nr_cpages;
1715 	refcount_set(&dic->refcnt, 1);
1716 	dic->failed = false;
1717 	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1718 
1719 	for (i = 0; i < dic->cluster_size; i++)
1720 		dic->rpages[i] = cc->rpages[i];
1721 	dic->nr_rpages = cc->cluster_size;
1722 
1723 	dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
1724 	if (!dic->cpages) {
1725 		ret = -ENOMEM;
1726 		goto out_free;
1727 	}
1728 
1729 	for (i = 0; i < dic->nr_cpages; i++) {
1730 		struct page *page;
1731 
1732 		page = f2fs_compress_alloc_page();
1733 		f2fs_set_compressed_page(page, cc->inode,
1734 					start_idx + i + 1, dic);
1735 		dic->cpages[i] = page;
1736 	}
1737 
1738 	ret = f2fs_prepare_decomp_mem(dic, true);
1739 	if (ret)
1740 		goto out_free;
1741 
1742 	return dic;
1743 
1744 out_free:
1745 	f2fs_free_dic(dic, true);
1746 	return ERR_PTR(ret);
1747 }
1748 
1749 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1750 		bool bypass_destroy_callback)
1751 {
1752 	int i;
1753 	/* use sbi in dic to avoid use-after-free of dic->inode */
1754 	struct f2fs_sb_info *sbi = dic->sbi;
1755 
1756 	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1757 
1758 	if (dic->tpages) {
1759 		for (i = 0; i < dic->cluster_size; i++) {
1760 			if (dic->rpages[i])
1761 				continue;
1762 			if (!dic->tpages[i])
1763 				continue;
1764 			f2fs_compress_free_page(dic->tpages[i]);
1765 		}
1766 		page_array_free(sbi, dic->tpages, dic->cluster_size);
1767 	}
1768 
1769 	if (dic->cpages) {
1770 		for (i = 0; i < dic->nr_cpages; i++) {
1771 			if (!dic->cpages[i])
1772 				continue;
1773 			f2fs_compress_free_page(dic->cpages[i]);
1774 		}
1775 		page_array_free(sbi, dic->cpages, dic->nr_cpages);
1776 	}
1777 
1778 	page_array_free(sbi, dic->rpages, dic->nr_rpages);
1779 	kmem_cache_free(dic_entry_slab, dic);
1780 }
1781 
1782 static void f2fs_late_free_dic(struct work_struct *work)
1783 {
1784 	struct decompress_io_ctx *dic =
1785 		container_of(work, struct decompress_io_ctx, free_work);
1786 
1787 	f2fs_free_dic(dic, false);
1788 }
1789 
1790 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1791 {
1792 	if (refcount_dec_and_test(&dic->refcnt)) {
1793 		if (in_task) {
1794 			f2fs_free_dic(dic, false);
1795 		} else {
1796 			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1797 			queue_work(dic->sbi->post_read_wq, &dic->free_work);
1798 		}
1799 	}
1800 }
1801 
1802 static void f2fs_verify_cluster(struct work_struct *work)
1803 {
1804 	struct decompress_io_ctx *dic =
1805 		container_of(work, struct decompress_io_ctx, verity_work);
1806 	int i;
1807 
1808 	/* Verify, update, and unlock the decompressed pages. */
1809 	for (i = 0; i < dic->cluster_size; i++) {
1810 		struct page *rpage = dic->rpages[i];
1811 
1812 		if (!rpage)
1813 			continue;
1814 
1815 		if (fsverity_verify_page(rpage))
1816 			SetPageUptodate(rpage);
1817 		else
1818 			ClearPageUptodate(rpage);
1819 		unlock_page(rpage);
1820 	}
1821 
1822 	f2fs_put_dic(dic, true);
1823 }
1824 
1825 /*
1826  * This is called when a compressed cluster has been decompressed
1827  * (or failed to be read and/or decompressed).
1828  */
1829 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1830 				bool in_task)
1831 {
1832 	int i;
1833 
1834 	if (!failed && dic->need_verity) {
1835 		/*
1836 		 * Note that to avoid deadlocks, the verity work can't be done
1837 		 * on the decompression workqueue.  This is because verifying
1838 		 * the data pages can involve reading metadata pages from the
1839 		 * file, and these metadata pages may be compressed.
1840 		 */
1841 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1842 		fsverity_enqueue_verify_work(&dic->verity_work);
1843 		return;
1844 	}
1845 
1846 	/* Update and unlock the cluster's pagecache pages. */
1847 	for (i = 0; i < dic->cluster_size; i++) {
1848 		struct page *rpage = dic->rpages[i];
1849 
1850 		if (!rpage)
1851 			continue;
1852 
1853 		if (failed)
1854 			ClearPageUptodate(rpage);
1855 		else
1856 			SetPageUptodate(rpage);
1857 		unlock_page(rpage);
1858 	}
1859 
1860 	/*
1861 	 * Release the reference to the decompress_io_ctx that was being held
1862 	 * for I/O completion.
1863 	 */
1864 	f2fs_put_dic(dic, in_task);
1865 }
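/*
 * A sketch of how this completion is typically reached from the
 * decompression path; example_decompress_cluster() is a hypothetical
 * stand-in for the actual decompression step:
 *
 *	ret = example_decompress_cluster(dic);
 *	f2fs_decompress_end_io(dic, ret != 0, true);
 *
 * On success with verity enabled, the pages are not marked uptodate
 * here; f2fs_verify_cluster() does that from the fs-verity workqueue.
 */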
1866 
1867 /*
1868  * Put a reference to a compressed folio's decompress_io_ctx.
1869  *
1870  * This is called when the folio is no longer needed and can be freed.
1871  */
1872 void f2fs_put_folio_dic(struct folio *folio, bool in_task)
1873 {
1874 	struct decompress_io_ctx *dic = folio->private;
1875 
1876 	f2fs_put_dic(dic, in_task);
1877 }
1878 
1879 /*
1880  * Check whether the cluster's blocks are contiguous; the caller adds an
1881  * extent cache entry only if they are logically and physically contiguous.
1882  */
1883 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
1884 						unsigned int ofs_in_node)
1885 {
1886 	bool compressed = data_blkaddr(dn->inode, dn->node_folio,
1887 					ofs_in_node) == COMPRESS_ADDR;
1888 	int i = compressed ? 1 : 0;
1889 	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
1890 							ofs_in_node + i);
1891 
1892 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1893 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
1894 							ofs_in_node + i);
1895 
1896 		if (!__is_valid_data_blkaddr(blkaddr))
1897 			break;
1898 		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1899 			return 0;
1900 	}
1901 
1902 	return compressed ? i - 1 : i;
1903 }
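/*
 * Worked example, assuming a 4-block compressed cluster whose raw
 * addresses in the node page are:
 *
 *	ofs_in_node + 0: COMPRESS_ADDR (compressed cluster marker)
 *	ofs_in_node + 1: 0x100
 *	ofs_in_node + 2: 0x101
 *	ofs_in_node + 3: 0x102
 *
 * first_blkaddr is 0x100 and each later slot i satisfies
 * first_blkaddr + i - 1 == blkaddr, so the function returns 3
 * contiguous blocks.  If slot 3 held 0x200 instead, it would return 0
 * and the caller would skip the extent cache entry.
 */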
1904 
1905 const struct address_space_operations f2fs_compress_aops = {
1906 	.release_folio = f2fs_release_folio,
1907 	.invalidate_folio = f2fs_invalidate_folio,
1908 	.migrate_folio	= filemap_migrate_folio,
1909 };
1910 
1911 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1912 {
1913 	return sbi->compress_inode->i_mapping;
1914 }
1915 
1916 void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
1917 				block_t blkaddr, unsigned int len)
1918 {
1919 	if (!sbi->compress_inode)
1920 		return;
1921 	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
1922 }
1923 
1924 static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
1925 		struct folio *folio, nid_t ino, block_t blkaddr)
1926 {
1927 	struct folio *cfolio;
1928 	int ret;
1929 
1930 	if (!test_opt(sbi, COMPRESS_CACHE))
1931 		return;
1932 
1933 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1934 		return;
1935 
1936 	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1937 		return;
1938 
1939 	cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
1940 	if (!IS_ERR(cfolio)) {
1941 		f2fs_folio_put(cfolio, false);
1942 		return;
1943 	}
1944 
1945 	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
1946 	if (!cfolio)
1947 		return;
1948 
1949 	ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
1950 						blkaddr, GFP_NOFS);
1951 	if (ret) {
1952 		f2fs_folio_put(cfolio, false);
1953 		return;
1954 	}
1955 
1956 	folio_set_f2fs_data(cfolio, ino);
1957 
1958 	memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
1959 	folio_mark_uptodate(cfolio);
1960 	f2fs_folio_put(cfolio, true);
1961 }
1962 
1963 bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
1964 								block_t blkaddr)
1965 {
1966 	struct folio *cfolio;
1967 	bool hit = false;
1968 
1969 	if (!test_opt(sbi, COMPRESS_CACHE))
1970 		return false;
1971 
1972 	cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
1973 				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1974 	if (!IS_ERR(cfolio)) {
1975 		if (folio_test_uptodate(cfolio)) {
1976 			atomic_inc(&sbi->compress_page_hit);
1977 			memcpy(folio_address(folio),
1978 				folio_address(cfolio), folio_size(folio));
1979 			hit = true;
1980 		}
1981 		f2fs_folio_put(cfolio, true);
1982 	}
1983 
1984 	return hit;
1985 }
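/*
 * A minimal sketch of the intended read-path usage; the surrounding
 * logic is hypothetical:
 *
 *	if (f2fs_load_compressed_folio(sbi, cfolio, blkaddr)) {
 *		... cache hit: cfolio already holds the block's data ...
 *	} else {
 *		... cache miss: submit read I/O for blkaddr as usual ...
 *	}
 *
 * FGP_NOWAIT keeps the lookup non-blocking, so a cache folio that is
 * locked elsewhere is simply treated as a miss.
 */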
1986 
1987 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1988 {
1989 	struct address_space *mapping = COMPRESS_MAPPING(sbi);
1990 	struct folio_batch fbatch;
1991 	pgoff_t index = 0;
1992 	pgoff_t end = MAX_BLKADDR(sbi);
1993 
1994 	if (!mapping->nrpages)
1995 		return;
1996 
1997 	folio_batch_init(&fbatch);
1998 
1999 	do {
2000 		unsigned int nr, i;
2001 
2002 		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
2003 		if (!nr)
2004 			break;
2005 
2006 		for (i = 0; i < nr; i++) {
2007 			struct folio *folio = fbatch.folios[i];
2008 
2009 			folio_lock(folio);
2010 			if (folio->mapping != mapping) {
2011 				folio_unlock(folio);
2012 				continue;
2013 			}
2014 
2015 			if (ino != folio_get_f2fs_data(folio)) {
2016 				folio_unlock(folio);
2017 				continue;
2018 			}
2019 
2020 			generic_error_remove_folio(mapping, folio);
2021 			folio_unlock(folio);
2022 		}
2023 		folio_batch_release(&fbatch);
2024 		cond_resched();
2025 	} while (index < end);
2026 }
2027 
2028 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
2029 {
2030 	struct inode *inode;
2031 
2032 	if (!test_opt(sbi, COMPRESS_CACHE))
2033 		return 0;
2034 
2035 	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
2036 	if (IS_ERR(inode))
2037 		return PTR_ERR(inode);
2038 	sbi->compress_inode = inode;
2039 
2040 	sbi->compress_percent = COMPRESS_PERCENT;
2041 	sbi->compress_watermark = COMPRESS_WATERMARK;
2042 
2043 	atomic_set(&sbi->compress_page_hit, 0);
2044 
2045 	return 0;
2046 }
2047 
2048 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
2049 {
2050 	if (!sbi->compress_inode)
2051 		return;
2052 	iput(sbi->compress_inode);
2053 	sbi->compress_inode = NULL;
2054 }
2055 
2056 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
2057 {
2058 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2059 	char slab_name[35];
2060 
2061 	if (!f2fs_sb_has_compression(sbi))
2062 		return 0;
2063 
2064 	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
2065 
2066 	sbi->page_array_slab_size = sizeof(struct page *) <<
2067 					F2FS_OPTION(sbi).compress_log_size;
2068 
2069 	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
2070 					sbi->page_array_slab_size);
2071 	return sbi->page_array_slab ? 0 : -ENOMEM;
2072 }
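/*
 * Worked example of the sizing above: on a 64-bit kernel with the
 * default compress_log_size of 2 (4-page clusters),
 * page_array_slab_size is 8 << 2 = 32 bytes, i.e. exactly one
 * cluster's worth of struct page pointers per slab object.
 */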
2073 
2074 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
2075 {
2076 	kmem_cache_destroy(sbi->page_array_slab);
2077 }
2078 
2079 int __init f2fs_init_compress_cache(void)
2080 {
2081 	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2082 					sizeof(struct compress_io_ctx));
2083 	if (!cic_entry_slab)
2084 		return -ENOMEM;
2085 	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2086 					sizeof(struct decompress_io_ctx));
2087 	if (!dic_entry_slab)
2088 		goto free_cic;
2089 	return 0;
2090 free_cic:
2091 	kmem_cache_destroy(cic_entry_slab);
2092 	return -ENOMEM;
2093 }
2094 
2095 void f2fs_destroy_compress_cache(void)
2096 {
2097 	kmem_cache_destroy(dic_entry_slab);
2098 	kmem_cache_destroy(cic_entry_slab);
2099 }
2100