// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
{
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, sbi);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
{
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

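/*
 * A usage sketch (names mirror the callers later in this file; this is
 * illustrative, not a contract):
 *
 *	struct page **rpages = page_array_alloc(sbi, cc->cluster_size);
 *	if (!rpages)
 *		return -ENOMEM;
 *	...
 *	page_array_free(sbi, rpages, cc->cluster_size);
 *
 * Requests no larger than sbi->page_array_slab_size are served from a
 * dedicated slab; larger ones fall back to f2fs_kzalloc()/kfree().
 */
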
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
	bool (*is_level_valid)(int level);
};

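/*
 * Rough per-cluster call sequence, as driven by f2fs_compress_pages() and
 * f2fs_decompress_cluster() below (a sketch, not a formal contract; the
 * init/destroy hooks are optional):
 *
 *	init_compress_ctx(cc);
 *	compress_pages(cc);		rbuf/rlen -> cbuf/clen
 *	destroy_compress_ctx(cc);
 *
 *	init_decompress_ctx(dic);
 *	decompress_pages(dic);		cbuf/clen -> rbuf/rlen
 *	destroy_decompress_ctx(dic);
 */
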
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

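/*
 * Illustrative example for the helpers above: with log_cluster_size == 2
 * (a 4-page cluster), page index 37 maps to
 *
 *	offset_in_cluster()    -> 37 & (4 - 1) == 1
 *	cluster_idx()          -> 37 >> 2      == 9
 *	start_idx_of_cluster() -> 9 << 2       == 36
 */
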
bool f2fs_is_compressed_page(struct folio *folio)
{
	if (!folio->private)
		return false;
	if (folio_test_f2fs_nonpointer(folio))
		return false;

	f2fs_bug_on(F2FS_F_SB(folio),
		*((u32 *)folio->private) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	struct folio *folio = page_folio(page);

	folio_attach_private(folio, (void *)data);

	/* i_crypto_info and iv index */
	folio->index = index;
	folio->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, bool unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct folio *f2fs_compress_control_folio(struct folio *folio)
{
	struct compress_io_ctx *ctx = folio->private;

	return page_folio(ctx->rpages[0]);
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->valid_nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, folio->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, folio->index);
	cc->rpages[cluster_ofs] = folio_page(folio, 0);
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, folio->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
					LZO1X_MEM_COMPRESS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	vfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(dic->sbi,
				"lzo decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(dic->sbi,
				"lzo invalid rlen:%zu, expected:%lu",
				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_level)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst-case compressed size, because the lz4 compressor can
	 * handle the output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	vfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len = -EINVAL;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	if (!level)
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
#ifdef CONFIG_F2FS_FS_LZ4HC
	else
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
#endif
	if (len < 0)
		return len;
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		f2fs_err_ratelimited(dic->sbi,
				"lz4 decompress failed, ret:%d", ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		f2fs_err_ratelimited(dic->sbi,
				"lz4 invalid ret:%d, expected:%lu",
				ret, PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static bool lz4_is_level_valid(int lvl)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
#else
	return lvl == 0;
#endif
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
	.is_level_valid		= lz4_is_level_valid,
};
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	zstd_parameters params;
	zstd_cstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;

	/* Keep this for backward compatibility */
	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = zstd_get_params(level, cc->rlen);
	workspace_size = zstd_cstream_workspace_bound(&params.cParams);

	workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_init_cstream failed", __func__);
		vfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	vfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	zstd_cstream *stream = cc->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_compress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	ret = zstd_end_stream(stream, &outbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"%s zstd_end_stream returned %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	/*
	 * Compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = zstd_dstream_workspace_bound(max_window_size);

	workspace = f2fs_vmalloc(dic->sbi, workspace_size);
	if (!workspace)
		return -ENOMEM;

	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
	if (!stream) {
		f2fs_err_ratelimited(dic->sbi,
				"%s zstd_init_dstream failed", __func__);
		vfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	vfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	zstd_dstream *stream = dic->private2;
	zstd_in_buffer inbuf;
	zstd_out_buffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
	if (zstd_is_error(ret)) {
		f2fs_err_ratelimited(dic->sbi,
				"%s zstd_decompress_stream failed, ret: %d",
				__func__, zstd_get_error_code(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		f2fs_err_ratelimited(dic->sbi,
				"%s ZSTD invalid rlen:%zu, expected:%lu",
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static bool zstd_is_level_valid(int lvl)
{
	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
	.is_level_valid		= zstd_is_level_valid,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
				"lzo-rle compress failed, ret:%d", ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

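/*
 * The table above is indexed by the algorithm id stored on disk
 * (COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE); a NULL
 * slot means the algorithm was compiled out, which
 * f2fs_is_compress_backend_ready() below reports to callers.
 */
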
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

bool f2fs_is_compress_level_valid(int alg, int lvl)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[alg];

	if (cops->is_level_valid)
		return cops->is_level_valid(lvl);

	return lvl == 0;
}

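/*
 * A hedged usage sketch for the check above, as done when parsing the
 * compress_level mount option (the variable name is illustrative):
 *
 *	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level))
 *		return -EINVAL;
 *
 * Algorithms without an .is_level_valid hook (lzo, lzo-rle) accept only
 * level 0.
 */
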
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int __init f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	return compress_page_pool ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	struct folio *folio;

	if (!page)
		return;
	folio = page_folio(page);
	folio_detach_private(folio);
	folio->mapping = NULL;
	folio_unlock(folio);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

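/*
 * vm_map_ram() can fail transiently while vmap space is fragmented;
 * vm_unmap_aliases() flushes lazily freed mappings, so retrying up to
 * MAX_VMAP_RETRIES times usually succeeds.
 */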
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
	cc->valid_nr_cpages = cc->nr_cpages;

	cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++)
		cc->cpages[i] = f2fs_compress_alloc_page();

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
		chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->valid_nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(sbi, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

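/*
 * Layout of a compressed cluster as produced above, assuming struct
 * compress_data from f2fs.h (clen, chksum and reserved words followed by
 * the payload):
 *
 *	+------+--------+-------------+------------------+
 *	| clen | chksum | reserved[4] | cdata[clen] ...  |
 *	+------+--------+-------------+------------------+
 *	|<---- COMPRESS_HEADER_SIZE ->|
 *
 * The cluster is written compressed only if header plus payload fit in at
 * most cluster_size - 1 pages; otherwise -EAGAIN makes the caller fall
 * back to writing raw pages.
 */
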
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);

void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
	struct f2fs_sb_info *sbi = dic->sbi;
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	bool bypass_callback = false;
	int ret;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	ret = f2fs_prepare_decomp_mem(dic, false);
	if (ret) {
		bypass_callback = true;
		goto out_release;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;

		/* Avoid f2fs_commit_super in irq context */
		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
		goto out_release;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				f2fs_info_ratelimited(sbi,
					"checksum invalid, nid = %lu, %x vs %x",
					dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_release:
	f2fs_release_decomp_mem(dic, bypass_callback, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret, in_task);
}

static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
		struct folio *folio, nid_t ino, block_t blkaddr);

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct folio *folio, bool failed,
		block_t blkaddr, bool in_task)
{
	struct decompress_io_ctx *dic = folio->private;
	struct f2fs_sb_info *sbi = dic->sbi;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr && in_task)
		f2fs_cache_compressed_page(sbi, folio,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic, in_task);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{
	unsigned long pgidx = page_folio(pages[index])->index;
	int i = uptodate ? 0 : 1;

	/*
	 * When uptodate is true, check whether all pages in the cluster
	 * are uptodate.
	 */
	if (uptodate && (pgidx % cc->cluster_size))
		return false;

	if (nr_pages - index < cc->cluster_size)
		return false;

	for (; i < cc->cluster_size; i++) {
		struct folio *folio = page_folio(pages[index + i]);

		if (folio->index != pgidx + i)
			return false;
		if (uptodate && !folio_test_uptodate(folio))
			return false;
	}

	return true;
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page_folio(page)->index >= nr_pages)
			return true;
	}
	return false;
}

bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	int cluster_end = 0;
	unsigned int count;
	int i;
	char *reason = "";

	if (dn->data_blkaddr != COMPRESS_ADDR)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1, count = 1; i < cluster_size; i++, count++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}
		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}
		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}

	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));

	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
#else
	return false;
#endif
}

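/*
 * In the reason strings used by f2fs_sanity_check_cluster() above, "C"
 * marks a COMPRESS_ADDR slot, "V" a valid data block address, "N" a
 * NULL/NEW address and "*" any value; e.g. "[C|N|N|V]" is a cluster where
 * a valid block follows a hole.
 */
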
static int __f2fs_get_cluster_blocks(struct inode *inode,
					struct dnode_of_data *dn)
{
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, i;

	for (i = 0, count = 0; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							dn->ofs_in_node + i);

		if (__is_valid_data_blkaddr(blkaddr))
			count++;
	}

	return count;
}

static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
				enum cluster_check_type type)
{
	struct dnode_of_data dn;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		if (type == CLUSTER_COMPR_BLKS)
			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
		else if (type == CLUSTER_IS_COMPR)
			ret = 1;
	} else if (type == CLUSTER_RAW_BLKS) {
		ret = __f2fs_get_cluster_blocks(inode, &dn);
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
		CLUSTER_COMPR_BLKS);
}

/* return # of raw blocks in non-compressed cluster */
static int f2fs_decompressed_blocks(struct inode *inode,
				unsigned int cluster_idx)
{
	return __f2fs_cluster_blocks(inode, cluster_idx,
		CLUSTER_RAW_BLKS);
}

/* return whether cluster is compressed one or not */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		CLUSTER_IS_COMPR);
}

/* return whether cluster contains non raw blocks or not */
bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
{
	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;

	return f2fs_decompressed_blocks(inode, cluster_idx) !=
		F2FS_I(inode)->i_cluster_size;
}

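/*
 * A usage sketch for the three query helpers above (values illustrative):
 * for a cluster of size 4 mapped as [COMPRESS_ADDR, blk, blk, NULL],
 * f2fs_compressed_blocks() returns 3 (the header slot plus two compressed
 * blocks), f2fs_is_compressed_cluster() returns 1, and
 * f2fs_is_sparse_cluster() is true whenever fewer than i_cluster_size raw
 * blocks are mapped.
 */
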
static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void cancel_cluster_writeback(struct compress_ctx *cc,
			struct compress_io_ctx *cic, int submitted)
{
	int i;

	/* Wait for submitted IOs. */
	if (submitted > 1) {
		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
		while (atomic_read(&cic->pending_pages) !=
					(cc->valid_nr_cpages - submitted + 1))
			f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
	}

	/* Cancel writeback and stay locked. */
	for (i = 0; i < cc->cluster_size; i++) {
		if (i < submitted) {
			inode_inc_dirty_pages(cc->inode);
			lock_page(cc->rpages[i]);
		}
		clear_page_private_gcing(cc->rpages[i]);
		if (folio_test_writeback(page_folio(cc->rpages[i])))
			end_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i]) {
			set_page_dirty(cc->rpages[i]);
			set_page_private_gcing(cc->rpages[i]);
		}
}

static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct folio *folio;
	sector_t last_block_in_bio;
	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep folio reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		folio = f2fs_filemap_get_folio(mapping, start_idx + i,
				fgp_flag, GFP_NOFS);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			goto unlock_pages;
		}

		if (folio_test_uptodate(folio))
			f2fs_folio_put(folio, true);
		else
			f2fs_compress_ctx_add_page(cc, folio);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, NULL, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_read_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		folio = filemap_lock_folio(mapping, start_idx + i);
		if (IS_ERR(folio)) {
			/* folio could be truncated */
			goto release_and_retry;
		}

		f2fs_folio_wait_writeback(folio, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, folio);

		if (!folio_test_uptodate(folio)) {
			f2fs_handle_page_eio(sbi, folio, DATA);
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	struct folio *folio = page_folio(cc.rpages[0]);
	bool first_index = (index == folio->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, true);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	struct page **rpages;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int i;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	rpages = fsdata;

	for (i = (1 << log_cluster_size) - 1; i >= 0; i--) {
		struct folio *folio = page_folio(rpages[i]);
		loff_t start = (loff_t)folio->index << PAGE_SHIFT;
		loff_t offset = from > start ? from - start : 0;

		folio_zero_segment(folio, offset, folio_size(folio));

		if (from >= start)
			break;
	}

	f2fs_compress_write_end(inode, fsdata, start_idx, true);

	err = filemap_write_and_wait_range(inode->i_mapping,
			round_down(from, 1 << log_cluster_size << PAGE_SHIFT),
			LLONG_MAX);
	if (err)
		return err;

	truncate_pagecache(inode, from);

	return f2fs_do_truncate_blocks(inode, round_up(from, PAGE_SIZE), lock);
}

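/*
 * Illustrative example for the function above: with 4KB pages and
 * log_cluster_size == 2, a truncation point from == 0x5800 lies in page 5,
 * so start_idx == 4 and the cluster covering pages 4..7 is loaded; pages
 * 6..7 are zeroed in full, page 5 from offset 0x800 onward, and blocks are
 * finally truncated at the next page boundary (0x6000).
 */
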
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
									1 : 0,
	};
	struct folio *folio;
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;
	bool quota_inode = IS_NOQUOTA(inode);

	/* bypass data pages so that kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(inode->i_mapping, -EIO);
		goto out_free;
	}

	if (quota_inode) {
		/*
		 * We need to wait for node_write to avoid block allocation
		 * during checkpoint. This can only happen for quota writes,
		 * which can cause the discard race condition below.
		 */
		f2fs_down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_folio,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	folio = page_folio(cc->rpages[last_index]);
	psize = folio_next_pos(folio);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
	cic->rpages = page_array_alloc(sbi, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
				page_folio(cc->rpages[i + 1])->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr, 1);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->valid_nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr, 1);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		fio.submitted = 0;
		f2fs_outplace_write_data(&dn, &fio);
		if (unlikely(!fio.submitted)) {
			cancel_cluster_writeback(cc, cic, i);

			/* To call fscrypt_finalize_bounce_page */
			i = cc->valid_nr_cpages;
			*submitted = 0;
			goto out_destroy_crypt;
		}
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		folio_unlock(fio.folio);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
	add_compr_block_stat(inode, cc->valid_nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);

	f2fs_put_dnode(&dn);
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(sbi, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(sbi, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--) {
		if (!cc->cpages[i])
			continue;
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (quota_inode)
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->valid_nr_cpages; i++) {
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(sbi, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct folio *folio)
{
	struct page *page = &folio->page;
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic = folio->private;
	enum count_type type = WB_DATA_TYPE(folio,
				f2fs_is_compressed_page(folio));
	int i;

	if (unlikely(bio->bi_status != BLK_STS_OK))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, type);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(sbi, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted_p,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int submitted, compr_blocks, i;
	int ret = 0;

	compr_blocks = f2fs_compressed_blocks(cc);

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;

		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}

	if (compr_blocks < 0)
		return compr_blocks;

	/* overwrite compressed cluster w/ normal cluster */
	if (compr_blocks > 0)
		f2fs_lock_op(sbi);

	for (i = 0; i < cc->cluster_size; i++) {
		struct folio *folio;

		if (!cc->rpages[i])
			continue;
		folio = page_folio(cc->rpages[i]);
retry_write:
		folio_lock(folio);

		if (folio->mapping != mapping) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio))
			goto continue_unlock;

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_folio_wait_writeback(folio, DATA, true, true);
		}

		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		submitted = 0;
		ret = f2fs_write_single_data_page(folio, &submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == 1) {
				ret = 0;
			} else if (ret == -EAGAIN) {
				ret = 0;
				/*
				 * for quota files, just redirty the remaining
				 * pages to avoid a deadlock caused by a
				 * cluster update race with foreground
				 * operations.
				 */
				if (IS_NOQUOTA(cc->inode))
					goto out;
				f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
				goto retry_write;
			}
			goto out;
		}

		*submitted_p += submitted;
	}

out:
	if (compr_blocks > 0)
		f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, true);
	return ret;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, true);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, false);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
		bool pre_alloc)
{
	return pre_alloc ^ f2fs_low_mem_mode(sbi);
}

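/*
 * The XOR above encodes the allocation policy: in normal mode the
 * decompression buffers are set up front (pre_alloc == true, at
 * f2fs_alloc_dic() time), while in low-memory mode they are set up only
 * when the cluster is actually decompressed (pre_alloc == false).
 */
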
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
	int i;

	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
		return 0;

	dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
	if (!dic->tpages)
		return -ENOMEM;

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf)
		return -ENOMEM;

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf)
		return -ENOMEM;

	if (cops->init_decompress_ctx)
		return cops->init_decompress_ctx(dic);

	return 0;
}

static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{
	const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];

	if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
		return;

	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);

	if (dic->cbuf)
		vm_unmap_ram(dic->cbuf, dic->nr_cpages);

	if (dic->rbuf)
		vm_unmap_ram(dic->rbuf, dic->cluster_size);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	int i, ret;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(sbi, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	dic->sbi = sbi;
	dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
	if (!dic->cpages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	ret = f2fs_prepare_decomp_mem(dic, true);
	if (ret)
		goto out_free;

	return dic;

out_free:
	f2fs_free_dic(dic, true);
	return ERR_PTR(ret);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{
	int i;
	/* use sbi in dic to avoid use-after-free of dic->inode */
1756 	struct f2fs_sb_info *sbi = dic->sbi;
1757 
1758 	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1759 
1760 	if (dic->tpages) {
1761 		for (i = 0; i < dic->cluster_size; i++) {
1762 			if (dic->rpages[i])
1763 				continue;
1764 			if (!dic->tpages[i])
1765 				continue;
1766 			f2fs_compress_free_page(dic->tpages[i]);
1767 		}
1768 		page_array_free(sbi, dic->tpages, dic->cluster_size);
1769 	}
1770 
1771 	if (dic->cpages) {
1772 		for (i = 0; i < dic->nr_cpages; i++) {
1773 			if (!dic->cpages[i])
1774 				continue;
1775 			f2fs_compress_free_page(dic->cpages[i]);
1776 		}
1777 		page_array_free(sbi, dic->cpages, dic->nr_cpages);
1778 	}
1779 
1780 	page_array_free(sbi, dic->rpages, dic->nr_rpages);
1781 	kmem_cache_free(dic_entry_slab, dic);
1782 }
1783 
f2fs_late_free_dic(struct work_struct * work)1784 static void f2fs_late_free_dic(struct work_struct *work)
1785 {
1786 	struct decompress_io_ctx *dic =
1787 		container_of(work, struct decompress_io_ctx, free_work);
1788 
1789 	f2fs_free_dic(dic, false);
1790 }
1791 
f2fs_put_dic(struct decompress_io_ctx * dic,bool in_task)1792 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1793 {
1794 	if (refcount_dec_and_test(&dic->refcnt)) {
1795 		if (in_task) {
1796 			f2fs_free_dic(dic, false);
1797 		} else {
1798 			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1799 			queue_work(dic->sbi->post_read_wq, &dic->free_work);
1800 		}
1801 	}
1802 }
1803 
f2fs_verify_cluster(struct work_struct * work)1804 static void f2fs_verify_cluster(struct work_struct *work)
1805 {
1806 	struct decompress_io_ctx *dic =
1807 		container_of(work, struct decompress_io_ctx, verity_work);
1808 	int i;
1809 
1810 	/* Verify, update, and unlock the decompressed pages. */
1811 	for (i = 0; i < dic->cluster_size; i++) {
1812 		struct page *rpage = dic->rpages[i];
1813 
1814 		if (!rpage)
1815 			continue;
1816 
1817 		if (fsverity_verify_page(rpage))
1818 			SetPageUptodate(rpage);
1819 		else
1820 			ClearPageUptodate(rpage);
1821 		unlock_page(rpage);
1822 	}
1823 
1824 	f2fs_put_dic(dic, true);
1825 }
1826 
/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	int i;

	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
		return;
	}

	/* Update and unlock the cluster's pagecache pages. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (failed)
			ClearPageUptodate(rpage);
		else
			SetPageUptodate(rpage);
		unlock_page(rpage);
	}

	/*
	 * Release the reference to the decompress_io_ctx that was being held
	 * for I/O completion.
	 */
	f2fs_put_dic(dic, in_task);
}

/*
 * Put a reference to a compressed folio's decompress_io_ctx.
 *
 * This is called when the folio is no longer needed and can be freed.
 */
void f2fs_put_folio_dic(struct folio *folio, bool in_task)
{
	struct decompress_io_ctx *dic = folio->private;

	f2fs_put_dic(dic, in_task);
}

/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
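/*
 * For example, a fully contiguous compressed 4-block cluster looks like
 *
 *	ofs_in_node:	+0		+1	+2	+3
 *	blkaddr:	COMPRESS_ADDR	B	B+1	B+2
 *
 * and 3 is returned.  A valid block that breaks the B, B+1, B+2, ...
 * run makes the function return 0, while a hole simply ends the count.
 */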
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
						unsigned int ofs_in_node)
{
	bool compressed = data_blkaddr(dn->inode, dn->node_folio,
					ofs_in_node) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
							ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}

const struct address_space_operations f2fs_compress_aops = {
	.release_folio = f2fs_release_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};

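/* Address space of the internal inode backing the compressed block cache. */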
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

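/*
 * Drop cached compressed blocks in [blkaddr, blkaddr + len) once the
 * on-disk blocks they shadow are invalidated.
 */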
void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
				block_t blkaddr, unsigned int len)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}

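/*
 * Cache one compressed block in the compress cache inode's pagecache,
 * keyed by block address, so a later read of the same cluster can skip
 * the device I/O.  Bails out silently if the cache is disabled, memory
 * is tight, or the block is already cached.
 */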
static void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
		struct folio *folio, nid_t ino, block_t blkaddr)
{
	struct folio *cfolio;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
	if (!IS_ERR(cfolio)) {
		f2fs_folio_put(cfolio, false);
		return;
	}

	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
	if (!cfolio)
		return;

	ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_folio_put(cfolio, false);
		return;
	}

	folio_set_f2fs_data(cfolio, ino);

	memcpy(folio_address(cfolio), folio_address(folio), PAGE_SIZE);
	folio_mark_uptodate(cfolio);
	f2fs_folio_put(cfolio, true);
}

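/*
 * Look up @blkaddr in the compressed block cache and, on a hit, copy the
 * cached data into @folio.  Returns true on a cache hit, false otherwise.
 * FGP_NOWAIT means a contended cache folio is treated as a miss rather
 * than blocked on.
 */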
bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
								block_t blkaddr)
{
	struct folio *cfolio;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (!IS_ERR(cfolio)) {
		if (folio_test_uptodate(cfolio)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(folio_address(folio),
				folio_address(cfolio), folio_size(folio));
			hitted = true;
		}
		f2fs_folio_put(cfolio, true);
	}

	return hitted;
}

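/*
 * Walk the compressed block cache and drop every folio owned by inode
 * @ino; the owner was recorded via folio_set_f2fs_data() when the block
 * was cached.
 */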
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = COMPRESS_MAPPING(sbi);
	struct folio_batch fbatch;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	folio_batch_init(&fbatch);

	do {
		unsigned int nr, i;

		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
		if (!nr)
			break;

		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			if (folio->mapping != mapping) {
				folio_unlock(folio);
				continue;
			}

			if (ino != folio_get_f2fs_data(folio)) {
				folio_unlock(folio);
				continue;
			}

			generic_error_remove_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	} while (index < end);
}

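/*
 * Instantiate the internal inode that backs the compressed block cache
 * and initialise the cache's memory thresholds and hit counter.  A no-op
 * unless the filesystem is mounted with the compress_cache option.
 */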
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

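/*
 * Create a per-device slab for the page pointer arrays used by
 * compress/decompress contexts, sized to hold one full cluster's worth
 * of page pointers.
 */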
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[35];

	if (!f2fs_sb_has_compression(sbi))
		return 0;

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	return sbi->page_array_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

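/*
 * Create the global slabs for compress_io_ctx and decompress_io_ctx
 * objects; on failure, anything already created is torn down and
 * -ENOMEM is returned.
 */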
int __init f2fs_init_compress_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		goto free_cic;
	return 0;
free_cic:
	kmem_cache_destroy(cic_entry_slab);
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
	kmem_cache_destroy(cic_entry_slab);
}