xref: /linux/fs/f2fs/compress.c (revision d8441523f21375b11a4593a2d89942b407bcb44f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * f2fs compress support
4  *
5  * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/moduleparam.h>
11 #include <linux/writeback.h>
12 #include <linux/backing-dev.h>
13 #include <linux/lzo.h>
14 #include <linux/lz4.h>
15 #include <linux/zstd.h>
16 #include <linux/pagevec.h>
17 
18 #include "f2fs.h"
19 #include "node.h"
20 #include "segment.h"
21 #include <trace/events/f2fs.h>
22 
23 static struct kmem_cache *cic_entry_slab;
24 static struct kmem_cache *dic_entry_slab;
25 
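/*
 * rpages/cpages arrays are allocated from the per-sb page_array_slab
 * when they fit within page_array_slab_size, and fall back to
 * f2fs_kzalloc()/kfree() for larger arrays.
 */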
26 static void *page_array_alloc(struct inode *inode, int nr)
27 {
28 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29 	unsigned int size = sizeof(struct page *) * nr;
30 
31 	if (likely(size <= sbi->page_array_slab_size))
32 		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33 					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
34 	return f2fs_kzalloc(sbi, size, GFP_NOFS);
35 }
36 
37 static void page_array_free(struct inode *inode, void *pages, int nr)
38 {
39 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40 	unsigned int size = sizeof(struct page *) * nr;
41 
42 	if (!pages)
43 		return;
44 
45 	if (likely(size <= sbi->page_array_slab_size))
46 		kmem_cache_free(sbi->page_array_slab, pages);
47 	else
48 		kfree(pages);
49 }
50 
51 struct f2fs_compress_ops {
52 	int (*init_compress_ctx)(struct compress_ctx *cc);
53 	void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 	int (*compress_pages)(struct compress_ctx *cc);
55 	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56 	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
57 	int (*decompress_pages)(struct decompress_io_ctx *dic);
58 	bool (*is_level_valid)(int level);
59 };
60 
61 static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
62 {
63 	return index & (cc->cluster_size - 1);
64 }
65 
66 static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
67 {
68 	return index >> cc->log_cluster_size;
69 }
70 
71 static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
72 {
73 	return cc->cluster_idx << cc->log_cluster_size;
74 }
75 
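/*
 * A compressed page has its page private pointing to a compress_io_ctx
 * or decompress_io_ctx, both of which carry F2FS_COMPRESSED_PAGE_MAGIC;
 * that magic is what the check below relies on.
 */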
76 bool f2fs_is_compressed_page(struct page *page)
77 {
78 	if (!PagePrivate(page))
79 		return false;
80 	if (!page_private(page))
81 		return false;
82 	if (page_private_nonpointer(page))
83 		return false;
84 
85 	f2fs_bug_on(F2FS_P_SB(page),
86 		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
87 	return true;
88 }
89 
90 static void f2fs_set_compressed_page(struct page *page,
91 		struct inode *inode, pgoff_t index, void *data)
92 {
93 	struct folio *folio = page_folio(page);
94 
95 	folio_attach_private(folio, (void *)data);
96 
97 	/* i_crypto_info and iv index */
98 	folio->index = index;
99 	folio->mapping = inode->i_mapping;
100 }
101 
102 static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
103 {
104 	int i;
105 
106 	for (i = 0; i < len; i++) {
107 		if (!cc->rpages[i])
108 			continue;
109 		if (unlock)
110 			unlock_page(cc->rpages[i]);
111 		else
112 			put_page(cc->rpages[i]);
113 	}
114 }
115 
116 static void f2fs_put_rpages(struct compress_ctx *cc)
117 {
118 	f2fs_drop_rpages(cc, cc->cluster_size, false);
119 }
120 
121 static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
122 {
123 	f2fs_drop_rpages(cc, len, true);
124 }
125 
126 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
127 		struct writeback_control *wbc, bool redirty, int unlock)
128 {
129 	unsigned int i;
130 
131 	for (i = 0; i < cc->cluster_size; i++) {
132 		if (!cc->rpages[i])
133 			continue;
134 		if (redirty)
135 			redirty_page_for_writepage(wbc, cc->rpages[i]);
136 		f2fs_put_page(cc->rpages[i], unlock);
137 	}
138 }
139 
140 struct folio *f2fs_compress_control_folio(struct folio *folio)
141 {
142 	struct compress_io_ctx *ctx = folio->private;
143 
144 	return page_folio(ctx->rpages[0]);
145 }
146 
147 int f2fs_init_compress_ctx(struct compress_ctx *cc)
148 {
149 	if (cc->rpages)
150 		return 0;
151 
152 	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
153 	return cc->rpages ? 0 : -ENOMEM;
154 }
155 
156 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
157 {
158 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
159 	cc->rpages = NULL;
160 	cc->nr_rpages = 0;
161 	cc->nr_cpages = 0;
162 	cc->valid_nr_cpages = 0;
163 	if (!reuse)
164 		cc->cluster_idx = NULL_CLUSTER;
165 }
166 
167 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
168 {
169 	unsigned int cluster_ofs;
170 
171 	if (!f2fs_cluster_can_merge_page(cc, folio->index))
172 		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
173 
174 	cluster_ofs = offset_in_cluster(cc, folio->index);
175 	cc->rpages[cluster_ofs] = folio_page(folio, 0);
176 	cc->nr_rpages++;
177 	cc->cluster_idx = cluster_idx(cc, folio->index);
178 }
179 
180 #ifdef CONFIG_F2FS_FS_LZO
181 static int lzo_init_compress_ctx(struct compress_ctx *cc)
182 {
183 	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode),
184 					LZO1X_MEM_COMPRESS);
185 	if (!cc->private)
186 		return -ENOMEM;
187 
188 	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
189 	return 0;
190 }
191 
192 static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
193 {
194 	vfree(cc->private);
195 	cc->private = NULL;
196 }
197 
198 static int lzo_compress_pages(struct compress_ctx *cc)
199 {
200 	int ret;
201 
202 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
203 					&cc->clen, cc->private);
204 	if (ret != LZO_E_OK) {
205 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
206 				"lzo compress failed, ret:%d", ret);
207 		return -EIO;
208 	}
209 	return 0;
210 }
211 
212 static int lzo_decompress_pages(struct decompress_io_ctx *dic)
213 {
214 	int ret;
215 
216 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
217 						dic->rbuf, &dic->rlen);
218 	if (ret != LZO_E_OK) {
219 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
220 				"lzo decompress failed, ret:%d", ret);
221 		return -EIO;
222 	}
223 
224 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
225 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
226 				"lzo invalid rlen:%zu, expected:%lu",
227 				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
228 		return -EIO;
229 	}
230 	return 0;
231 }
232 
233 static const struct f2fs_compress_ops f2fs_lzo_ops = {
234 	.init_compress_ctx	= lzo_init_compress_ctx,
235 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
236 	.compress_pages		= lzo_compress_pages,
237 	.decompress_pages	= lzo_decompress_pages,
238 };
239 #endif
240 
241 #ifdef CONFIG_F2FS_FS_LZ4
242 static int lz4_init_compress_ctx(struct compress_ctx *cc)
243 {
244 	unsigned int size = LZ4_MEM_COMPRESS;
245 
246 #ifdef CONFIG_F2FS_FS_LZ4HC
247 	if (F2FS_I(cc->inode)->i_compress_level)
248 		size = LZ4HC_MEM_COMPRESS;
249 #endif
250 
251 	cc->private = f2fs_vmalloc(F2FS_I_SB(cc->inode), size);
252 	if (!cc->private)
253 		return -ENOMEM;
254 
255 	/*
256 	 * we do not set cc->clen to LZ4_compressBound(inputsize) to cover
257 	 * the worst-case compressed size, because the lz4 compressor can
258 	 * handle the output budget properly.
259 	 */
260 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
261 	return 0;
262 }
263 
264 static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
265 {
266 	vfree(cc->private);
267 	cc->private = NULL;
268 }
269 
270 static int lz4_compress_pages(struct compress_ctx *cc)
271 {
272 	int len = -EINVAL;
273 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
274 
275 	if (!level)
276 		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
277 						cc->clen, cc->private);
278 #ifdef CONFIG_F2FS_FS_LZ4HC
279 	else
280 		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
281 					cc->clen, level, cc->private);
282 #endif
283 	if (len < 0)
284 		return len;
285 	if (!len)
286 		return -EAGAIN;
287 
288 	cc->clen = len;
289 	return 0;
290 }
291 
292 static int lz4_decompress_pages(struct decompress_io_ctx *dic)
293 {
294 	int ret;
295 
296 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
297 						dic->clen, dic->rlen);
298 	if (ret < 0) {
299 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
300 				"lz4 decompress failed, ret:%d", ret);
301 		return -EIO;
302 	}
303 
304 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
305 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
306 				"lz4 invalid ret:%d, expected:%lu",
307 				ret, PAGE_SIZE << dic->log_cluster_size);
308 		return -EIO;
309 	}
310 	return 0;
311 }
312 
313 static bool lz4_is_level_valid(int lvl)
314 {
315 #ifdef CONFIG_F2FS_FS_LZ4HC
316 	return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
317 #else
318 	return lvl == 0;
319 #endif
320 }
321 
322 static const struct f2fs_compress_ops f2fs_lz4_ops = {
323 	.init_compress_ctx	= lz4_init_compress_ctx,
324 	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
325 	.compress_pages		= lz4_compress_pages,
326 	.decompress_pages	= lz4_decompress_pages,
327 	.is_level_valid		= lz4_is_level_valid,
328 };
329 #endif
330 
331 #ifdef CONFIG_F2FS_FS_ZSTD
332 static int zstd_init_compress_ctx(struct compress_ctx *cc)
333 {
334 	zstd_parameters params;
335 	zstd_cstream *stream;
336 	void *workspace;
337 	unsigned int workspace_size;
338 	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
339 
340 	/* Need to keep this for backward compatibility */
341 	if (!level)
342 		level = F2FS_ZSTD_DEFAULT_CLEVEL;
343 
344 	params = zstd_get_params(level, cc->rlen);
345 	workspace_size = zstd_cstream_workspace_bound(&params.cParams);
346 
347 	workspace = f2fs_vmalloc(F2FS_I_SB(cc->inode), workspace_size);
348 	if (!workspace)
349 		return -ENOMEM;
350 
351 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
352 	if (!stream) {
353 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
354 				"%s zstd_init_cstream failed", __func__);
355 		vfree(workspace);
356 		return -EIO;
357 	}
358 
359 	cc->private = workspace;
360 	cc->private2 = stream;
361 
362 	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
363 	return 0;
364 }
365 
366 static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
367 {
368 	vfree(cc->private);
369 	cc->private = NULL;
370 	cc->private2 = NULL;
371 }
372 
373 static int zstd_compress_pages(struct compress_ctx *cc)
374 {
375 	zstd_cstream *stream = cc->private2;
376 	zstd_in_buffer inbuf;
377 	zstd_out_buffer outbuf;
378 	int src_size = cc->rlen;
379 	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
380 	int ret;
381 
382 	inbuf.pos = 0;
383 	inbuf.src = cc->rbuf;
384 	inbuf.size = src_size;
385 
386 	outbuf.pos = 0;
387 	outbuf.dst = cc->cbuf->cdata;
388 	outbuf.size = dst_size;
389 
390 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
391 	if (zstd_is_error(ret)) {
392 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
393 				"%s zstd_compress_stream failed, ret: %d",
394 				__func__, zstd_get_error_code(ret));
395 		return -EIO;
396 	}
397 
398 	ret = zstd_end_stream(stream, &outbuf);
399 	if (zstd_is_error(ret)) {
400 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
401 				"%s zstd_end_stream returned %d",
402 				__func__, zstd_get_error_code(ret));
403 		return -EIO;
404 	}
405 
406 	/*
407 	 * compressed data remains in the intermediate buffer because there
408 	 * is no more space left in cbuf.cdata
409 	 */
410 	if (ret)
411 		return -EAGAIN;
412 
413 	cc->clen = outbuf.pos;
414 	return 0;
415 }
416 
417 static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
418 {
419 	zstd_dstream *stream;
420 	void *workspace;
421 	unsigned int workspace_size;
422 	unsigned int max_window_size =
423 			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
424 
425 	workspace_size = zstd_dstream_workspace_bound(max_window_size);
426 
427 	workspace = f2fs_vmalloc(F2FS_I_SB(dic->inode), workspace_size);
428 	if (!workspace)
429 		return -ENOMEM;
430 
431 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
432 	if (!stream) {
433 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
434 				"%s zstd_init_dstream failed", __func__);
435 		vfree(workspace);
436 		return -EIO;
437 	}
438 
439 	dic->private = workspace;
440 	dic->private2 = stream;
441 
442 	return 0;
443 }
444 
445 static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
446 {
447 	vfree(dic->private);
448 	dic->private = NULL;
449 	dic->private2 = NULL;
450 }
451 
452 static int zstd_decompress_pages(struct decompress_io_ctx *dic)
453 {
454 	zstd_dstream *stream = dic->private2;
455 	zstd_in_buffer inbuf;
456 	zstd_out_buffer outbuf;
457 	int ret;
458 
459 	inbuf.pos = 0;
460 	inbuf.src = dic->cbuf->cdata;
461 	inbuf.size = dic->clen;
462 
463 	outbuf.pos = 0;
464 	outbuf.dst = dic->rbuf;
465 	outbuf.size = dic->rlen;
466 
467 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
468 	if (zstd_is_error(ret)) {
469 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
470 				"%s zstd_decompress_stream failed, ret: %d",
471 				__func__, zstd_get_error_code(ret));
472 		return -EIO;
473 	}
474 
475 	if (dic->rlen != outbuf.pos) {
476 		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
477 				"%s ZSTD invalid rlen:%zu, expected:%lu",
478 				__func__, dic->rlen,
479 				PAGE_SIZE << dic->log_cluster_size);
480 		return -EIO;
481 	}
482 
483 	return 0;
484 }
485 
486 static bool zstd_is_level_valid(int lvl)
487 {
488 	return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
489 }
490 
491 static const struct f2fs_compress_ops f2fs_zstd_ops = {
492 	.init_compress_ctx	= zstd_init_compress_ctx,
493 	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
494 	.compress_pages		= zstd_compress_pages,
495 	.init_decompress_ctx	= zstd_init_decompress_ctx,
496 	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
497 	.decompress_pages	= zstd_decompress_pages,
498 	.is_level_valid		= zstd_is_level_valid,
499 };
500 #endif
501 
502 #ifdef CONFIG_F2FS_FS_LZO
503 #ifdef CONFIG_F2FS_FS_LZORLE
504 static int lzorle_compress_pages(struct compress_ctx *cc)
505 {
506 	int ret;
507 
508 	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
509 					&cc->clen, cc->private);
510 	if (ret != LZO_E_OK) {
511 		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
512 				"lzo-rle compress failed, ret:%d", ret);
513 		return -EIO;
514 	}
515 	return 0;
516 }
517 
518 static const struct f2fs_compress_ops f2fs_lzorle_ops = {
519 	.init_compress_ctx	= lzo_init_compress_ctx,
520 	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
521 	.compress_pages		= lzorle_compress_pages,
522 	.decompress_pages	= lzo_decompress_pages,
523 };
524 #endif
525 #endif
526 
527 static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
528 #ifdef CONFIG_F2FS_FS_LZO
529 	&f2fs_lzo_ops,
530 #else
531 	NULL,
532 #endif
533 #ifdef CONFIG_F2FS_FS_LZ4
534 	&f2fs_lz4_ops,
535 #else
536 	NULL,
537 #endif
538 #ifdef CONFIG_F2FS_FS_ZSTD
539 	&f2fs_zstd_ops,
540 #else
541 	NULL,
542 #endif
543 #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
544 	&f2fs_lzorle_ops,
545 #else
546 	NULL,
547 #endif
548 };
549 
550 bool f2fs_is_compress_backend_ready(struct inode *inode)
551 {
552 	if (!f2fs_compressed_file(inode))
553 		return true;
554 	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
555 }
556 
557 bool f2fs_is_compress_level_valid(int alg, int lvl)
558 {
559 	const struct f2fs_compress_ops *cops = f2fs_cops[alg];
560 
561 	if (cops->is_level_valid)
562 		return cops->is_level_valid(lvl);
563 
564 	return lvl == 0;
565 }
566 
567 static mempool_t *compress_page_pool;
568 static int num_compress_pages = 512;
569 module_param(num_compress_pages, uint, 0444);
570 MODULE_PARM_DESC(num_compress_pages,
571 		"Number of intermediate compress pages to preallocate");
572 
573 int __init f2fs_init_compress_mempool(void)
574 {
575 	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
576 	return compress_page_pool ? 0 : -ENOMEM;
577 }
578 
579 void f2fs_destroy_compress_mempool(void)
580 {
581 	mempool_destroy(compress_page_pool);
582 }
583 
584 static struct page *f2fs_compress_alloc_page(void)
585 {
586 	struct page *page;
587 
588 	page = mempool_alloc(compress_page_pool, GFP_NOFS);
589 	lock_page(page);
590 
591 	return page;
592 }
593 
594 static void f2fs_compress_free_page(struct page *page)
595 {
596 	struct folio *folio;
597 
598 	if (!page)
599 		return;
600 	folio = page_folio(page);
601 	folio_detach_private(folio);
602 	folio->mapping = NULL;
603 	folio_unlock(folio);
604 	mempool_free(page, compress_page_pool);
605 }
606 
607 #define MAX_VMAP_RETRIES	3
608 
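/*
 * vm_map_ram() may fail transiently (e.g. while vmap space is held by
 * lazily freed mappings), so retry a few times and flush stale aliases
 * with vm_unmap_aliases() in between.
 */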
609 static void *f2fs_vmap(struct page **pages, unsigned int count)
610 {
611 	int i;
612 	void *buf = NULL;
613 
614 	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
615 		buf = vm_map_ram(pages, count, -1);
616 		if (buf)
617 			break;
618 		vm_unmap_aliases();
619 	}
620 	return buf;
621 }
622 
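/*
 * Compress one cluster: map rpages and cpages into contiguous buffers,
 * run the per-algorithm compressor, fill in the cluster header (clen,
 * chksum, reserved fields), zero the unused tail of the last cpage and
 * free any cpages that are no longer needed.
 */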
623 static int f2fs_compress_pages(struct compress_ctx *cc)
624 {
625 	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
626 	const struct f2fs_compress_ops *cops =
627 				f2fs_cops[fi->i_compress_algorithm];
628 	unsigned int max_len, new_nr_cpages;
629 	u32 chksum = 0;
630 	int i, ret;
631 
632 	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
633 				cc->cluster_size, fi->i_compress_algorithm);
634 
635 	if (cops->init_compress_ctx) {
636 		ret = cops->init_compress_ctx(cc);
637 		if (ret)
638 			goto out;
639 	}
640 
641 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
642 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
643 	cc->valid_nr_cpages = cc->nr_cpages;
644 
645 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
646 	if (!cc->cpages) {
647 		ret = -ENOMEM;
648 		goto destroy_compress_ctx;
649 	}
650 
651 	for (i = 0; i < cc->nr_cpages; i++)
652 		cc->cpages[i] = f2fs_compress_alloc_page();
653 
654 	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
655 	if (!cc->rbuf) {
656 		ret = -ENOMEM;
657 		goto out_free_cpages;
658 	}
659 
660 	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
661 	if (!cc->cbuf) {
662 		ret = -ENOMEM;
663 		goto out_vunmap_rbuf;
664 	}
665 
666 	ret = cops->compress_pages(cc);
667 	if (ret)
668 		goto out_vunmap_cbuf;
669 
670 	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
671 
672 	if (cc->clen > max_len) {
673 		ret = -EAGAIN;
674 		goto out_vunmap_cbuf;
675 	}
676 
677 	cc->cbuf->clen = cpu_to_le32(cc->clen);
678 
679 	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
680 		chksum = f2fs_crc32(cc->cbuf->cdata, cc->clen);
681 	cc->cbuf->chksum = cpu_to_le32(chksum);
682 
683 	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
684 		cc->cbuf->reserved[i] = cpu_to_le32(0);
685 
686 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
687 
688 	/* zero out any unused part of the last page */
689 	memset(&cc->cbuf->cdata[cc->clen], 0,
690 			(new_nr_cpages * PAGE_SIZE) -
691 			(cc->clen + COMPRESS_HEADER_SIZE));
692 
693 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
694 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
695 
696 	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
697 		f2fs_compress_free_page(cc->cpages[i]);
698 		cc->cpages[i] = NULL;
699 	}
700 
701 	if (cops->destroy_compress_ctx)
702 		cops->destroy_compress_ctx(cc);
703 
704 	cc->valid_nr_cpages = new_nr_cpages;
705 
706 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
707 							cc->clen, ret);
708 	return 0;
709 
710 out_vunmap_cbuf:
711 	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
712 out_vunmap_rbuf:
713 	vm_unmap_ram(cc->rbuf, cc->cluster_size);
714 out_free_cpages:
715 	for (i = 0; i < cc->nr_cpages; i++) {
716 		if (cc->cpages[i])
717 			f2fs_compress_free_page(cc->cpages[i]);
718 	}
719 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
720 	cc->cpages = NULL;
721 destroy_compress_ctx:
722 	if (cops->destroy_compress_ctx)
723 		cops->destroy_compress_ctx(cc);
724 out:
725 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
726 							cc->clen, ret);
727 	return ret;
728 }
729 
730 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
731 		bool pre_alloc);
732 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
733 		bool bypass_destroy_callback, bool pre_alloc);
734 
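/*
 * Decompress one cluster into its target pages: validate the stored
 * clen, run the per-algorithm decompressor, optionally verify the
 * checksum, then complete the read via f2fs_decompress_end_io().
 */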
735 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
736 {
737 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
738 	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
739 	const struct f2fs_compress_ops *cops =
740 			f2fs_cops[fi->i_compress_algorithm];
741 	bool bypass_callback = false;
742 	int ret;
743 
744 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
745 				dic->cluster_size, fi->i_compress_algorithm);
746 
747 	if (dic->failed) {
748 		ret = -EIO;
749 		goto out_end_io;
750 	}
751 
752 	ret = f2fs_prepare_decomp_mem(dic, false);
753 	if (ret) {
754 		bypass_callback = true;
755 		goto out_release;
756 	}
757 
758 	dic->clen = le32_to_cpu(dic->cbuf->clen);
759 	dic->rlen = PAGE_SIZE << dic->log_cluster_size;
760 
761 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
762 		ret = -EFSCORRUPTED;
763 
764 		/* Avoid f2fs_commit_super in irq context */
765 		if (!in_task)
766 			f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION);
767 		else
768 			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
769 		goto out_release;
770 	}
771 
772 	ret = cops->decompress_pages(dic);
773 
774 	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
775 		u32 provided = le32_to_cpu(dic->cbuf->chksum);
776 		u32 calculated = f2fs_crc32(dic->cbuf->cdata, dic->clen);
777 
778 		if (provided != calculated) {
779 			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
780 				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
781 				f2fs_info_ratelimited(sbi,
782 					"checksum invalid, nid = %lu, %x vs %x",
783 					dic->inode->i_ino,
784 					provided, calculated);
785 			}
786 			set_sbi_flag(sbi, SBI_NEED_FSCK);
787 		}
788 	}
789 
790 out_release:
791 	f2fs_release_decomp_mem(dic, bypass_callback, false);
792 
793 out_end_io:
794 	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
795 							dic->clen, ret);
796 	f2fs_decompress_end_io(dic, ret, in_task);
797 }
798 
799 /*
800  * This is called when a page of a compressed cluster has been read from disk
801  * (or failed to be read from disk).  It checks whether this page was the last
802  * page being waited on in the cluster, and if so, it decompresses the cluster
803  * (or in the case of a failure, cleans up without actually decompressing).
804  */
805 void f2fs_end_read_compressed_page(struct page *page, bool failed,
806 		block_t blkaddr, bool in_task)
807 {
808 	struct decompress_io_ctx *dic =
809 			(struct decompress_io_ctx *)page_private(page);
810 	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
811 
812 	dec_page_count(sbi, F2FS_RD_DATA);
813 
814 	if (failed)
815 		WRITE_ONCE(dic->failed, true);
816 	else if (blkaddr && in_task)
817 		f2fs_cache_compressed_page(sbi, page,
818 					dic->inode->i_ino, blkaddr);
819 
820 	if (atomic_dec_and_test(&dic->remaining_pages))
821 		f2fs_decompress_cluster(dic, in_task);
822 }
823 
824 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
825 {
826 	if (cc->cluster_idx == NULL_CLUSTER)
827 		return true;
828 	return cc->cluster_idx == cluster_idx(cc, index);
829 }
830 
831 bool f2fs_cluster_is_empty(struct compress_ctx *cc)
832 {
833 	return cc->nr_rpages == 0;
834 }
835 
836 static bool f2fs_cluster_is_full(struct compress_ctx *cc)
837 {
838 	return cc->cluster_size == cc->nr_rpages;
839 }
840 
841 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
842 {
843 	if (f2fs_cluster_is_empty(cc))
844 		return true;
845 	return is_page_in_cluster(cc, index);
846 }
847 
848 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
849 				int index, int nr_pages, bool uptodate)
850 {
851 	unsigned long pgidx = page_folio(pages[index])->index;
852 	int i = uptodate ? 0 : 1;
853 
854 	/*
855 	 * when uptodate is set to true, check whether all pages in the
856 	 * cluster are uptodate or not.
857 	 */
858 	if (uptodate && (pgidx % cc->cluster_size))
859 		return false;
860 
861 	if (nr_pages - index < cc->cluster_size)
862 		return false;
863 
864 	for (; i < cc->cluster_size; i++) {
865 		struct folio *folio = page_folio(pages[index + i]);
866 
867 		if (folio->index != pgidx + i)
868 			return false;
869 		if (uptodate && !folio_test_uptodate(folio))
870 			return false;
871 	}
872 
873 	return true;
874 }
875 
876 static bool cluster_has_invalid_data(struct compress_ctx *cc)
877 {
878 	loff_t i_size = i_size_read(cc->inode);
879 	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
880 	int i;
881 
882 	for (i = 0; i < cc->cluster_size; i++) {
883 		struct page *page = cc->rpages[i];
884 
885 		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
886 
887 		/* beyond EOF */
888 		if (page_folio(page)->index >= nr_pages)
889 			return true;
890 	}
891 	return false;
892 }
893 
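/*
 * With CONFIG_F2FS_CHECK_FS, verify the on-disk layout of a compressed
 * cluster: COMPRESS_ADDR may only appear in the first slot, and no
 * valid block address may follow a NULL/NEW address hole.  Returns
 * true (and sets SBI_NEED_FSCK) if the layout is invalid.
 */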
894 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
895 {
896 #ifdef CONFIG_F2FS_CHECK_FS
897 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
898 	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
899 	int cluster_end = 0;
900 	unsigned int count;
901 	int i;
902 	char *reason = "";
903 
904 	if (dn->data_blkaddr != COMPRESS_ADDR)
905 		return false;
906 
907 	/* [..., COMPR_ADDR, ...] */
908 	if (dn->ofs_in_node % cluster_size) {
909 		reason = "[*|C|*|*]";
910 		goto out;
911 	}
912 
913 	for (i = 1, count = 1; i < cluster_size; i++, count++) {
914 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
915 							dn->ofs_in_node + i);
916 
917 		/* [COMPR_ADDR, ..., COMPR_ADDR] */
918 		if (blkaddr == COMPRESS_ADDR) {
919 			reason = "[C|*|C|*]";
920 			goto out;
921 		}
922 		if (!__is_valid_data_blkaddr(blkaddr)) {
923 			if (!cluster_end)
924 				cluster_end = i;
925 			continue;
926 		}
927 		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
928 		if (cluster_end) {
929 			reason = "[C|N|N|V]";
930 			goto out;
931 		}
932 	}
933 
934 	f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
935 		!is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
936 
937 	return false;
938 out:
939 	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
940 			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
941 	set_sbi_flag(sbi, SBI_NEED_FSCK);
942 	return true;
943 #else
944 	return false;
945 #endif
946 }
947 
948 static int __f2fs_get_cluster_blocks(struct inode *inode,
949 					struct dnode_of_data *dn)
950 {
951 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
952 	int count, i;
953 
954 	for (i = 0, count = 0; i < cluster_size; i++) {
955 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
956 							dn->ofs_in_node + i);
957 
958 		if (__is_valid_data_blkaddr(blkaddr))
959 			count++;
960 	}
961 
962 	return count;
963 }
964 
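/*
 * Look up the cluster's dnode and, depending on @type, return the
 * number of compressed blocks (including the COMPRESS_ADDR slot),
 * whether the cluster is compressed, or the number of raw blocks.
 * A missing dnode (-ENOENT) is treated as zero.
 */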
965 static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
966 				enum cluster_check_type type)
967 {
968 	struct dnode_of_data dn;
969 	unsigned int start_idx = cluster_idx <<
970 				F2FS_I(inode)->i_log_cluster_size;
971 	int ret;
972 
973 	set_new_dnode(&dn, inode, NULL, NULL, 0);
974 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
975 	if (ret) {
976 		if (ret == -ENOENT)
977 			ret = 0;
978 		goto fail;
979 	}
980 
981 	if (f2fs_sanity_check_cluster(&dn)) {
982 		ret = -EFSCORRUPTED;
983 		goto fail;
984 	}
985 
986 	if (dn.data_blkaddr == COMPRESS_ADDR) {
987 		if (type == CLUSTER_COMPR_BLKS)
988 			ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
989 		else if (type == CLUSTER_IS_COMPR)
990 			ret = 1;
991 	} else if (type == CLUSTER_RAW_BLKS) {
992 		ret = __f2fs_get_cluster_blocks(inode, &dn);
993 	}
994 fail:
995 	f2fs_put_dnode(&dn);
996 	return ret;
997 }
998 
999 /* return # of compressed blocks in compressed cluster */
1000 static int f2fs_compressed_blocks(struct compress_ctx *cc)
1001 {
1002 	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
1003 		CLUSTER_COMPR_BLKS);
1004 }
1005 
1006 /* return # of raw blocks in non-compressed cluster */
1007 static int f2fs_decompressed_blocks(struct inode *inode,
1008 				unsigned int cluster_idx)
1009 {
1010 	return __f2fs_cluster_blocks(inode, cluster_idx,
1011 		CLUSTER_RAW_BLKS);
1012 }
1013 
1014 /* return whether the cluster is a compressed one or not */
1015 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
1016 {
1017 	return __f2fs_cluster_blocks(inode,
1018 		index >> F2FS_I(inode)->i_log_cluster_size,
1019 		CLUSTER_IS_COMPR);
1020 }
1021 
1022 /* return whether the cluster contains non-raw blocks or not */
1023 bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
1024 {
1025 	unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
1026 
1027 	return f2fs_decompressed_blocks(inode, cluster_idx) !=
1028 		F2FS_I(inode)->i_cluster_size;
1029 }
1030 
1031 static bool cluster_may_compress(struct compress_ctx *cc)
1032 {
1033 	if (!f2fs_need_compress_data(cc->inode))
1034 		return false;
1035 	if (f2fs_is_atomic_file(cc->inode))
1036 		return false;
1037 	if (!f2fs_cluster_is_full(cc))
1038 		return false;
1039 	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
1040 		return false;
1041 	return !cluster_has_invalid_data(cc);
1042 }
1043 
1044 static void set_cluster_writeback(struct compress_ctx *cc)
1045 {
1046 	int i;
1047 
1048 	for (i = 0; i < cc->cluster_size; i++) {
1049 		if (cc->rpages[i])
1050 			set_page_writeback(cc->rpages[i]);
1051 	}
1052 }
1053 
1054 static void cancel_cluster_writeback(struct compress_ctx *cc,
1055 			struct compress_io_ctx *cic, int submitted)
1056 {
1057 	int i;
1058 
1059 	/* Wait for submitted IOs. */
1060 	if (submitted > 1) {
1061 		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
1062 		while (atomic_read(&cic->pending_pages) !=
1063 					(cc->valid_nr_cpages - submitted + 1))
1064 			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1065 	}
1066 
1067 	/* Cancel writeback and stay locked. */
1068 	for (i = 0; i < cc->cluster_size; i++) {
1069 		if (i < submitted) {
1070 			inode_inc_dirty_pages(cc->inode);
1071 			lock_page(cc->rpages[i]);
1072 		}
1073 		clear_page_private_gcing(cc->rpages[i]);
1074 		if (folio_test_writeback(page_folio(cc->rpages[i])))
1075 			end_page_writeback(cc->rpages[i]);
1076 	}
1077 }
1078 
1079 static void set_cluster_dirty(struct compress_ctx *cc)
1080 {
1081 	int i;
1082 
1083 	for (i = 0; i < cc->cluster_size; i++)
1084 		if (cc->rpages[i]) {
1085 			set_page_dirty(cc->rpages[i]);
1086 			set_page_private_gcing(cc->rpages[i]);
1087 		}
1088 }
1089 
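/*
 * Pin and lock all pages of a compressed cluster before an overwrite:
 * read in pages that are not yet uptodate, then re-lock the whole
 * cluster, retrying from scratch if a page was truncated or failed to
 * become uptodate.
 */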
1090 static int prepare_compress_overwrite(struct compress_ctx *cc,
1091 		struct page **pagep, pgoff_t index, void **fsdata)
1092 {
1093 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1094 	struct address_space *mapping = cc->inode->i_mapping;
1095 	struct folio *folio;
1096 	sector_t last_block_in_bio;
1097 	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1098 	pgoff_t start_idx = start_idx_of_cluster(cc);
1099 	int i, ret;
1100 
1101 retry:
1102 	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
1103 	if (ret <= 0)
1104 		return ret;
1105 
1106 	ret = f2fs_init_compress_ctx(cc);
1107 	if (ret)
1108 		return ret;
1109 
1110 	/* keep folio reference to avoid page reclaim */
1111 	for (i = 0; i < cc->cluster_size; i++) {
1112 		folio = f2fs_filemap_get_folio(mapping, start_idx + i,
1113 				fgp_flag, GFP_NOFS);
1114 		if (IS_ERR(folio)) {
1115 			ret = PTR_ERR(folio);
1116 			goto unlock_pages;
1117 		}
1118 
1119 		if (folio_test_uptodate(folio))
1120 			f2fs_folio_put(folio, true);
1121 		else
1122 			f2fs_compress_ctx_add_page(cc, folio);
1123 	}
1124 
1125 	if (!f2fs_cluster_is_empty(cc)) {
1126 		struct bio *bio = NULL;
1127 
1128 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
1129 					&last_block_in_bio, NULL, true);
1130 		f2fs_put_rpages(cc);
1131 		f2fs_destroy_compress_ctx(cc, true);
1132 		if (ret)
1133 			goto out;
1134 		if (bio)
1135 			f2fs_submit_read_bio(sbi, bio, DATA);
1136 
1137 		ret = f2fs_init_compress_ctx(cc);
1138 		if (ret)
1139 			goto out;
1140 	}
1141 
1142 	for (i = 0; i < cc->cluster_size; i++) {
1143 		f2fs_bug_on(sbi, cc->rpages[i]);
1144 
1145 		folio = filemap_lock_folio(mapping, start_idx + i);
1146 		if (IS_ERR(folio)) {
1147 			/* folio could be truncated */
1148 			goto release_and_retry;
1149 		}
1150 
1151 		f2fs_folio_wait_writeback(folio, DATA, true, true);
1152 		f2fs_compress_ctx_add_page(cc, folio);
1153 
1154 		if (!folio_test_uptodate(folio)) {
1155 			f2fs_handle_page_eio(sbi, folio, DATA);
1156 release_and_retry:
1157 			f2fs_put_rpages(cc);
1158 			f2fs_unlock_rpages(cc, i + 1);
1159 			f2fs_destroy_compress_ctx(cc, true);
1160 			goto retry;
1161 		}
1162 	}
1163 
1164 	if (likely(!ret)) {
1165 		*fsdata = cc->rpages;
1166 		*pagep = cc->rpages[offset_in_cluster(cc, index)];
1167 		return cc->cluster_size;
1168 	}
1169 
1170 unlock_pages:
1171 	f2fs_put_rpages(cc);
1172 	f2fs_unlock_rpages(cc, i);
1173 	f2fs_destroy_compress_ctx(cc, true);
1174 out:
1175 	return ret;
1176 }
1177 
1178 int f2fs_prepare_compress_overwrite(struct inode *inode,
1179 		struct page **pagep, pgoff_t index, void **fsdata)
1180 {
1181 	struct compress_ctx cc = {
1182 		.inode = inode,
1183 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1184 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1185 		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1186 		.rpages = NULL,
1187 		.nr_rpages = 0,
1188 	};
1189 
1190 	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1191 }
1192 
1193 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1194 					pgoff_t index, unsigned copied)
1195 
1196 {
1197 	struct compress_ctx cc = {
1198 		.inode = inode,
1199 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1200 		.cluster_size = F2FS_I(inode)->i_cluster_size,
1201 		.rpages = fsdata,
1202 	};
1203 	struct folio *folio = page_folio(cc.rpages[0]);
1204 	bool first_index = (index == folio->index);
1205 
1206 	if (copied)
1207 		set_cluster_dirty(&cc);
1208 
1209 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
1210 	f2fs_destroy_compress_ctx(&cc, false);
1211 
1212 	return first_index;
1213 }
1214 
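/*
 * Truncate inside a cluster.  A normal cluster goes through
 * f2fs_do_truncate_blocks(); a compressed cluster is read in via
 * f2fs_prepare_compress_overwrite(), its tail pages are zeroed, and
 * the cluster is redirtied so it gets rewritten.
 */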
1215 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1216 {
1217 	void *fsdata = NULL;
1218 	struct page *pagep;
1219 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1220 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1221 							log_cluster_size;
1222 	int err;
1223 
1224 	err = f2fs_is_compressed_cluster(inode, start_idx);
1225 	if (err < 0)
1226 		return err;
1227 
1228 	/* truncate normal cluster */
1229 	if (!err)
1230 		return f2fs_do_truncate_blocks(inode, from, lock);
1231 
1232 	/* truncate compressed cluster */
1233 	err = f2fs_prepare_compress_overwrite(inode, &pagep,
1234 						start_idx, &fsdata);
1235 
1236 	/* should not be a normal cluster */
1237 	f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1238 
1239 	if (err <= 0)
1240 		return err;
1241 
1242 	if (err > 0) {
1243 		struct page **rpages = fsdata;
1244 		int cluster_size = F2FS_I(inode)->i_cluster_size;
1245 		int i;
1246 
1247 		for (i = cluster_size - 1; i >= 0; i--) {
1248 			struct folio *folio = page_folio(rpages[i]);
1249 			loff_t start = folio->index << PAGE_SHIFT;
1250 
1251 			if (from <= start) {
1252 				folio_zero_segment(folio, 0, folio_size(folio));
1253 			} else {
1254 				folio_zero_segment(folio, from - start,
1255 						folio_size(folio));
1256 				break;
1257 			}
1258 		}
1259 
1260 		f2fs_compress_write_end(inode, fsdata, start_idx, true);
1261 	}
1262 	return 0;
1263 }
1264 
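/*
 * Write back one cluster in compressed form: allocate a
 * compress_io_ctx, mark the first block COMPRESS_ADDR, write the
 * (possibly encrypted) cpages out of place and update i_compr_blocks.
 * Any failure returns -EAGAIN so the caller falls back to raw writes.
 */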
1265 static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1266 					int *submitted,
1267 					struct writeback_control *wbc,
1268 					enum iostat_type io_type)
1269 {
1270 	struct inode *inode = cc->inode;
1271 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1272 	struct f2fs_inode_info *fi = F2FS_I(inode);
1273 	struct f2fs_io_info fio = {
1274 		.sbi = sbi,
1275 		.ino = cc->inode->i_ino,
1276 		.type = DATA,
1277 		.op = REQ_OP_WRITE,
1278 		.op_flags = wbc_to_write_flags(wbc),
1279 		.old_blkaddr = NEW_ADDR,
1280 		.page = NULL,
1281 		.encrypted_page = NULL,
1282 		.compressed_page = NULL,
1283 		.io_type = io_type,
1284 		.io_wbc = wbc,
1285 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1286 									1 : 0,
1287 	};
1288 	struct folio *folio;
1289 	struct dnode_of_data dn;
1290 	struct node_info ni;
1291 	struct compress_io_ctx *cic;
1292 	pgoff_t start_idx = start_idx_of_cluster(cc);
1293 	unsigned int last_index = cc->cluster_size - 1;
1294 	loff_t psize;
1295 	int i, err;
1296 	bool quota_inode = IS_NOQUOTA(inode);
1297 
1298 	/* we should bypass data pages to let the kworker jobs proceed */
1299 	if (unlikely(f2fs_cp_error(sbi))) {
1300 		mapping_set_error(inode->i_mapping, -EIO);
1301 		goto out_free;
1302 	}
1303 
1304 	if (quota_inode) {
1305 		/*
1306 		 * We need to wait for node_write to avoid block allocation during
1307 		 * checkpoint. This can only happen to quota writes which can cause
1308 		 * the below discard race condition.
1309 		 */
1310 		f2fs_down_read(&sbi->node_write);
1311 	} else if (!f2fs_trylock_op(sbi)) {
1312 		goto out_free;
1313 	}
1314 
1315 	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
1316 
1317 	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1318 	if (err)
1319 		goto out_unlock_op;
1320 
1321 	for (i = 0; i < cc->cluster_size; i++) {
1322 		if (data_blkaddr(dn.inode, dn.node_folio,
1323 					dn.ofs_in_node + i) == NULL_ADDR)
1324 			goto out_put_dnode;
1325 	}
1326 
1327 	folio = page_folio(cc->rpages[last_index]);
1328 	psize = folio_pos(folio) + folio_size(folio);
1329 
1330 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1331 	if (err)
1332 		goto out_put_dnode;
1333 
1334 	fio.version = ni.version;
1335 
1336 	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1337 	if (!cic)
1338 		goto out_put_dnode;
1339 
1340 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1341 	cic->inode = inode;
1342 	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
1343 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1344 	if (!cic->rpages)
1345 		goto out_put_cic;
1346 
1347 	cic->nr_rpages = cc->cluster_size;
1348 
1349 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1350 		f2fs_set_compressed_page(cc->cpages[i], inode,
1351 				page_folio(cc->rpages[i + 1])->index, cic);
1352 		fio.compressed_page = cc->cpages[i];
1353 
1354 		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
1355 						dn.ofs_in_node + i + 1);
1356 
1357 		/* wait for GCed page writeback via META_MAPPING */
1358 		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1359 
1360 		if (fio.encrypted) {
1361 			fio.page = cc->rpages[i + 1];
1362 			err = f2fs_encrypt_one_page(&fio);
1363 			if (err)
1364 				goto out_destroy_crypt;
1365 			cc->cpages[i] = fio.encrypted_page;
1366 		}
1367 	}
1368 
1369 	set_cluster_writeback(cc);
1370 
1371 	for (i = 0; i < cc->cluster_size; i++)
1372 		cic->rpages[i] = cc->rpages[i];
1373 
1374 	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1375 		block_t blkaddr;
1376 
1377 		blkaddr = f2fs_data_blkaddr(&dn);
1378 		fio.page = cc->rpages[i];
1379 		fio.old_blkaddr = blkaddr;
1380 
1381 		/* cluster header */
1382 		if (i == 0) {
1383 			if (blkaddr == COMPRESS_ADDR)
1384 				fio.compr_blocks++;
1385 			if (__is_valid_data_blkaddr(blkaddr))
1386 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1387 			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1388 			goto unlock_continue;
1389 		}
1390 
1391 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1392 			fio.compr_blocks++;
1393 
1394 		if (i > cc->valid_nr_cpages) {
1395 			if (__is_valid_data_blkaddr(blkaddr)) {
1396 				f2fs_invalidate_blocks(sbi, blkaddr, 1);
1397 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1398 			}
1399 			goto unlock_continue;
1400 		}
1401 
1402 		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1403 
1404 		if (fio.encrypted)
1405 			fio.encrypted_page = cc->cpages[i - 1];
1406 		else
1407 			fio.compressed_page = cc->cpages[i - 1];
1408 
1409 		cc->cpages[i - 1] = NULL;
1410 		fio.submitted = 0;
1411 		f2fs_outplace_write_data(&dn, &fio);
1412 		if (unlikely(!fio.submitted)) {
1413 			cancel_cluster_writeback(cc, cic, i);
1414 
1415 			/* To call fscrypt_finalize_bounce_page */
1416 			i = cc->valid_nr_cpages;
1417 			*submitted = 0;
1418 			goto out_destroy_crypt;
1419 		}
1420 		(*submitted)++;
1421 unlock_continue:
1422 		inode_dec_dirty_pages(cc->inode);
1423 		unlock_page(fio.page);
1424 	}
1425 
1426 	if (fio.compr_blocks)
1427 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
1428 	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1429 	add_compr_block_stat(inode, cc->valid_nr_cpages);
1430 
1431 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
1432 
1433 	f2fs_put_dnode(&dn);
1434 	if (quota_inode)
1435 		f2fs_up_read(&sbi->node_write);
1436 	else
1437 		f2fs_unlock_op(sbi);
1438 
1439 	spin_lock(&fi->i_size_lock);
1440 	if (fi->last_disk_size < psize)
1441 		fi->last_disk_size = psize;
1442 	spin_unlock(&fi->i_size_lock);
1443 
1444 	f2fs_put_rpages(cc);
1445 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1446 	cc->cpages = NULL;
1447 	f2fs_destroy_compress_ctx(cc, false);
1448 	return 0;
1449 
1450 out_destroy_crypt:
1451 	page_array_free(cc->inode, cic->rpages, cc->cluster_size);
1452 
1453 	for (--i; i >= 0; i--) {
1454 		if (!cc->cpages[i])
1455 			continue;
1456 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
1457 	}
1458 out_put_cic:
1459 	kmem_cache_free(cic_entry_slab, cic);
1460 out_put_dnode:
1461 	f2fs_put_dnode(&dn);
1462 out_unlock_op:
1463 	if (quota_inode)
1464 		f2fs_up_read(&sbi->node_write);
1465 	else
1466 		f2fs_unlock_op(sbi);
1467 out_free:
1468 	for (i = 0; i < cc->valid_nr_cpages; i++) {
1469 		f2fs_compress_free_page(cc->cpages[i]);
1470 		cc->cpages[i] = NULL;
1471 	}
1472 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1473 	cc->cpages = NULL;
1474 	return -EAGAIN;
1475 }
1476 
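/*
 * Write-back completion for one compressed page.  Once the last cpage
 * of the cluster completes, end writeback on all pagecache pages of
 * the cluster and free the compress_io_ctx.
 */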
1477 void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1478 {
1479 	struct f2fs_sb_info *sbi = bio->bi_private;
1480 	struct compress_io_ctx *cic =
1481 			(struct compress_io_ctx *)page_private(page);
1482 	enum count_type type = WB_DATA_TYPE(page,
1483 				f2fs_is_compressed_page(page));
1484 	int i;
1485 
1486 	if (unlikely(bio->bi_status != BLK_STS_OK))
1487 		mapping_set_error(cic->inode->i_mapping, -EIO);
1488 
1489 	f2fs_compress_free_page(page);
1490 
1491 	dec_page_count(sbi, type);
1492 
1493 	if (atomic_dec_return(&cic->pending_pages))
1494 		return;
1495 
1496 	for (i = 0; i < cic->nr_rpages; i++) {
1497 		WARN_ON(!cic->rpages[i]);
1498 		clear_page_private_gcing(cic->rpages[i]);
1499 		end_page_writeback(cic->rpages[i]);
1500 	}
1501 
1502 	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
1503 	kmem_cache_free(cic_entry_slab, cic);
1504 }
1505 
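/*
 * Write the cluster's pages without compression: redirty and re-lock
 * each page, then push it through f2fs_write_single_data_page().  If
 * the cluster was previously compressed, this runs under f2fs_lock_op()
 * so the cluster is rewritten as a normal one without an intervening
 * checkpoint.
 */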
1506 static int f2fs_write_raw_pages(struct compress_ctx *cc,
1507 					int *submitted_p,
1508 					struct writeback_control *wbc,
1509 					enum iostat_type io_type)
1510 {
1511 	struct address_space *mapping = cc->inode->i_mapping;
1512 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1513 	int submitted, compr_blocks, i;
1514 	int ret = 0;
1515 
1516 	compr_blocks = f2fs_compressed_blocks(cc);
1517 
1518 	for (i = 0; i < cc->cluster_size; i++) {
1519 		if (!cc->rpages[i])
1520 			continue;
1521 
1522 		redirty_page_for_writepage(wbc, cc->rpages[i]);
1523 		unlock_page(cc->rpages[i]);
1524 	}
1525 
1526 	if (compr_blocks < 0)
1527 		return compr_blocks;
1528 
1529 	/* overwrite compressed cluster w/ normal cluster */
1530 	if (compr_blocks > 0)
1531 		f2fs_lock_op(sbi);
1532 
1533 	for (i = 0; i < cc->cluster_size; i++) {
1534 		struct folio *folio;
1535 
1536 		if (!cc->rpages[i])
1537 			continue;
1538 		folio = page_folio(cc->rpages[i]);
1539 retry_write:
1540 		folio_lock(folio);
1541 
1542 		if (folio->mapping != mapping) {
1543 continue_unlock:
1544 			folio_unlock(folio);
1545 			continue;
1546 		}
1547 
1548 		if (!folio_test_dirty(folio))
1549 			goto continue_unlock;
1550 
1551 		if (folio_test_writeback(folio)) {
1552 			if (wbc->sync_mode == WB_SYNC_NONE)
1553 				goto continue_unlock;
1554 			f2fs_folio_wait_writeback(folio, DATA, true, true);
1555 		}
1556 
1557 		if (!folio_clear_dirty_for_io(folio))
1558 			goto continue_unlock;
1559 
1560 		submitted = 0;
1561 		ret = f2fs_write_single_data_page(folio, &submitted,
1562 						NULL, NULL, wbc, io_type,
1563 						compr_blocks, false);
1564 		if (ret) {
1565 			if (ret == 1) {
1566 				ret = 0;
1567 			} else if (ret == -EAGAIN) {
1568 				ret = 0;
1569 				/*
1570 				 * for a quota file, just redirty the remaining
1571 				 * pages to avoid a deadlock caused by a cluster
1572 				 * update race with foreground operations.
1573 				 */
1574 				if (IS_NOQUOTA(cc->inode))
1575 					goto out;
1576 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
1577 				goto retry_write;
1578 			}
1579 			goto out;
1580 		}
1581 
1582 		*submitted_p += submitted;
1583 	}
1584 
1585 out:
1586 	if (compr_blocks > 0)
1587 		f2fs_unlock_op(sbi);
1588 
1589 	f2fs_balance_fs(sbi, true);
1590 	return ret;
1591 }
1592 
1593 int f2fs_write_multi_pages(struct compress_ctx *cc,
1594 					int *submitted,
1595 					struct writeback_control *wbc,
1596 					enum iostat_type io_type)
1597 {
1598 	int err;
1599 
1600 	*submitted = 0;
1601 	if (cluster_may_compress(cc)) {
1602 		err = f2fs_compress_pages(cc);
1603 		if (err == -EAGAIN) {
1604 			add_compr_block_stat(cc->inode, cc->cluster_size);
1605 			goto write;
1606 		} else if (err) {
1607 			f2fs_put_rpages_wbc(cc, wbc, true, 1);
1608 			goto destroy_out;
1609 		}
1610 
1611 		err = f2fs_write_compressed_pages(cc, submitted,
1612 							wbc, io_type);
1613 		if (!err)
1614 			return 0;
1615 		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1616 	}
1617 write:
1618 	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1619 
1620 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1621 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
1622 destroy_out:
1623 	f2fs_destroy_compress_ctx(cc, false);
1624 	return err;
1625 }
1626 
1627 static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1628 		bool pre_alloc)
1629 {
1630 	return pre_alloc ^ f2fs_low_mem_mode(sbi);
1631 }
1632 
1633 static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1634 		bool pre_alloc)
1635 {
1636 	const struct f2fs_compress_ops *cops =
1637 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1638 	int i;
1639 
1640 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1641 		return 0;
1642 
1643 	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1644 	if (!dic->tpages)
1645 		return -ENOMEM;
1646 
1647 	for (i = 0; i < dic->cluster_size; i++) {
1648 		if (dic->rpages[i]) {
1649 			dic->tpages[i] = dic->rpages[i];
1650 			continue;
1651 		}
1652 
1653 		dic->tpages[i] = f2fs_compress_alloc_page();
1654 	}
1655 
1656 	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1657 	if (!dic->rbuf)
1658 		return -ENOMEM;
1659 
1660 	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1661 	if (!dic->cbuf)
1662 		return -ENOMEM;
1663 
1664 	if (cops->init_decompress_ctx)
1665 		return cops->init_decompress_ctx(dic);
1666 
1667 	return 0;
1668 }
1669 
1670 static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1671 		bool bypass_destroy_callback, bool pre_alloc)
1672 {
1673 	const struct f2fs_compress_ops *cops =
1674 		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1675 
1676 	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1677 		return;
1678 
1679 	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1680 		cops->destroy_decompress_ctx(dic);
1681 
1682 	if (dic->cbuf)
1683 		vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1684 
1685 	if (dic->rbuf)
1686 		vm_unmap_ram(dic->rbuf, dic->cluster_size);
1687 }
1688 
1689 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1690 		bool bypass_destroy_callback);
1691 
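/*
 * Allocate and initialise a decompress_io_ctx for reading a cluster:
 * take over the caller's rpages, allocate cpages for the compressed
 * blocks and, unless in low-memory mode, pre-allocate the buffers and
 * algorithm context used for decompression.
 */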
1692 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1693 {
1694 	struct decompress_io_ctx *dic;
1695 	pgoff_t start_idx = start_idx_of_cluster(cc);
1696 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1697 	int i, ret;
1698 
1699 	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
1700 	if (!dic)
1701 		return ERR_PTR(-ENOMEM);
1702 
1703 	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
1704 	if (!dic->rpages) {
1705 		kmem_cache_free(dic_entry_slab, dic);
1706 		return ERR_PTR(-ENOMEM);
1707 	}
1708 
1709 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1710 	dic->inode = cc->inode;
1711 	atomic_set(&dic->remaining_pages, cc->nr_cpages);
1712 	dic->cluster_idx = cc->cluster_idx;
1713 	dic->cluster_size = cc->cluster_size;
1714 	dic->log_cluster_size = cc->log_cluster_size;
1715 	dic->nr_cpages = cc->nr_cpages;
1716 	refcount_set(&dic->refcnt, 1);
1717 	dic->failed = false;
1718 	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
1719 
1720 	for (i = 0; i < dic->cluster_size; i++)
1721 		dic->rpages[i] = cc->rpages[i];
1722 	dic->nr_rpages = cc->cluster_size;
1723 
1724 	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
1725 	if (!dic->cpages) {
1726 		ret = -ENOMEM;
1727 		goto out_free;
1728 	}
1729 
1730 	for (i = 0; i < dic->nr_cpages; i++) {
1731 		struct page *page;
1732 
1733 		page = f2fs_compress_alloc_page();
1734 		f2fs_set_compressed_page(page, cc->inode,
1735 					start_idx + i + 1, dic);
1736 		dic->cpages[i] = page;
1737 	}
1738 
1739 	ret = f2fs_prepare_decomp_mem(dic, true);
1740 	if (ret)
1741 		goto out_free;
1742 
1743 	return dic;
1744 
1745 out_free:
1746 	f2fs_free_dic(dic, true);
1747 	return ERR_PTR(ret);
1748 }
1749 
1750 static void f2fs_free_dic(struct decompress_io_ctx *dic,
1751 		bool bypass_destroy_callback)
1752 {
1753 	int i;
1754 
1755 	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1756 
1757 	if (dic->tpages) {
1758 		for (i = 0; i < dic->cluster_size; i++) {
1759 			if (dic->rpages[i])
1760 				continue;
1761 			if (!dic->tpages[i])
1762 				continue;
1763 			f2fs_compress_free_page(dic->tpages[i]);
1764 		}
1765 		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
1766 	}
1767 
1768 	if (dic->cpages) {
1769 		for (i = 0; i < dic->nr_cpages; i++) {
1770 			if (!dic->cpages[i])
1771 				continue;
1772 			f2fs_compress_free_page(dic->cpages[i]);
1773 		}
1774 		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
1775 	}
1776 
1777 	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
1778 	kmem_cache_free(dic_entry_slab, dic);
1779 }
1780 
1781 static void f2fs_late_free_dic(struct work_struct *work)
1782 {
1783 	struct decompress_io_ctx *dic =
1784 		container_of(work, struct decompress_io_ctx, free_work);
1785 
1786 	f2fs_free_dic(dic, false);
1787 }
1788 
1789 static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
1790 {
1791 	if (refcount_dec_and_test(&dic->refcnt)) {
1792 		if (in_task) {
1793 			f2fs_free_dic(dic, false);
1794 		} else {
1795 			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1796 			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1797 					&dic->free_work);
1798 		}
1799 	}
1800 }
1801 
1802 static void f2fs_verify_cluster(struct work_struct *work)
1803 {
1804 	struct decompress_io_ctx *dic =
1805 		container_of(work, struct decompress_io_ctx, verity_work);
1806 	int i;
1807 
1808 	/* Verify, update, and unlock the decompressed pages. */
1809 	for (i = 0; i < dic->cluster_size; i++) {
1810 		struct page *rpage = dic->rpages[i];
1811 
1812 		if (!rpage)
1813 			continue;
1814 
1815 		if (fsverity_verify_page(rpage))
1816 			SetPageUptodate(rpage);
1817 		else
1818 			ClearPageUptodate(rpage);
1819 		unlock_page(rpage);
1820 	}
1821 
1822 	f2fs_put_dic(dic, true);
1823 }
1824 
1825 /*
1826  * This is called when a compressed cluster has been decompressed
1827  * (or failed to be read and/or decompressed).
1828  */
1829 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1830 				bool in_task)
1831 {
1832 	int i;
1833 
1834 	if (!failed && dic->need_verity) {
1835 		/*
1836 		 * Note that to avoid deadlocks, the verity work can't be done
1837 		 * on the decompression workqueue.  This is because verifying
1838 		 * the data pages can involve reading metadata pages from the
1839 		 * file, and these metadata pages may be compressed.
1840 		 */
1841 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1842 		fsverity_enqueue_verify_work(&dic->verity_work);
1843 		return;
1844 	}
1845 
1846 	/* Update and unlock the cluster's pagecache pages. */
1847 	for (i = 0; i < dic->cluster_size; i++) {
1848 		struct page *rpage = dic->rpages[i];
1849 
1850 		if (!rpage)
1851 			continue;
1852 
1853 		if (failed)
1854 			ClearPageUptodate(rpage);
1855 		else
1856 			SetPageUptodate(rpage);
1857 		unlock_page(rpage);
1858 	}
1859 
1860 	/*
1861 	 * Release the reference to the decompress_io_ctx that was being held
1862 	 * for I/O completion.
1863 	 */
1864 	f2fs_put_dic(dic, in_task);
1865 }
1866 
1867 /*
1868  * Put a reference to a compressed folio's decompress_io_ctx.
1869  *
1870  * This is called when the folio is no longer needed and can be freed.
1871  */
1872 void f2fs_put_folio_dic(struct folio *folio, bool in_task)
1873 {
1874 	struct decompress_io_ctx *dic = folio->private;
1875 
1876 	f2fs_put_dic(dic, in_task);
1877 }
1878 
1879 /*
1880  * Check whether the cluster's blocks are contiguous; the caller adds an extent
1881  * cache entry only if the blocks are logically and physically contiguous.
1882  */
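/*
 * Example with cluster_size == 4: on-disk slots [COMPRESS_ADDR, b, b + 1,
 * b + 2] are contiguous and 3 is returned.  A valid but non-consecutive
 * block address makes the function return 0, while an invalid address
 * simply ends the count early.
 */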
1883 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
1884 						unsigned int ofs_in_node)
1885 {
1886 	bool compressed = data_blkaddr(dn->inode, dn->node_folio,
1887 					ofs_in_node) == COMPRESS_ADDR;
1888 	int i = compressed ? 1 : 0;
1889 	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
1890 							ofs_in_node + i);
1891 
1892 	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1893 		block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
1894 							ofs_in_node + i);
1895 
1896 		if (!__is_valid_data_blkaddr(blkaddr))
1897 			break;
1898 		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1899 			return 0;
1900 	}
1901 
1902 	return compressed ? i - 1 : i;
1903 }
1904 
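/* a_ops used by sbi->compress_inode, which backs the compressed block cache */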
1905 const struct address_space_operations f2fs_compress_aops = {
1906 	.release_folio = f2fs_release_folio,
1907 	.invalidate_folio = f2fs_invalidate_folio,
1908 	.migrate_folio	= filemap_migrate_folio,
1909 };
1910 
1911 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1912 {
1913 	return sbi->compress_inode->i_mapping;
1914 }
1915 
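/*
 * Drop cached compressed pages whose indices (block addresses) fall in
 * [@blkaddr, @blkaddr + @len).  No-op when the compress cache inode is not
 * enabled.
 */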
1916 void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi,
1917 				block_t blkaddr, unsigned int len)
1918 {
1919 	if (!sbi->compress_inode)
1920 		return;
1921 	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1);
1922 }
1923 
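/*
 * Add a copy of a compressed block to the compress cache, indexed by
 * @blkaddr and tagged with the owning @ino.  The copy is skipped if the
 * cache is disabled, the block address is invalid, free memory is low, or
 * the block is already cached.
 */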
1924 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1925 						nid_t ino, block_t blkaddr)
1926 {
1927 	struct folio *cfolio;
1928 	int ret;
1929 
1930 	if (!test_opt(sbi, COMPRESS_CACHE))
1931 		return;
1932 
1933 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1934 		return;
1935 
1936 	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1937 		return;
1938 
1939 	cfolio = filemap_get_folio(COMPRESS_MAPPING(sbi), blkaddr);
1940 	if (!IS_ERR(cfolio)) {
1941 		f2fs_folio_put(cfolio, false);
1942 		return;
1943 	}
1944 
1945 	cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
1946 	if (!cfolio)
1947 		return;
1948 
1949 	ret = filemap_add_folio(COMPRESS_MAPPING(sbi), cfolio,
1950 						blkaddr, GFP_NOFS);
1951 	if (ret) {
1952 		f2fs_folio_put(cfolio, false);
1953 		return;
1954 	}
1955 
1956 	set_page_private_data(&cfolio->page, ino);
1957 
1958 	memcpy(folio_address(cfolio), page_address(page), PAGE_SIZE);
1959 	folio_mark_uptodate(cfolio);
1960 	f2fs_folio_put(cfolio, true);
1961 }
1962 
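/*
 * Try to serve a compressed block from the compress cache.  On a hit the
 * cached data is copied into @folio and true is returned, so the caller can
 * skip reading the block from disk.
 */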
1963 bool f2fs_load_compressed_folio(struct f2fs_sb_info *sbi, struct folio *folio,
1964 								block_t blkaddr)
1965 {
1966 	struct folio *cfolio;
1967 	bool hit = false;
1968 
1969 	if (!test_opt(sbi, COMPRESS_CACHE))
1970 		return false;
1971 
1972 	cfolio = f2fs_filemap_get_folio(COMPRESS_MAPPING(sbi),
1973 				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1974 	if (!IS_ERR(cfolio)) {
1975 		if (folio_test_uptodate(cfolio)) {
1976 			atomic_inc(&sbi->compress_page_hit);
1977 			memcpy(folio_address(folio),
1978 				folio_address(cfolio), folio_size(folio));
1979 			hit = true;
1980 		}
1981 		f2fs_folio_put(cfolio, true);
1982 	}
1983 
1984 	return hit;
1985 }
1986 
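/*
 * Walk the compress cache and evict every folio whose private data records
 * @ino as its owner, typically when that inode is going away.
 */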
1987 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1988 {
1989 	struct address_space *mapping = COMPRESS_MAPPING(sbi);
1990 	struct folio_batch fbatch;
1991 	pgoff_t index = 0;
1992 	pgoff_t end = MAX_BLKADDR(sbi);
1993 
1994 	if (!mapping->nrpages)
1995 		return;
1996 
1997 	folio_batch_init(&fbatch);
1998 
1999 	do {
2000 		unsigned int nr, i;
2001 
2002 		nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
2003 		if (!nr)
2004 			break;
2005 
2006 		for (i = 0; i < nr; i++) {
2007 			struct folio *folio = fbatch.folios[i];
2008 
2009 			folio_lock(folio);
2010 			if (folio->mapping != mapping) {
2011 				folio_unlock(folio);
2012 				continue;
2013 			}
2014 
2015 			if (ino != get_page_private_data(&folio->page)) {
2016 				folio_unlock(folio);
2017 				continue;
2018 			}
2019 
2020 			generic_error_remove_folio(mapping, folio);
2021 			folio_unlock(folio);
2022 		}
2023 		folio_batch_release(&fbatch);
2024 		cond_resched();
2025 	} while (index < end);
2026 }
2027 
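/*
 * Grab the in-memory inode that backs the COMPRESS_CACHE feature and
 * initialize its memory-usage tunables.  Nothing to do when the mount
 * option is off.
 */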
2028 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
2029 {
2030 	struct inode *inode;
2031 
2032 	if (!test_opt(sbi, COMPRESS_CACHE))
2033 		return 0;
2034 
2035 	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
2036 	if (IS_ERR(inode))
2037 		return PTR_ERR(inode);
2038 	sbi->compress_inode = inode;
2039 
2040 	sbi->compress_percent = COMPRESS_PERCENT;
2041 	sbi->compress_watermark = COMPRESS_WATERMARK;
2042 
2043 	atomic_set(&sbi->compress_page_hit, 0);
2044 
2045 	return 0;
2046 }
2047 
2048 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
2049 {
2050 	if (!sbi->compress_inode)
2051 		return;
2052 	iput(sbi->compress_inode);
2053 	sbi->compress_inode = NULL;
2054 }
2055 
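/*
 * Per-superblock slab for the struct page pointer arrays used by compress
 * and decompress contexts; each object covers one cluster's worth of
 * pointers at the mount's compress_log_size.
 */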
2056 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
2057 {
2058 	dev_t dev = sbi->sb->s_bdev->bd_dev;
2059 	char slab_name[35];
2060 
2061 	if (!f2fs_sb_has_compression(sbi))
2062 		return 0;
2063 
2064 	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
2065 
2066 	sbi->page_array_slab_size = sizeof(struct page *) <<
2067 					F2FS_OPTION(sbi).compress_log_size;
2068 
2069 	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
2070 					sbi->page_array_slab_size);
2071 	return sbi->page_array_slab ? 0 : -ENOMEM;
2072 }
2073 
2074 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
2075 {
2076 	kmem_cache_destroy(sbi->page_array_slab);
2077 }
2078 
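/*
 * Global slabs for compress_io_ctx and decompress_io_ctx objects, created
 * once at module initialization and torn down by
 * f2fs_destroy_compress_cache().
 */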
2079 int __init f2fs_init_compress_cache(void)
2080 {
2081 	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
2082 					sizeof(struct compress_io_ctx));
2083 	if (!cic_entry_slab)
2084 		return -ENOMEM;
2085 	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
2086 					sizeof(struct decompress_io_ctx));
2087 	if (!dic_entry_slab)
2088 		goto free_cic;
2089 	return 0;
2090 free_cic:
2091 	kmem_cache_destroy(cic_entry_slab);
2092 	return -ENOMEM;
2093 }
2094 
2095 void f2fs_destroy_compress_cache(void)
2096 {
2097 	kmem_cache_destroy(dic_entry_slab);
2098 	kmem_cache_destroy(cic_entry_slab);
2099 }
2100