// xref: /linux/fs/bcachefs/compress.c (revision 8080ff5ac656b9ca6c282e4044be19d2b8a837df)
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "compress.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "opts.h"
#include "super-io.h"

#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/zstd.h>

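/*
 * Compression for bcachefs extents: lz4, gzip (raw zlib streams) and
 * zstd. Bios are mapped, or bounced through a scratch buffer, so that
 * the compression libraries always see one contiguous range of memory.
 */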
static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type)
{
	switch (type) {
	case BCH_COMPRESSION_TYPE_none:
	case BCH_COMPRESSION_TYPE_incompressible:
		return BCH_COMPRESSION_OPT_none;
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		return BCH_COMPRESSION_OPT_lz4;
	case BCH_COMPRESSION_TYPE_gzip:
		return BCH_COMPRESSION_OPT_gzip;
	case BCH_COMPRESSION_TYPE_zstd:
		return BCH_COMPRESSION_OPT_zstd;
	default:
		BUG();
	}
}

/* Bounce buffer: */
struct bbuf {
	void		*b;
	enum {
		BB_NONE,
		BB_VMAP,
		BB_KMALLOC,
		BB_MEMPOOL,
	}		type;
	int		rw;
};

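/*
 * Allocate a bounce buffer of at most encoded_extent_max bytes: try a
 * plain kmalloc() first (__GFP_NOWARN, since failure is expected under
 * memory pressure), then fall back to the preallocated mempool, whose
 * elements are sized to encoded_extent_max; mempool_alloc() with a
 * reclaim-capable gfp mask never returns NULL, so the trailing BUG()
 * is unreachable.
 */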
static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	BUG_ON(size > c->opts.encoded_extent_max);

	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}

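/*
 * Returns true if the bio's data is physically contiguous in memory, in
 * which case it can be addressed directly via page_address() without a
 * vmap or a bounce copy.
 */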
static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	void *expected_start = NULL;

	__bio_for_each_bvec(bv, bio, iter, start) {
		if (expected_start &&
		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
			return false;

		expected_start = page_address(bv.bv_page) +
			bv.bv_offset + bv.bv_len;
	}

	return true;
}

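/*
 * Map a bio so the compression code sees one contiguous buffer, in
 * order of preference:
 *
 *  - BB_NONE:  lowmem and physically contiguous, use the pages directly
 *  - BB_VMAP:  only the first segment starts mid-page and only the last
 *		ends mid-page, so the pages can be stitched together with
 *		vmap()
 *  - bounce:   otherwise allocate a scratch buffer; for reads the bio
 *		contents are copied in, for writes the caller fills it
 */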
static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
				       struct bvec_iter start, int rw)
{
	struct bbuf ret;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned nr_pages = 0;
	struct page *stack_pages[16];
	struct page **pages = NULL;
	void *data;

	BUG_ON(start.bi_size > c->opts.encoded_extent_max);

	if (!PageHighMem(bio_iter_page(bio, start)) &&
	    bio_phys_contig(bio, start))
		return (struct bbuf) {
			.b = page_address(bio_iter_page(bio, start)) +
				bio_iter_offset(bio, start),
			.type = BB_NONE, .rw = rw
		};

	/* check if we can map the pages contiguously: */
	__bio_for_each_segment(bv, bio, iter, start) {
		if (iter.bi_size != start.bi_size &&
		    bv.bv_offset)
			goto bounce;

		if (bv.bv_len < iter.bi_size &&
		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
			goto bounce;

		nr_pages++;
	}

	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);

	pages = nr_pages > ARRAY_SIZE(stack_pages)
		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
	if (!pages)
		goto bounce;

	nr_pages = 0;
	__bio_for_each_segment(bv, bio, iter, start)
		pages[nr_pages++] = bv.bv_page;

	data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (pages != stack_pages)
		kfree(pages);

	if (data)
		return (struct bbuf) {
			.b = data + bio_iter_offset(bio, start),
			.type = BB_VMAP, .rw = rw
		};
bounce:
	ret = __bounce_alloc(c, start.bi_size, rw);

	if (rw == READ)
		memcpy_from_bio(ret.b, bio, start);

	return ret;
}

static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
{
	return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
}

static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
{
	switch (buf.type) {
	case BB_NONE:
		break;
	case BB_VMAP:
		/* buf.b may point into the mapping; vunmap() needs the page-aligned base */
		vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
		break;
	case BB_KMALLOC:
		kfree(buf.b);
		break;
	case BB_MEMPOOL:
		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
		break;
	}
}

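/*
 * The kernel's zlib requires a caller-supplied workspace; userspace
 * zlib allocates its own state internally, hence the #ifdef.
 */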
static inline void zlib_set_workspace(z_stream *strm, void *workspace)
{
#ifdef __KERNEL__
	strm->workspace = workspace;
#endif
}

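/*
 * Decompress @src into @dst_data, which must be crc.uncompressed_size
 * sectors. zstd payloads carry a __le32 length prefix because the
 * stored size is rounded up to a sector boundary but zstd needs the
 * exact compressed length at decompression time:
 *
 *	[ __le32 len ][ zstd frame, len bytes ][ zero pad to block size ]
 */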
static int __bio_uncompress(struct bch_fs *c, struct bio *src,
			    void *dst_data, struct bch_extent_crc_unpacked crc)
{
	struct bbuf src_data = { NULL };
	size_t src_len = src->bi_iter.bi_size;
	size_t dst_len = crc.uncompressed_size << 9;
	void *workspace;
	int ret;

	enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
	mempool_t *workspace_pool = &c->compress_workspace[opt];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_type_not_marked_in_sb,
			     "compression type %s set but not marked in superblock",
			     __bch2_compression_types[crc.compression_type]))
			ret = bch2_check_set_has_compressed_data(c, opt);
		else
			ret = -BCH_ERR_compression_workspace_not_initialized;
		if (ret)
			goto out;
	}

	src_data = bio_map_or_bounce(c, src, READ);

	switch (crc.compression_type) {
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
						  src_len, dst_len, dst_len);
		if (ret != dst_len)
			goto err;
		break;
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src_data.b,
			.avail_in	= src_len,
			.next_out	= dst_data,
			.avail_out	= dst_len,
		};

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);

		zlib_set_workspace(&strm, workspace);
		zlib_inflateInit2(&strm, -MAX_WBITS);
		ret = zlib_inflate(&strm, Z_FINISH);

		mempool_free(workspace, workspace_pool);

		if (ret != Z_STREAM_END)
			goto err;
		break;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		ZSTD_DCtx *ctx;
		size_t real_src_len = le32_to_cpup(src_data.b);

		if (real_src_len > src_len - 4)
			goto err;

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);
		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());

		ret = zstd_decompress_dctx(ctx,
				dst_data,	dst_len,
				src_data.b + 4, real_src_len);

		mempool_free(workspace, workspace_pool);

		if (ret != dst_len)
			goto err;
		break;
	}
	default:
		BUG();
	}
	ret = 0;
fsck_err:
out:
	bio_unmap_or_unbounce(c, src_data);
	return ret;
err:
	ret = -EIO;
	goto out;
}

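/*
 * Decompress an extent in place for the rewrite path: the data lands in
 * a bounce buffer, is copied back over @bio's own pages, and @op->crc
 * is rewritten so the extent now reads as plain uncompressed data.
 */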
int bch2_bio_uncompress_inplace(struct bch_write_op *op,
				struct bio *bio)
{
	struct bch_fs *c = op->c;
	struct bch_extent_crc_unpacked *crc = &op->crc;
	struct bbuf data = { NULL };
	size_t dst_len = crc->uncompressed_size << 9;
	int ret = 0;

	/* bio must own its pages: */
	BUG_ON(!bio->bi_vcnt);
	BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);

	if (crc->uncompressed_size << 9	> c->opts.encoded_extent_max ||
	    crc->compressed_size << 9	> c->opts.encoded_extent_max) {
		struct printbuf buf = PRINTBUF;
		bch2_write_op_error(&buf, op);
		prt_printf(&buf, "error rewriting existing data: extent too big");
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
		return -EIO;
	}

	data = __bounce_alloc(c, dst_len, WRITE);

	if (__bio_uncompress(c, bio, data.b, *crc)) {
		if (!c->opts.no_data_io) {
			struct printbuf buf = PRINTBUF;
			bch2_write_op_error(&buf, op);
			prt_printf(&buf, "error rewriting existing data: decompression error");
			bch_err_ratelimited(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}
		ret = -EIO;
		goto err;
	}

	/*
	 * XXX: don't have a good way to assert that the bio was allocated with
	 * enough space, we depend on bch2_move_extent doing the right thing
	 */
	bio->bi_iter.bi_size = crc->live_size << 9;

	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));

	crc->csum_type		= 0;
	crc->compression_type	= 0;
	crc->compressed_size	= crc->live_size;
	crc->uncompressed_size	= crc->live_size;
	crc->offset		= 0;
	crc->csum		= (struct bch_csum) { 0, 0 };
err:
	bio_unmap_or_unbounce(c, data);
	return ret;
}

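/*
 * Decompress @src into @dst: if @dst_iter covers the full uncompressed
 * extent we can decompress straight into it (mapped or bounced);
 * otherwise decompress into a scratch buffer and copy out only the
 * live range starting at crc.offset.
 */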
int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
		       struct bio *dst, struct bvec_iter dst_iter,
		       struct bch_extent_crc_unpacked crc)
{
	struct bbuf dst_data = { NULL };
	size_t dst_len = crc.uncompressed_size << 9;
	int ret;

	if (crc.uncompressed_size << 9	> c->opts.encoded_extent_max ||
	    crc.compressed_size << 9	> c->opts.encoded_extent_max)
		return -EIO;

	dst_data = dst_len == dst_iter.bi_size
		? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
		: __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, src, dst_data.b, crc);
	if (ret)
		goto err;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
err:
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
}

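/*
 * Compress src_len bytes of @src into @dst. Return convention: > 0 is
 * the number of compressed bytes produced, 0 means compression failed
 * or the output didn't fit, and a negative value is a hint (from
 * LZ4_compress_destSize()) of how many input bytes would have fit.
 */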
static int attempt_compress(struct bch_fs *c,
			    void *workspace,
			    void *dst, size_t dst_len,
			    void *src, size_t src_len,
			    struct bch_compression_opt compression)
{
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];

	switch (compression_type) {
	case BCH_COMPRESSION_TYPE_lz4:
		if (compression.level < LZ4HC_MIN_CLEVEL) {
			int len = src_len;
			int ret = LZ4_compress_destSize(
					src,		dst,
					&len,		dst_len,
					workspace);
			if (len < src_len)
				return -len;

			return ret;
		} else {
			int ret = LZ4_compress_HC(
					src,		dst,
					src_len,	dst_len,
					compression.level,
					workspace);

			return ret ?: -1;
		}
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src,
			.avail_in	= src_len,
			.next_out	= dst,
			.avail_out	= dst_len,
		};

		zlib_set_workspace(&strm, workspace);
		zlib_deflateInit2(&strm,
				  compression.level
				  ? clamp_t(unsigned, compression.level,
					    Z_BEST_SPEED, Z_BEST_COMPRESSION)
				  : Z_DEFAULT_COMPRESSION,
				  Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
				  Z_DEFAULT_STRATEGY);

		if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
			return 0;

		if (zlib_deflateEnd(&strm) != Z_OK)
			return 0;

		return strm.total_out;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		/*
		 * rescale:
		 * zstd max compression level is 22, our max level is 15
		 */
		unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
		ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
		ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);

		/*
		 * ZSTD requires that when we decompress we pass in the exact
		 * compressed size - rounding it up to the nearest sector
		 * doesn't work, so we use the first 4 bytes of the buffer for
		 * that.
		 *
		 * Additionally, the ZSTD code seems to have a bug where it will
		 * write just past the end of the buffer - so subtract a fudge
		 * factor (7 bytes) from the dst buffer size to account for
		 * that.
		 */
		size_t len = zstd_compress_cctx(ctx,
				dst + 4,	dst_len - 4 - 7,
				src,		src_len,
				&params);
		if (zstd_is_error(len))
			return 0;

		*((__le32 *) dst) = cpu_to_le32(len);
		return len + 4;
	}
	default:
		BUG();
	}
}

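/*
 * Try to compress @src into @dst, shrinking the amount of input on each
 * failed attempt (using the hint from attempt_compress() when there is
 * one) until the result fits or the input is down to a single block.
 * Returns the on-disk compression type actually used, or
 * BCH_COMPRESSION_TYPE_incompressible when compression didn't pay off.
 */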
static unsigned __bio_compress(struct bch_fs *c,
			       struct bio *dst, size_t *dst_len,
			       struct bio *src, size_t *src_len,
			       struct bch_compression_opt compression)
{
	struct bbuf src_data = { NULL }, dst_data = { NULL };
	void *workspace;
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];
	unsigned pad;
	int ret = 0;

	/* bch2_compression_decode catches unknown compression types: */
	BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR);

	mempool_t *workspace_pool = &c->compress_workspace[compression.type];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_opt_not_marked_in_sb,
			     "compression opt %s set but not marked in superblock",
			     bch2_compression_opts[compression.type])) {
			ret = bch2_check_set_has_compressed_data(c, compression.type);
			if (ret) /* memory allocation failure, don't compress */
				return 0;
		} else {
			return 0;
		}
	}

	/* If it's only one block, don't bother trying to compress: */
	if (src->bi_iter.bi_size <= c->opts.block_size)
		return BCH_COMPRESSION_TYPE_incompressible;

	dst_data = bio_map_or_bounce(c, dst, WRITE);
	src_data = bio_map_or_bounce(c, src, READ);

	workspace = mempool_alloc(workspace_pool, GFP_NOFS);

	*src_len = src->bi_iter.bi_size;
	*dst_len = dst->bi_iter.bi_size;

	/*
	 * XXX: this algorithm sucks when the compression code doesn't tell us
	 * how much would fit, like LZ4 does:
	 */
	while (1) {
		if (*src_len <= block_bytes(c)) {
			ret = -1;
			break;
		}

		ret = attempt_compress(c, workspace,
				       dst_data.b,	*dst_len,
				       src_data.b,	*src_len,
				       compression);
		if (ret > 0) {
			*dst_len = ret;
			ret = 0;
			break;
		}

		/* Didn't fit: should we retry with a smaller amount?  */
		if (*src_len <= *dst_len) {
			ret = -1;
			break;
		}

		/*
		 * If ret is negative, it's a hint as to how much data would fit
		 */
		BUG_ON(-ret >= *src_len);

		if (ret < 0)
			*src_len = -ret;
		else
			*src_len -= (*src_len - *dst_len) / 2;
		*src_len = round_down(*src_len, block_bytes(c));
	}

	mempool_free(workspace, workspace_pool);

	if (ret)
		goto err;

	/* Didn't get smaller: */
	if (round_up(*dst_len, block_bytes(c)) >= *src_len)
		goto err;

	pad = round_up(*dst_len, block_bytes(c)) - *dst_len;

	memset(dst_data.b + *dst_len, 0, pad);
	*dst_len += pad;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);

	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
	BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
	BUG_ON(*dst_len & (block_bytes(c) - 1));
	BUG_ON(*src_len & (block_bytes(c) - 1));
	ret = compression_type;
out:
	bio_unmap_or_unbounce(c, src_data);
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
err:
	ret = BCH_COMPRESSION_TYPE_incompressible;
	goto out;
fsck_err:
	ret = 0;
	goto out;
}

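/*
 * Public entry point for the write path: clamp how much of @src is
 * consumed (at most encoded_extent_max) and how much output may be
 * produced (never more than the input), compress, then restore both
 * iterators. @src_len/@dst_len report how much was actually consumed
 * and produced.
 */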
unsigned bch2_bio_compress(struct bch_fs *c,
			   struct bio *dst, size_t *dst_len,
			   struct bio *src, size_t *src_len,
			   unsigned compression_opt)
{
	unsigned orig_dst = dst->bi_iter.bi_size;
	unsigned orig_src = src->bi_iter.bi_size;
	unsigned compression_type;

	/* Don't consume more than encoded_extent_max from @src: */
	src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
				     c->opts.encoded_extent_max);
	/* Don't generate a bigger output than input: */
	dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);

	compression_type =
		__bio_compress(c, dst, dst_len, src, src_len,
			       bch2_compression_decode(compression_opt));

	dst->bi_iter.bi_size = orig_dst;
	src->bi_iter.bi_size = orig_src;
	return compression_type;
}

static int __bch2_fs_compress_init(struct bch_fs *, u64);

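/*
 * Table mapping compression opts to superblock feature bits. There is
 * no BCH_FEATURE_none, so a placeholder is #defined to 0 around the
 * x-macro expansion and #undef'd afterwards.
 */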
#define BCH_FEATURE_none	0

static const unsigned bch2_compression_opt_to_feature[] = {
#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
	BCH_COMPRESSION_OPTS()
#undef x
};

#undef BCH_FEATURE_none

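/*
 * Set feature bits @f in the superblock, initializing the corresponding
 * compression workspaces first. The check is repeated under sb_lock so
 * the common already-set case stays lockless.
 */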
static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
{
	int ret = 0;

	if ((c->sb.features & f) == f)
		return 0;

	mutex_lock(&c->sb_lock);

	if ((c->sb.features & f) == f) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	ret = __bch2_fs_compress_init(c, c->sb.features|f);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ret;
	}

	c->disk_sb.sb->features[0] |= cpu_to_le64(f);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_check_set_has_compressed_data(struct bch_fs *c,
				       unsigned compression_opt)
{
	unsigned compression_type = bch2_compression_decode(compression_opt).type;

	BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));

	return compression_type
		? __bch2_check_set_has_compressed_data(c,
				1ULL << bch2_compression_opt_to_feature[compression_type])
		: 0;
}

void bch2_fs_compress_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
		mempool_exit(&c->compress_workspace[i]);
	mempool_exit(&c->compression_bounce[WRITE]);
	mempool_exit(&c->compression_bounce[READ]);
}

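/*
 * Allocate the bounce buffers and per-algorithm workspace mempools for
 * every compression feature enabled in @features. Each workspace is
 * sized for the largest use that algorithm has (compressing or
 * decompressing), so a single mempool element per algorithm suffices.
 */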
static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
{
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 c->opts.encoded_extent_max);

	c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);

	struct {
		unsigned			feature;
		enum bch_compression_opts	type;
		size_t				compress_workspace;
	} compression_types[] = {
		{ BCH_FEATURE_lz4, BCH_COMPRESSION_OPT_lz4,
			max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) },
		{ BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip,
			max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
			    zlib_inflate_workspacesize()) },
		{ BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd,
			max(c->zstd_workspace_size,
			    zstd_dctx_workspace_bound()) },
	}, *i;
	bool have_compressed = false;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++)
		have_compressed |= (features & BIT_ULL(i->feature)) != 0;

	if (!have_compressed)
		return 0;

	if (!mempool_initialized(&c->compression_bounce[READ]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_read_init;

	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_write_init;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++) {
		if (!(features & BIT_ULL(i->feature)))
			continue;

		if (mempool_initialized(&c->compress_workspace[i->type]))
			continue;

		if (mempool_init_kvmalloc_pool(
				&c->compress_workspace[i->type],
				1, i->compress_workspace))
			return -BCH_ERR_ENOMEM_compression_workspace_init;
	}

	return 0;
}

static u64 compression_opt_to_feature(unsigned v)
{
	unsigned type = bch2_compression_decode(v).type;

	return BIT_ULL(bch2_compression_opt_to_feature[type]);
}

int bch2_fs_compress_init(struct bch_fs *c)
{
	u64 f = c->sb.features;

	f |= compression_opt_to_feature(c->opts.compression);
	f |= compression_opt_to_feature(c->opts.background_compression);

	return __bch2_fs_compress_init(c, f);
}

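/*
 * Parse a compression option of the form "type" or "type:level", e.g.
 * "lz4" or "zstd:7". Levels run 0-15, with 0 selecting the algorithm's
 * default; "none" accepts no nonzero level.
 */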
int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
			       struct printbuf *err)
{
	char *val = kstrdup(_val, GFP_KERNEL);
	char *p = val, *type_str, *level_str;
	struct bch_compression_opt opt = { 0 };
	int ret;

	if (!val)
		return -ENOMEM;

	type_str = strsep(&p, ":");
	level_str = p;

	ret = match_string(bch2_compression_opts, -1, type_str);
	if (ret < 0 && err)
		prt_str(err, "invalid compression type");
	if (ret < 0)
		goto err;

	opt.type = ret;

	if (level_str) {
		unsigned level;

		ret = kstrtouint(level_str, 10, &level);
		if (!ret && !opt.type && level)
			ret = -EINVAL;
		if (!ret && level > 15)
			ret = -EINVAL;
		if (ret < 0 && err)
			prt_str(err, "invalid compression level");
		if (ret < 0)
			goto err;

		opt.level = level;
	}

	ret = 0;
	*res = bch2_compression_encode(opt);
err:
	kfree(val);
	return ret;
}

void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
{
	struct bch_compression_opt opt = bch2_compression_decode(v);

	if (opt.type < BCH_COMPRESSION_OPT_NR)
		prt_str(out, bch2_compression_opts[opt.type]);
	else
		prt_printf(out, "(unknown compression opt %u)", opt.type);
	if (opt.level)
		prt_printf(out, ":%u", opt.level);
}

void bch2_opt_compression_to_text(struct printbuf *out,
				  struct bch_fs *c,
				  struct bch_sb *sb,
				  u64 v)
{
	return bch2_compression_opt_to_text(out, v);
}

int bch2_opt_compression_validate(u64 v, struct printbuf *err)
{
	if (!bch2_compression_opt_valid(v)) {
		prt_printf(err, "invalid compression opt %llu", v);
		return -BCH_ERR_invalid_sb_opt_compression;
	}

	return 0;
}
780