// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "compress.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "opts.h"
#include "super-io.h"

#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/zstd.h>

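/*
 * Map an on-disk compression type back to the compression option it was
 * produced by: both lz4 variants map to the lz4 option, and incompressible
 * data is treated as uncompressed.
 */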
static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type)
{
	switch (type) {
	case BCH_COMPRESSION_TYPE_none:
	case BCH_COMPRESSION_TYPE_incompressible:
		return BCH_COMPRESSION_OPT_none;
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		return BCH_COMPRESSION_OPT_lz4;
	case BCH_COMPRESSION_TYPE_gzip:
		return BCH_COMPRESSION_OPT_gzip;
	case BCH_COMPRESSION_TYPE_zstd:
		return BCH_COMPRESSION_OPT_zstd;
	default:
		BUG();
	}
}

/* Bounce buffer: */
struct bbuf {
	void		*b;
	enum {
		BB_NONE,
		BB_VMAP,
		BB_KMALLOC,
		BB_MEMPOOL,
	}		type;
	int		rw;
};

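/*
 * Allocate a bounce buffer of @size bytes: kmalloc first, falling back to
 * the preallocated mempool, which is sized for encoded_extent_max (hence
 * the BUG_ON on oversized requests).
 */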
static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	BUG_ON(size > c->opts.encoded_extent_max);

	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}

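/*
 * Returns true if the data in @bio starting at @start is physically
 * contiguous, i.e. each bvec begins exactly where the previous one ended,
 * so the whole range can be addressed linearly via the direct map.
 */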
static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	void *expected_start = NULL;

	__bio_for_each_bvec(bv, bio, iter, start) {
		if (expected_start &&
		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
			return false;

		expected_start = page_address(bv.bv_page) +
			bv.bv_offset + bv.bv_len;
	}

	return true;
}

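/*
 * Get a linear mapping of the data in @bio at @start: used directly when
 * already physically contiguous, otherwise vmapped if the segments allow it,
 * and bounced (with a copy in, for reads) as a last resort.
 */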
static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
				       struct bvec_iter start, int rw)
{
	struct bbuf ret;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned nr_pages = 0;
	struct page *stack_pages[16];
	struct page **pages = NULL;
	void *data;

	BUG_ON(start.bi_size > c->opts.encoded_extent_max);

	if (!PageHighMem(bio_iter_page(bio, start)) &&
	    bio_phys_contig(bio, start))
		return (struct bbuf) {
			.b = page_address(bio_iter_page(bio, start)) +
				bio_iter_offset(bio, start),
			.type = BB_NONE, .rw = rw
		};

	/* check if we can map the pages contiguously: */
	__bio_for_each_segment(bv, bio, iter, start) {
		if (iter.bi_size != start.bi_size &&
		    bv.bv_offset)
			goto bounce;

		if (bv.bv_len < iter.bi_size &&
		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
			goto bounce;

		nr_pages++;
	}

	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);

	pages = nr_pages > ARRAY_SIZE(stack_pages)
		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
	if (!pages)
		goto bounce;

	nr_pages = 0;
	__bio_for_each_segment(bv, bio, iter, start)
		pages[nr_pages++] = bv.bv_page;

	data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (pages != stack_pages)
		kfree(pages);

	if (data)
		return (struct bbuf) {
			.b = data + bio_iter_offset(bio, start),
			.type = BB_VMAP, .rw = rw
		};
bounce:
	ret = __bounce_alloc(c, start.bi_size, rw);

	if (rw == READ)
		memcpy_from_bio(ret.b, bio, start);

	return ret;
}

static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
{
	return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
}

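/* Release whatever bio_map_or_bounce()/__bounce_alloc() handed out: */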
static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
{
	switch (buf.type) {
	case BB_NONE:
		break;
	case BB_VMAP:
		vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
		break;
	case BB_KMALLOC:
		kfree(buf.b);
		break;
	case BB_MEMPOOL:
		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
		break;
	}
}

static inline void zlib_set_workspace(z_stream *strm, void *workspace)
{
#ifdef __KERNEL__
	strm->workspace = workspace;
#endif
}

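/*
 * Decompress @src into @dst_data, which must be crc.uncompressed_size << 9
 * bytes; the compression type and sizes are taken from the unpacked extent
 * crc.
 */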
static int __bio_uncompress(struct bch_fs *c, struct bio *src,
			    void *dst_data, struct bch_extent_crc_unpacked crc)
{
	struct bbuf src_data = { NULL };
	size_t src_len = src->bi_iter.bi_size;
	size_t dst_len = crc.uncompressed_size << 9;
	void *workspace;
	int ret = 0, ret2;

	enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
	mempool_t *workspace_pool = &c->compress_workspace[opt];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_type_not_marked_in_sb,
			     "compression type %s set but not marked in superblock",
			     __bch2_compression_types[crc.compression_type]))
			ret = bch2_check_set_has_compressed_data(c, opt);
		else
			ret = -BCH_ERR_compression_workspace_not_initialized;
		if (ret)
			goto err;
	}

	src_data = bio_map_or_bounce(c, src, READ);

	switch (crc.compression_type) {
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data,
						   src_len, dst_len, dst_len);
		if (ret2 != dst_len)
			ret = -BCH_ERR_decompress_lz4;
		break;
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src_data.b,
			.avail_in	= src_len,
			.next_out	= dst_data,
			.avail_out	= dst_len,
		};

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);

		zlib_set_workspace(&strm, workspace);
		zlib_inflateInit2(&strm, -MAX_WBITS);
		ret2 = zlib_inflate(&strm, Z_FINISH);

		mempool_free(workspace, workspace_pool);

		if (ret2 != Z_STREAM_END)
			ret = -BCH_ERR_decompress_gzip;
		break;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		ZSTD_DCtx *ctx;
		size_t real_src_len = le32_to_cpup(src_data.b);

		if (real_src_len > src_len - 4) {
			ret = -BCH_ERR_decompress_zstd_src_len_bad;
			goto err;
		}

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);
		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());

		ret2 = zstd_decompress_dctx(ctx,
				dst_data,	dst_len,
				src_data.b + 4, real_src_len);

		mempool_free(workspace, workspace_pool);

		if (ret2 != dst_len)
			ret = -BCH_ERR_decompress_zstd;
		break;
	}
	default:
		BUG();
	}
err:
fsck_err:
	bio_unmap_or_unbounce(c, src_data);
	return ret;
}

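/*
 * Decompress an extent in place, for the write path: the compressed payload
 * in @bio is replaced with the live (uncompressed) data and @op->crc is
 * updated to describe the now-uncompressed extent.
 */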
int bch2_bio_uncompress_inplace(struct bch_write_op *op,
				struct bio *bio)
{
	struct bch_fs *c = op->c;
	struct bch_extent_crc_unpacked *crc = &op->crc;
	struct bbuf data = { NULL };
	size_t dst_len = crc->uncompressed_size << 9;
	int ret = 0;

	/* bio must own its pages: */
	BUG_ON(!bio->bi_vcnt);
	BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);

	if (crc->uncompressed_size << 9	> c->opts.encoded_extent_max) {
		bch2_write_op_error(op, op->pos.offset,
				    "extent too big to decompress (%u > %u)",
				    crc->uncompressed_size << 9, c->opts.encoded_extent_max);
		return -BCH_ERR_decompress_exceeded_max_encoded_extent;
	}

	data = __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, bio, data.b, *crc);

	if (c->opts.no_data_io)
		ret = 0;

	if (ret) {
		bch2_write_op_error(op, op->pos.offset, "%s", bch2_err_str(ret));
		goto err;
	}

	/*
	 * XXX: don't have a good way to assert that the bio was allocated with
	 * enough space, we depend on bch2_move_extent doing the right thing
	 */
	bio->bi_iter.bi_size = crc->live_size << 9;

	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));

	crc->csum_type		= 0;
	crc->compression_type	= 0;
	crc->compressed_size	= crc->live_size;
	crc->uncompressed_size	= crc->live_size;
	crc->offset		= 0;
	crc->csum		= (struct bch_csum) { 0, 0 };
err:
	bio_unmap_or_unbounce(c, data);
	return ret;
}

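/*
 * Decompress @src into @dst at @dst_iter: decompresses directly into the dst
 * bio when it is exactly the uncompressed size, otherwise through a bounce
 * buffer.
 */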
int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
		       struct bio *dst, struct bvec_iter dst_iter,
		       struct bch_extent_crc_unpacked crc)
{
	struct bbuf dst_data = { NULL };
	size_t dst_len = crc.uncompressed_size << 9;
	int ret;

	if (crc.uncompressed_size << 9	> c->opts.encoded_extent_max ||
	    crc.compressed_size << 9	> c->opts.encoded_extent_max)
		return -BCH_ERR_decompress_exceeded_max_encoded_extent;

	dst_data = dst_len == dst_iter.bi_size
		? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
		: __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, src, dst_data.b, crc);
	if (ret)
		goto err;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
err:
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
}

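/*
 * Try to compress @src_len bytes into a buffer of @dst_len bytes with the
 * given algorithm and level. Returns the compressed size on success, 0 if
 * the output didn't fit, or a negative hint as to how many input bytes
 * would have fit (lz4 only).
 */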
static int attempt_compress(struct bch_fs *c,
			    void *workspace,
			    void *dst, size_t dst_len,
			    void *src, size_t src_len,
			    struct bch_compression_opt compression)
{
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];

	switch (compression_type) {
	case BCH_COMPRESSION_TYPE_lz4:
		if (compression.level < LZ4HC_MIN_CLEVEL) {
			int len = src_len;
			int ret = LZ4_compress_destSize(
					src,		dst,
					&len,		dst_len,
					workspace);
			if (len < src_len)
				return -len;

			return ret;
		} else {
			int ret = LZ4_compress_HC(
					src,		dst,
					src_len,	dst_len,
					compression.level,
					workspace);

			return ret ?: -1;
		}
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src,
			.avail_in	= src_len,
			.next_out	= dst,
			.avail_out	= dst_len,
		};

		zlib_set_workspace(&strm, workspace);
		zlib_deflateInit2(&strm,
				  compression.level
				  ? clamp_t(unsigned, compression.level,
					    Z_BEST_SPEED, Z_BEST_COMPRESSION)
				  : Z_DEFAULT_COMPRESSION,
				  Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
				  Z_DEFAULT_STRATEGY);

		if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
			return 0;

		if (zlib_deflateEnd(&strm) != Z_OK)
			return 0;

		return strm.total_out;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		/*
		 * rescale:
		 * zstd max compression level is 22, our max level is 15
		 */
		unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
		ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
		ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);

		/*
		 * ZSTD requires that when we decompress we pass in the exact
		 * compressed size - rounding it up to the nearest sector
		 * doesn't work, so we use the first 4 bytes of the buffer for
		 * that.
		 *
		 * Additionally, the ZSTD code seems to have a bug where it will
		 * write just past the end of the buffer - so subtract a fudge
		 * factor (7 bytes) from the dst buffer size to account for
		 * that.
		 */
		size_t len = zstd_compress_cctx(ctx,
				dst + 4,	dst_len - 4 - 7,
				src,		src_len,
				&params);
		if (zstd_is_error(len))
			return 0;

		*((__le32 *) dst) = cpu_to_le32(len);
		return len + 4;
	}
	default:
		BUG();
	}
}

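/*
 * Compress @src into @dst, retrying with progressively less input until the
 * output fits. On success returns the compression type used and sets
 * *src_len/*dst_len to the bytes consumed and produced (output zero-padded
 * to a block boundary); returns BCH_COMPRESSION_TYPE_incompressible if we
 * couldn't save at least one block, or 0 on error.
 */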
static unsigned __bio_compress(struct bch_fs *c,
			       struct bio *dst, size_t *dst_len,
			       struct bio *src, size_t *src_len,
			       struct bch_compression_opt compression)
{
	struct bbuf src_data = { NULL }, dst_data = { NULL };
	void *workspace;
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];
	unsigned pad;
	int ret = 0;

	/* bch2_compression_decode catches unknown compression types: */
	BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR);

	mempool_t *workspace_pool = &c->compress_workspace[compression.type];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_opt_not_marked_in_sb,
			     "compression opt %s set but not marked in superblock",
			     bch2_compression_opts[compression.type])) {
			ret = bch2_check_set_has_compressed_data(c, compression.type);
			if (ret) /* memory allocation failure, don't compress */
				return 0;
		} else {
			return 0;
		}
	}

	/* If it's only one block, don't bother trying to compress: */
	if (src->bi_iter.bi_size <= c->opts.block_size)
		return BCH_COMPRESSION_TYPE_incompressible;

	dst_data = bio_map_or_bounce(c, dst, WRITE);
	src_data = bio_map_or_bounce(c, src, READ);

	workspace = mempool_alloc(workspace_pool, GFP_NOFS);

	*src_len = src->bi_iter.bi_size;
	*dst_len = dst->bi_iter.bi_size;

	/*
	 * XXX: this algorithm sucks when the compression code doesn't tell us
	 * how much would fit, like LZ4 does:
	 */
	while (1) {
		if (*src_len <= block_bytes(c)) {
			ret = -1;
			break;
		}

		ret = attempt_compress(c, workspace,
				       dst_data.b,	*dst_len,
				       src_data.b,	*src_len,
				       compression);
		if (ret > 0) {
			*dst_len = ret;
			ret = 0;
			break;
		}

		/* Didn't fit: should we retry with a smaller amount?  */
		if (*src_len <= *dst_len) {
			ret = -1;
			break;
		}

		/*
		 * If ret is negative, it's a hint as to how much data would fit
		 */
		BUG_ON(-ret >= *src_len);

		if (ret < 0)
			*src_len = -ret;
		else
			*src_len -= (*src_len - *dst_len) / 2;
		*src_len = round_down(*src_len, block_bytes(c));
	}

	mempool_free(workspace, workspace_pool);

	if (ret)
		goto err;

	/* Didn't get smaller: */
	if (round_up(*dst_len, block_bytes(c)) >= *src_len)
		goto err;

	pad = round_up(*dst_len, block_bytes(c)) - *dst_len;

	memset(dst_data.b + *dst_len, 0, pad);
	*dst_len += pad;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);

	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
	BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
	BUG_ON(*dst_len & (block_bytes(c) - 1));
	BUG_ON(*src_len & (block_bytes(c) - 1));
	ret = compression_type;
out:
	bio_unmap_or_unbounce(c, src_data);
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
err:
	ret = BCH_COMPRESSION_TYPE_incompressible;
	goto out;
fsck_err:
	ret = 0;
	goto out;
}

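/*
 * Entry point for the write path: clamps the input to encoded_extent_max,
 * never asks for output bigger than the input, and restores both bios'
 * sizes before returning the compression type actually used.
 */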
unsigned bch2_bio_compress(struct bch_fs *c,
			   struct bio *dst, size_t *dst_len,
			   struct bio *src, size_t *src_len,
			   unsigned compression_opt)
{
	unsigned orig_dst = dst->bi_iter.bi_size;
	unsigned orig_src = src->bi_iter.bi_size;
	unsigned compression_type;

	/* Don't consume more than BCH_ENCODED_EXTENT_MAX from @src: */
	src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
				     c->opts.encoded_extent_max);
	/* Don't generate a bigger output than input: */
	dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);

	compression_type =
		__bio_compress(c, dst, dst_len, src, src_len,
			       bch2_compression_decode(compression_opt));

	dst->bi_iter.bi_size = orig_dst;
	src->bi_iter.bi_size = orig_src;
	return compression_type;
}

static int __bch2_fs_compress_init(struct bch_fs *, u64);

#define BCH_FEATURE_none	0

static const unsigned bch2_compression_opt_to_feature[] = {
#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
	BCH_COMPRESSION_OPTS()
#undef x
};

#undef BCH_FEATURE_none

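/*
 * Make sure the superblock advertises the feature bits in @f before we write
 * compressed data: allocates the needed workspaces first, and rechecks under
 * sb_lock so that racing callers only write the superblock once.
 */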
static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
{
	int ret = 0;

	if ((c->sb.features & f) == f)
		return 0;

	mutex_lock(&c->sb_lock);

	if ((c->sb.features & f) == f) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	ret = __bch2_fs_compress_init(c, c->sb.features|f);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ret;
	}

	c->disk_sb.sb->features[0] |= cpu_to_le64(f);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_check_set_has_compressed_data(struct bch_fs *c,
				       unsigned compression_opt)
{
	unsigned compression_type = bch2_compression_decode(compression_opt).type;

	BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));

	return compression_type
		? __bch2_check_set_has_compressed_data(c,
				1ULL << bch2_compression_opt_to_feature[compression_type])
		: 0;
}

void bch2_fs_compress_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
		mempool_exit(&c->compress_workspace[i]);
	mempool_exit(&c->compression_bounce[WRITE]);
	mempool_exit(&c->compression_bounce[READ]);
}

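/*
 * Allocate the bounce buffers and per-algorithm workspace mempools for every
 * compression feature enabled in @features; a no-op when no compression
 * features are set. Each workspace is sized for the worst case of
 * compression and decompression.
 */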
static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
{
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 c->opts.encoded_extent_max);

	c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);

	struct {
		unsigned			feature;
		enum bch_compression_opts	type;
		size_t				compress_workspace;
	} compression_types[] = {
		{ BCH_FEATURE_lz4, BCH_COMPRESSION_OPT_lz4,
			max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) },
		{ BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip,
			max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
			    zlib_inflate_workspacesize()) },
		{ BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd,
			max(c->zstd_workspace_size,
			    zstd_dctx_workspace_bound()) },
	}, *i;
	bool have_compressed = false;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++)
		have_compressed |= (features & (1 << i->feature)) != 0;

	if (!have_compressed)
		return 0;

	if (!mempool_initialized(&c->compression_bounce[READ]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_read_init;

	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_write_init;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++) {
		if (!(features & (1 << i->feature)))
			continue;

		if (mempool_initialized(&c->compress_workspace[i->type]))
			continue;

		if (mempool_init_kvmalloc_pool(
				&c->compress_workspace[i->type],
				1, i->compress_workspace))
			return -BCH_ERR_ENOMEM_compression_workspace_init;
	}

	return 0;
}

static u64 compression_opt_to_feature(unsigned v)
{
	unsigned type = bch2_compression_decode(v).type;

	return BIT_ULL(bch2_compression_opt_to_feature[type]);
}

int bch2_fs_compress_init(struct bch_fs *c)
{
	u64 f = c->sb.features;

	f |= compression_opt_to_feature(c->opts.compression);
	f |= compression_opt_to_feature(c->opts.background_compression);

	return __bch2_fs_compress_init(c, f);
}

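/*
 * Parse a compression option string of the form "type[:level]", e.g.
 * "zstd:7": a nonzero level is only accepted for a real compression type,
 * and must not exceed 15.
 */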
int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
			       struct printbuf *err)
{
	char *val = kstrdup(_val, GFP_KERNEL);
	char *p = val, *type_str, *level_str;
	struct bch_compression_opt opt = { 0 };
	int ret;

	if (!val)
		return -ENOMEM;

	type_str = strsep(&p, ":");
	level_str = p;

	ret = match_string(bch2_compression_opts, -1, type_str);
	if (ret < 0 && err)
		prt_str(err, "invalid compression type");
	if (ret < 0)
		goto err;

	opt.type = ret;

	if (level_str) {
		unsigned level;

		ret = kstrtouint(level_str, 10, &level);
		if (!ret && !opt.type && level)
			ret = -EINVAL;
		if (!ret && level > 15)
			ret = -EINVAL;
		if (ret < 0 && err)
			prt_str(err, "invalid compression level");
		if (ret < 0)
			goto err;

		opt.level = level;
	}

	/* don't return the positive index from match_string() on success: */
	ret = 0;
	*res = bch2_compression_encode(opt);
err:
	kfree(val);
	return ret;
}

void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
{
	struct bch_compression_opt opt = bch2_compression_decode(v);

	if (opt.type < BCH_COMPRESSION_OPT_NR)
		prt_str(out, bch2_compression_opts[opt.type]);
	else
		prt_printf(out, "(unknown compression opt %u)", opt.type);
	if (opt.level)
		prt_printf(out, ":%u", opt.level);
}

void bch2_opt_compression_to_text(struct printbuf *out,
				  struct bch_fs *c,
				  struct bch_sb *sb,
				  u64 v)
{
	return bch2_compression_opt_to_text(out, v);
}

int bch2_opt_compression_validate(u64 v, struct printbuf *err)
{
	if (!bch2_compression_opt_valid(v)) {
		prt_printf(err, "invalid compression opt %llu", v);
		return -BCH_ERR_invalid_sb_opt_compression;
	}

	return 0;
}
773