xref: /linux/fs/btrfs/compression.c (revision a8b0b72255d09bb12ada5620cd6ced91adde5ac8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/file.h>
9 #include <linux/fs.h>
10 #include <linux/pagemap.h>
11 #include <linux/folio_batch.h>
12 #include <linux/highmem.h>
13 #include <linux/kthread.h>
14 #include <linux/time.h>
15 #include <linux/init.h>
16 #include <linux/string.h>
17 #include <linux/backing-dev.h>
18 #include <linux/writeback.h>
19 #include <linux/psi.h>
20 #include <linux/slab.h>
21 #include <linux/sched/mm.h>
22 #include <linux/log2.h>
23 #include <linux/shrinker.h>
24 #include "misc.h"
25 #include "ctree.h"
26 #include "fs.h"
27 #include "btrfs_inode.h"
28 #include "bio.h"
29 #include "ordered-data.h"
30 #include "compression.h"
31 #include "extent_io.h"
32 #include "extent_map.h"
33 #include "subpage.h"
34 #include "messages.h"
35 #include "super.h"
36 
37 static struct bio_set btrfs_compressed_bioset;
38 
39 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
40 
41 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
42 {
43 	switch (type) {
44 	case BTRFS_COMPRESS_ZLIB:
45 	case BTRFS_COMPRESS_LZO:
46 	case BTRFS_COMPRESS_ZSTD:
47 	case BTRFS_COMPRESS_NONE:
48 		return btrfs_compress_types[type];
49 	default:
50 		break;
51 	}
52 
53 	return NULL;
54 }
55 
56 static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
57 {
58 	return container_of(bbio, struct compressed_bio, bbio);
59 }
60 
61 static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
62 						   u64 start, blk_opf_t op,
63 						   btrfs_bio_end_io_t end_io)
64 {
65 	struct btrfs_bio *bbio;
66 
67 	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
68 					  GFP_NOFS, &btrfs_compressed_bioset));
69 	btrfs_bio_init(bbio, inode, start, end_io, NULL);
70 	return to_compressed_bio(bbio);
71 }
72 
73 bool btrfs_compress_is_valid_type(const char *str, size_t len)
74 {
75 	int i;
76 
77 	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
78 		size_t comp_len = strlen(btrfs_compress_types[i]);
79 
80 		if (len < comp_len)
81 			continue;
82 
83 		if (!strncmp(btrfs_compress_types[i], str, comp_len))
84 			return true;
85 	}
86 	return false;
87 }
88 
89 static int compression_decompress_bio(struct list_head *ws,
90 				      struct compressed_bio *cb)
91 {
92 	switch (cb->compress_type) {
93 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
94 	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
95 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
96 	case BTRFS_COMPRESS_NONE:
97 	default:
98 		/*
99 		 * This can't happen, the type is validated several times
100 		 * before we get here.
101 		 */
102 		BUG();
103 	}
104 }
105 
106 static int compression_decompress(int type, struct list_head *ws,
107 		const u8 *data_in, struct folio *dest_folio,
108 		unsigned long dest_pgoff, size_t srclen, size_t destlen)
109 {
110 	switch (type) {
111 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
112 						dest_pgoff, srclen, destlen);
113 	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_folio,
114 						dest_pgoff, srclen, destlen);
115 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
116 						dest_pgoff, srclen, destlen);
117 	case BTRFS_COMPRESS_NONE:
118 	default:
119 		/*
120 		 * This can't happen, the type is validated several times
121 		 * before we get here.
122 		 */
123 		BUG();
124 	}
125 }
126 
127 static int btrfs_decompress_bio(struct compressed_bio *cb);
128 
129 /*
130  * Global cache of last unused pages for compression/decompression.
131  */
132 static struct btrfs_compr_pool {
133 	struct shrinker *shrinker;
134 	spinlock_t lock;
135 	struct list_head list;
136 	int count;
137 	int thresh;
138 } compr_pool;
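/*
 * Note on the pool behaviour as implemented below: btrfs_alloc_compr_folio()
 * hands out cached order-0 folios first and only falls back to folio_alloc(),
 * btrfs_free_compr_folio() returns folios to the list until 'count' exceeds
 * 'thresh', and the shrinker can drain the whole list under memory pressure.
 */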
139 
140 static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
141 {
142 	int ret;
143 
144 	/*
145 	 * We must not read the values more than once if 'ret' gets expanded in
146 	 * the return statement so we don't accidentally return a negative
147 	 * number, even if the first condition finds it positive.
148 	 */
149 	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
150 
151 	return ret > 0 ? ret : 0;
152 }
153 
154 static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
155 {
156 	LIST_HEAD(remove);
157 	struct list_head *tmp, *next;
158 	int freed;
159 
160 	if (compr_pool.count == 0)
161 		return SHRINK_STOP;
162 
163 	/* For now, just simply drain the whole list. */
164 	spin_lock(&compr_pool.lock);
165 	list_splice_init(&compr_pool.list, &remove);
166 	freed = compr_pool.count;
167 	compr_pool.count = 0;
168 	spin_unlock(&compr_pool.lock);
169 
170 	list_for_each_safe(tmp, next, &remove) {
171 		struct page *page = list_entry(tmp, struct page, lru);
172 
173 		ASSERT(page_ref_count(page) == 1);
174 		put_page(page);
175 	}
176 
177 	return freed;
178 }
179 
180 /*
181  * Common wrappers for folio allocation used by the compression code
182  */
183 struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info, gfp_t gfp)
184 {
185 	struct folio *folio = NULL;
186 
187 	/* For bs > ps cases, no cached folio pool for now. */
188 	if (fs_info->block_min_order)
189 		goto alloc;
190 
191 	spin_lock(&compr_pool.lock);
192 	if (compr_pool.count > 0) {
193 		folio = list_first_entry(&compr_pool.list, struct folio, lru);
194 		list_del_init(&folio->lru);
195 		compr_pool.count--;
196 	}
197 	spin_unlock(&compr_pool.lock);
198 
199 	if (folio)
200 		return folio;
201 
202 alloc:
203 	return folio_alloc(gfp, fs_info->block_min_order);
204 }
205 
206 void btrfs_free_compr_folio(struct folio *folio)
207 {
208 	bool do_free = false;
209 
210 	/* The folio is from bs > ps fs, no cached pool for now. */
211 	if (folio_order(folio))
212 		goto free;
213 
214 	spin_lock(&compr_pool.lock);
215 	if (compr_pool.count > compr_pool.thresh) {
216 		do_free = true;
217 	} else {
218 		list_add(&folio->lru, &compr_pool.list);
219 		compr_pool.count++;
220 	}
221 	spin_unlock(&compr_pool.lock);
222 
223 	if (!do_free)
224 		return;
225 
226 free:
227 	ASSERT(folio_ref_count(folio) == 1);
228 	folio_put(folio);
229 }
230 
231 static void end_bbio_compressed_read(struct btrfs_bio *bbio)
232 {
233 	struct compressed_bio *cb = to_compressed_bio(bbio);
234 	blk_status_t status = bbio->bio.bi_status;
235 	struct folio_iter fi;
236 
237 	if (!status)
238 		status = errno_to_blk_status(btrfs_decompress_bio(cb));
239 
240 	btrfs_bio_end_io(cb->orig_bbio, status);
241 	bio_for_each_folio_all(fi, &bbio->bio)
242 		btrfs_free_compr_folio(fi.folio);
243 	bio_put(&bbio->bio);
244 }
245 
246 /*
247  * Clear the writeback bits on all of the file
248  * pages for a compressed write
249  */
250 static noinline void end_compressed_writeback(const struct compressed_bio *cb)
251 {
252 	struct inode *inode = &cb->bbio.inode->vfs_inode;
253 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
254 	pgoff_t index = cb->start >> PAGE_SHIFT;
255 	const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
256 	struct folio_batch fbatch;
257 	int i;
258 	int ret;
259 
260 	ret = blk_status_to_errno(cb->bbio.bio.bi_status);
261 	if (ret)
262 		mapping_set_error(inode->i_mapping, ret);
263 
264 	folio_batch_init(&fbatch);
265 	while (index <= end_index) {
266 		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
267 				&fbatch);
268 
269 		if (ret == 0)
270 			return;
271 
272 		for (i = 0; i < ret; i++) {
273 			struct folio *folio = fbatch.folios[i];
274 
275 			btrfs_folio_clamp_clear_writeback(fs_info, folio,
276 							  cb->start, cb->len);
277 		}
278 		folio_batch_release(&fbatch);
279 	}
280 	/* the inode may be gone now */
281 }
282 
283 /*
284  * Do the cleanup once all the compressed pages hit the disk.  This will clear
285  * writeback on the file pages and free the compressed pages.
286  *
287  * This also calls the writeback end hooks for the file pages so that metadata
288  * and checksums can be updated in the file.
289  */
290 static void end_bbio_compressed_write(struct btrfs_bio *bbio)
291 {
292 	struct compressed_bio *cb = to_compressed_bio(bbio);
293 	struct folio_iter fi;
294 
295 	btrfs_finish_ordered_extent(cb->bbio.ordered, cb->start, cb->len,
296 				    cb->bbio.bio.bi_status == BLK_STS_OK);
297 
298 	if (cb->writeback)
299 		end_compressed_writeback(cb);
300 	/* Note, our inode could be gone now. */
301 	bio_for_each_folio_all(fi, &bbio->bio)
302 		btrfs_free_compr_folio(fi.folio);
303 	bio_put(&cb->bbio.bio);
304 }
305 
306 /*
307  * worker function to build and submit bios for previously compressed pages.
308  * The corresponding pages in the inode should be marked for writeback
309  * and the compressed pages should have a reference on them for dropping
310  * when the IO is complete.
311  *
312  * This also checksums the file bytes and gets things ready for
313  * the end io hooks.
314  */
315 void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
316 				   struct compressed_bio *cb)
317 {
318 	struct btrfs_inode *inode = ordered->inode;
319 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
320 
321 	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
322 	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
323 	/*
324 	 * This flag determines if we should clear the writeback flag from the
325 	 * page cache. But this function is only utilized by encoded writes, which
326 	 * never go through the page cache.
327 	 */
328 	ASSERT(!cb->writeback);
329 
330 	cb->start = ordered->file_offset;
331 	cb->len = ordered->num_bytes;
332 	ASSERT(cb->bbio.bio.bi_iter.bi_size == ordered->disk_num_bytes);
333 	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
334 	cb->bbio.ordered = ordered;
335 
336 	btrfs_submit_bbio(&cb->bbio, 0);
337 }
338 
339 /*
340  * Allocate a compressed write bio for @inode file offset @start length @len.
341  *
342  * The caller still needs to properly queue all folios and populate involved
343  * members.
344  */
345 struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
346 						    u64 start, u64 len)
347 {
348 	struct compressed_bio *cb;
349 
350 	cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE, end_bbio_compressed_write);
351 	cb->start = start;
352 	cb->len = len;
353 	cb->writeback = false;
354 	return cb;
355 }
356 
357 /*
358  * Add extra pages in the same compressed file extent so that we don't need to
359  * re-read the same extent again and again.
360  *
361  * NOTE: this won't work well for subpage, as for a subpage read we lock the
362  * full page then submit a bio for each compressed/regular extent.
363  *
364  * This means that if several sectors in the same page point to the same
365  * on-disk compressed data, we will re-read the same extent many times and
366  * this function can only help for the next page.
367  */
368 static noinline int add_ra_bio_pages(struct inode *inode,
369 				     u64 compressed_end,
370 				     struct compressed_bio *cb,
371 				     int *memstall, unsigned long *pflags,
372 				     bool direct_reclaim)
373 {
374 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
375 	pgoff_t end_index;
376 	struct bio *orig_bio = &cb->orig_bbio->bio;
377 	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
378 	u64 isize = i_size_read(inode);
379 	int ret;
380 	gfp_t constraint_gfp, cache_gfp;
381 	struct folio *folio;
382 	struct extent_map *em;
383 	struct address_space *mapping = inode->i_mapping;
384 	struct extent_map_tree *em_tree;
385 	struct extent_io_tree *tree;
386 	int sectors_missed = 0;
387 
388 	em_tree = &BTRFS_I(inode)->extent_tree;
389 	tree = &BTRFS_I(inode)->io_tree;
390 
391 	if (isize == 0)
392 		return 0;
393 
394 	/*
395 	 * For current subpage support, we only support 64K page size,
396 	 * which means maximum compressed extent size (128K) is just 2x page
397 	 * size.
398 	 * This makes readahead less effective, so disable readahead for
399 	 * subpage for now, until full compressed write is supported.
400 	 */
401 	if (fs_info->sectorsize < PAGE_SIZE)
402 		return 0;
403 
404 	/* For bs > ps cases, we don't support readahead for compressed folios for now. */
405 	if (fs_info->block_min_order)
406 		return 0;
407 
408 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
409 
410 	/* Avoid direct reclaim when the caller does not allow it. */
411 	constraint_gfp = ~__GFP_FS;
412 	cache_gfp = GFP_NOFS | __GFP_NOWARN;
413 	if (!direct_reclaim) {
414 		constraint_gfp &= ~__GFP_DIRECT_RECLAIM;
415 		cache_gfp &= ~__GFP_DIRECT_RECLAIM;
416 	}
417 
418 	while (cur < compressed_end) {
419 		pgoff_t page_end;
420 		pgoff_t pg_index = cur >> PAGE_SHIFT;
421 		gfp_t masked_constraint_gfp;
422 		u32 add_size;
423 
424 		if (pg_index > end_index)
425 			break;
426 
427 		folio = filemap_get_folio(mapping, pg_index);
428 		if (!IS_ERR(folio)) {
429 			u64 folio_sz = folio_size(folio);
430 			u64 offset = offset_in_folio(folio, cur);
431 
432 			folio_put(folio);
433 			sectors_missed += (folio_sz - offset) >>
434 					  fs_info->sectorsize_bits;
435 
436 			/* Beyond threshold, no need to continue */
437 			if (sectors_missed > 4)
438 				break;
439 
440 			/*
441 			 * Jump to the next page start as we already have a page
442 			 * for the current offset.
443 			 */
444 			cur += (folio_sz - offset);
445 			continue;
446 		}
447 
448 		/*
449 		 * Since add_ra_bio_pages() is always speculative, suppress
450 		 * allocation warnings.
451 		 */
452 		masked_constraint_gfp = mapping_gfp_constraint(mapping, constraint_gfp);
453 		masked_constraint_gfp |= __GFP_NOWARN;
454 
455 		folio = filemap_alloc_folio(masked_constraint_gfp, 0, NULL);
456 		if (!folio)
457 			break;
458 
459 		if (filemap_add_folio(mapping, folio, pg_index, cache_gfp)) {
460 			/* There is already a page, skip to page end */
461 			cur += folio_size(folio);
462 			folio_put(folio);
463 			continue;
464 		}
465 
466 		if (!*memstall && folio_test_workingset(folio)) {
467 			psi_memstall_enter(pflags);
468 			*memstall = 1;
469 		}
470 
471 		ret = set_folio_extent_mapped(folio);
472 		if (ret < 0) {
473 			folio_unlock(folio);
474 			folio_put(folio);
475 			break;
476 		}
477 
478 		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
479 		btrfs_lock_extent(tree, cur, page_end, NULL);
480 		read_lock(&em_tree->lock);
481 		em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
482 		read_unlock(&em_tree->lock);
483 
484 		/*
485 		 * At this point, we have a locked page in the page cache for
486 		 * these bytes in the file.  But, we have to make sure they map
487 		 * to this compressed extent on disk.
488 		 */
489 		if (!em || cur < em->start ||
490 		    (cur + fs_info->sectorsize > btrfs_extent_map_end(em)) ||
491 		    (btrfs_extent_map_block_start(em) >> SECTOR_SHIFT) !=
492 		    orig_bio->bi_iter.bi_sector) {
493 			btrfs_free_extent_map(em);
494 			btrfs_unlock_extent(tree, cur, page_end, NULL);
495 			folio_unlock(folio);
496 			folio_put(folio);
497 			break;
498 		}
499 		add_size = min(btrfs_extent_map_end(em), page_end + 1) - cur;
500 		btrfs_free_extent_map(em);
501 		btrfs_unlock_extent(tree, cur, page_end, NULL);
502 
503 		if (folio_contains(folio, end_index)) {
504 			size_t zero_offset = offset_in_folio(folio, isize);
505 
506 			if (zero_offset) {
507 				int zeros;
508 				zeros = folio_size(folio) - zero_offset;
509 				folio_zero_range(folio, zero_offset, zeros);
510 			}
511 		}
512 
513 		if (!bio_add_folio(orig_bio, folio, add_size,
514 				   offset_in_folio(folio, cur))) {
515 			folio_unlock(folio);
516 			folio_put(folio);
517 			break;
518 		}
519 		/*
520 		 * If it's subpage, we also need to mark the range as locked in
521 		 * the subpage bitmap, as at endio time we will clear it again
522 		 * to unlock the page.
523 		 */
524 		if (fs_info->sectorsize < PAGE_SIZE)
525 			btrfs_folio_set_lock(fs_info, folio, cur, add_size);
526 		folio_put(folio);
527 		cur += add_size;
528 	}
529 	return 0;
530 }
531 
532 /*
533  * for a compressed read, the bio we get passed has all the inode pages
534  * in it.  We don't actually do IO on those pages but allocate new ones
535  * to hold the compressed pages on disk.
536  *
537  * bio->bi_iter.bi_sector points to the compressed extent on disk
538  * bio->bi_io_vec points to all of the inode pages
539  *
540  * After the compressed pages are read, we copy the bytes into the
541  * bio we were passed and then call the bio end_io calls
542  */
543 void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
544 {
545 	struct btrfs_inode *inode = bbio->inode;
546 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
547 	struct extent_map_tree *em_tree = &inode->extent_tree;
548 	struct compressed_bio *cb;
549 	unsigned int compressed_len;
550 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
551 	u64 file_offset = bbio->file_offset;
552 	gfp_t gfp;
553 	u64 em_len;
554 	u64 em_start;
555 	struct extent_map *em;
556 	unsigned long pflags;
557 	int memstall = 0;
558 	int ret;
559 
560 	/*
561 	 * If this is a readahead bio, prevent direct reclaim. This is done to
562 	 * avoid stalling on speculative allocations when memory pressure is
563 	 * high. The demand fault will retry with GFP_NOFS and enter direct
564 	 * reclaim if needed.
565 	 */
566 	if (bbio->bio.bi_opf & REQ_RAHEAD)
567 		gfp = (GFP_NOFS & ~__GFP_DIRECT_RECLAIM) | __GFP_NOWARN;
568 	else
569 		gfp = GFP_NOFS;
570 
571 	/* we need the actual starting offset of this extent in the file */
572 	read_lock(&em_tree->lock);
573 	em = btrfs_lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
574 	read_unlock(&em_tree->lock);
575 	if (!em) {
576 		ret = -EIO;
577 		goto out;
578 	}
579 
580 	ASSERT(btrfs_extent_map_is_compressed(em));
581 	compressed_len = em->disk_num_bytes;
582 
583 	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
584 				  end_bbio_compressed_read);
585 
586 	cb->start = em->start - em->offset;
587 	em_len = em->len;
588 	em_start = em->start;
589 
590 	cb->len = bbio->bio.bi_iter.bi_size;
591 	cb->compress_type = btrfs_extent_map_compression(em);
592 	cb->orig_bbio = bbio;
593 	cb->bbio.csum_search_commit_root = bbio->csum_search_commit_root;
594 
595 	btrfs_free_extent_map(em);
596 
597 	for (int i = 0; i * min_folio_size < compressed_len; i++) {
598 		struct folio *folio;
599 		u32 cur_len = min(compressed_len - i * min_folio_size, min_folio_size);
600 
601 		folio = btrfs_alloc_compr_folio(fs_info, gfp);
602 		if (!folio) {
603 			ret = -ENOMEM;
604 			goto out_free_bio;
605 		}
606 
607 		ret = bio_add_folio(&cb->bbio.bio, folio, cur_len, 0);
608 		if (unlikely(!ret)) {
609 			folio_put(folio);
610 			ret = -EINVAL;
611 			goto out_free_bio;
612 		}
613 	}
614 	ASSERT(cb->bbio.bio.bi_iter.bi_size == compressed_len);
615 
616 	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
617 			 &pflags, !(bbio->bio.bi_opf & REQ_RAHEAD));
618 
619 	cb->len = bbio->bio.bi_iter.bi_size;
620 	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
621 
622 	if (memstall)
623 		psi_memstall_leave(&pflags);
624 
625 	btrfs_submit_bbio(&cb->bbio, 0);
626 	return;
627 
628 out_free_bio:
629 	cleanup_compressed_bio(cb);
630 out:
631 	btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
632 }
633 
634 /*
635  * The heuristic uses systematic sampling to collect data from the input data
636  * range; the logic can be tuned by the following constants:
637  *
638  * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
639  * @SAMPLING_INTERVAL  - distance between the starts of two consecutive samples
640  */
641 #define SAMPLING_READ_SIZE	(16)
642 #define SAMPLING_INTERVAL	(256)
643 
644 /*
645  * For statistical analysis of the input data we consider bytes that form a
646  * Galois Field of 256 objects. Each object has an attribute count, ie. how
647  * many times the object appeared in the sample.
648  */
649 #define BUCKET_SIZE		(256)
650 
651 /*
652  * The size of the sample is based on a statistical sampling rule of thumb.
653  * The common way is to perform sampling tests as long as the number of
654  * elements in each cell is at least 5.
655  *
656  * Instead of 5, we choose 32 to obtain more accurate results.
657  * If the data contain the maximum number of symbols, which is 256, we obtain a
658  * sample size bound by 8192.
659  *
660  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
661  * from up to 512 locations.
662  */
663 #define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
664 				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
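/*
 * Worked out with the in-tree constants (BTRFS_MAX_UNCOMPRESSED is 128KiB):
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes, i.e. the 8KiB bound
 * mentioned above, collected as up to 512 sample locations of 16 bytes each.
 */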
665 
666 struct bucket_item {
667 	u32 count;
668 };
669 
670 struct heuristic_ws {
671 	/* Partial copy of input data */
672 	u8 *sample;
673 	u32 sample_size;
674 	/* Buckets store counters for each byte value */
675 	struct bucket_item *bucket;
676 	/* Sorting buffer */
677 	struct bucket_item *bucket_b;
678 	struct list_head list;
679 };
680 
681 static void free_heuristic_ws(struct list_head *ws)
682 {
683 	struct heuristic_ws *workspace;
684 
685 	workspace = list_entry(ws, struct heuristic_ws, list);
686 
687 	kvfree(workspace->sample);
688 	kfree(workspace->bucket);
689 	kfree(workspace->bucket_b);
690 	kfree(workspace);
691 }
692 
693 static struct list_head *alloc_heuristic_ws(struct btrfs_fs_info *fs_info)
694 {
695 	struct heuristic_ws *ws;
696 
697 	ws = kzalloc_obj(*ws);
698 	if (!ws)
699 		return ERR_PTR(-ENOMEM);
700 
701 	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
702 	if (!ws->sample)
703 		goto fail;
704 
705 	ws->bucket = kzalloc_objs(*ws->bucket, BUCKET_SIZE);
706 	if (!ws->bucket)
707 		goto fail;
708 
709 	ws->bucket_b = kzalloc_objs(*ws->bucket_b, BUCKET_SIZE);
710 	if (!ws->bucket_b)
711 		goto fail;
712 
713 	INIT_LIST_HEAD(&ws->list);
714 	return &ws->list;
715 fail:
716 	free_heuristic_ws(&ws->list);
717 	return ERR_PTR(-ENOMEM);
718 }
719 
720 const struct btrfs_compress_levels btrfs_heuristic_compress = { 0 };
721 
722 static const struct btrfs_compress_levels * const btrfs_compress_levels[] = {
723 	/* The heuristic is represented as compression type 0 */
724 	&btrfs_heuristic_compress,
725 	&btrfs_zlib_compress,
726 	&btrfs_lzo_compress,
727 	&btrfs_zstd_compress,
728 };
729 
730 static struct list_head *alloc_workspace(struct btrfs_fs_info *fs_info, int type, int level)
731 {
732 	switch (type) {
733 	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(fs_info);
734 	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(fs_info, level);
735 	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(fs_info);
736 	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(fs_info, level);
737 	default:
738 		/*
739 		 * This can't happen, the type is validated several times
740 		 * before we get here.
741 		 */
742 		BUG();
743 	}
744 }
745 
746 static void free_workspace(int type, struct list_head *ws)
747 {
748 	switch (type) {
749 	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
750 	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
751 	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
752 	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
753 	default:
754 		/*
755 		 * This can't happen, the type is validated several times
756 		 * before we get here.
757 		 */
758 		BUG();
759 	}
760 }
761 
762 static int alloc_workspace_manager(struct btrfs_fs_info *fs_info,
763 				   enum btrfs_compression_type type)
764 {
765 	struct workspace_manager *gwsm;
766 	struct list_head *workspace;
767 
768 	ASSERT(fs_info->compr_wsm[type] == NULL);
769 	gwsm = kzalloc_obj(*gwsm);
770 	if (!gwsm)
771 		return -ENOMEM;
772 
773 	INIT_LIST_HEAD(&gwsm->idle_ws);
774 	spin_lock_init(&gwsm->ws_lock);
775 	atomic_set(&gwsm->total_ws, 0);
776 	init_waitqueue_head(&gwsm->ws_wait);
777 	fs_info->compr_wsm[type] = gwsm;
778 
779 	/*
780 	 * Preallocate one workspace for each compression type so we can
781 	 * guarantee forward progress in the worst case
782 	 */
783 	workspace = alloc_workspace(fs_info, type, 0);
784 	if (IS_ERR(workspace)) {
785 		btrfs_warn(fs_info,
786 	"cannot preallocate compression workspace for %s, will try later",
787 			   btrfs_compress_type2str(type));
788 	} else {
789 		atomic_set(&gwsm->total_ws, 1);
790 		gwsm->free_ws = 1;
791 		list_add(workspace, &gwsm->idle_ws);
792 	}
793 	return 0;
794 }
795 
796 static void free_workspace_manager(struct btrfs_fs_info *fs_info,
797 				   enum btrfs_compression_type type)
798 {
799 	struct list_head *ws;
800 	struct workspace_manager *gwsm = fs_info->compr_wsm[type];
801 
802 	/* ZSTD uses its own workspace manager and should never enter here. */
803 	ASSERT(type != BTRFS_COMPRESS_ZSTD && type < BTRFS_NR_COMPRESS_TYPES);
804 	if (!gwsm)
805 		return;
806 	fs_info->compr_wsm[type] = NULL;
807 	while (!list_empty(&gwsm->idle_ws)) {
808 		ws = gwsm->idle_ws.next;
809 		list_del(ws);
810 		free_workspace(type, ws);
811 		atomic_dec(&gwsm->total_ws);
812 	}
813 	kfree(gwsm);
814 }
815 
816 /*
817  * This finds an available workspace or allocates a new one.
818  * If it's not possible to allocate a new one, waits until there's one.
819  * Preallocation makes a forward progress guarantee and we do not return
820  * errors.
821  */
822 struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
823 {
824 	struct workspace_manager *wsm = fs_info->compr_wsm[type];
825 	struct list_head *workspace;
826 	int cpus = num_online_cpus();
827 	unsigned nofs_flag;
828 	struct list_head *idle_ws;
829 	spinlock_t *ws_lock;
830 	atomic_t *total_ws;
831 	wait_queue_head_t *ws_wait;
832 	int *free_ws;
833 
834 	ASSERT(wsm);
835 	idle_ws	 = &wsm->idle_ws;
836 	ws_lock	 = &wsm->ws_lock;
837 	total_ws = &wsm->total_ws;
838 	ws_wait	 = &wsm->ws_wait;
839 	free_ws	 = &wsm->free_ws;
840 
841 again:
842 	spin_lock(ws_lock);
843 	if (!list_empty(idle_ws)) {
844 		workspace = idle_ws->next;
845 		list_del(workspace);
846 		(*free_ws)--;
847 		spin_unlock(ws_lock);
848 		return workspace;
849 
850 	}
851 	if (atomic_read(total_ws) > cpus) {
852 		DEFINE_WAIT(wait);
853 
854 		spin_unlock(ws_lock);
855 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
856 		if (atomic_read(total_ws) > cpus && !*free_ws)
857 			schedule();
858 		finish_wait(ws_wait, &wait);
859 		goto again;
860 	}
861 	atomic_inc(total_ws);
862 	spin_unlock(ws_lock);
863 
864 	/*
865 	 * Allocation helpers call vmalloc that can't take GFP_NOFS directly, so
866 	 * we use the memalloc_nofs_*() scope API here because we might get
867 	 * called from the restricted context of btrfs_compress_bio().
868 	 */
869 	nofs_flag = memalloc_nofs_save();
870 	workspace = alloc_workspace(fs_info, type, level);
871 	memalloc_nofs_restore(nofs_flag);
872 
873 	if (IS_ERR(workspace)) {
874 		atomic_dec(total_ws);
875 		wake_up(ws_wait);
876 
877 		/*
878 		 * Do not return the error but go back to waiting. There's a
879 		 * workspace preallocated for each type and the compression
880 		 * time is bounded so we get to a workspace eventually. This
881 		 * makes our caller's life easier.
882 		 *
883 		 * To prevent silent and low-probability deadlocks (when the
884 		 * initial preallocation fails), check if there are any
885 		 * workspaces at all.
886 		 */
887 		if (atomic_read(total_ws) == 0) {
888 			static DEFINE_RATELIMIT_STATE(_rs,
889 					/* once per minute */ 60 * HZ,
890 					/* no burst */ 1);
891 
892 			if (__ratelimit(&_rs))
893 				btrfs_warn(fs_info,
894 				"no compression workspaces, low memory, retrying");
895 		}
896 		goto again;
897 	}
898 	return workspace;
899 }
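/*
 * A sketch of the expected usage, mirroring get_workspace()/put_workspace()
 * below: the returned list_head is embedded in the per-algorithm workspace
 * struct (see e.g. the heuristic_ws list_entry() in btrfs_compress_heuristic())
 * and must be handed back with btrfs_put_workspace() once the compression or
 * decompression work is done.
 */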
900 
901 static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
902 {
903 	switch (type) {
904 	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(fs_info, type, level);
905 	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(fs_info, level);
906 	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(fs_info, type, level);
907 	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(fs_info, level);
908 	default:
909 		/*
910 		 * This can't happen, the type is validated several times
911 		 * before we get here.
912 		 */
913 		BUG();
914 	}
915 }
916 
917 /*
918  * put a workspace struct back on the list or free it if we have enough
919  * idle ones sitting around
920  */
921 void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
922 {
923 	struct workspace_manager *gwsm = fs_info->compr_wsm[type];
924 	struct list_head *idle_ws;
925 	spinlock_t *ws_lock;
926 	atomic_t *total_ws;
927 	wait_queue_head_t *ws_wait;
928 	int *free_ws;
929 
930 	ASSERT(gwsm);
931 	idle_ws	 = &gwsm->idle_ws;
932 	ws_lock	 = &gwsm->ws_lock;
933 	total_ws = &gwsm->total_ws;
934 	ws_wait	 = &gwsm->ws_wait;
935 	free_ws	 = &gwsm->free_ws;
936 
937 	spin_lock(ws_lock);
938 	if (*free_ws <= num_online_cpus()) {
939 		list_add(ws, idle_ws);
940 		(*free_ws)++;
941 		spin_unlock(ws_lock);
942 		goto wake;
943 	}
944 	spin_unlock(ws_lock);
945 
946 	free_workspace(type, ws);
947 	atomic_dec(total_ws);
948 wake:
949 	cond_wake_up(ws_wait);
950 }
951 
952 static void put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
953 {
954 	switch (type) {
955 	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(fs_info, type, ws);
956 	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(fs_info, type, ws);
957 	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(fs_info, type, ws);
958 	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(fs_info, ws);
959 	default:
960 		/*
961 		 * This can't happen, the type is validated several times
962 		 * before we get here.
963 		 */
964 		BUG();
965 	}
966 }
967 
968 /*
969  * Adjust @level according to the limits of the compression algorithm or
970  * fall back to the default
971  */
972 static int btrfs_compress_set_level(unsigned int type, int level)
973 {
974 	const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
975 
976 	if (level == 0)
977 		level = levels->default_level;
978 	else
979 		level = clamp(level, levels->min_level, levels->max_level);
980 
981 	return level;
982 }
983 
984 /*
985  * Check whether the @level is within the valid range for the given type.
986  */
987 bool btrfs_compress_level_valid(unsigned int type, int level)
988 {
989 	const struct btrfs_compress_levels *levels = btrfs_compress_levels[type];
990 
991 	return levels->min_level <= level && level <= levels->max_level;
992 }
993 
994 /* Wrapper around filemap_get_folio(), with an extra error message. */
995 int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
996 				     struct folio **in_folio_ret)
997 {
998 	struct folio *in_folio;
999 
1000 	/*
1001 	 * The compressed write path should have the folio locked already, thus
1002 	 * we only need to grab one reference.
1003 	 */
1004 	in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
1005 	if (IS_ERR(in_folio)) {
1006 		struct btrfs_inode *inode = BTRFS_I(mapping->host);
1007 
1008 		btrfs_crit(inode->root->fs_info,
1009 		"failed to get page cache, root %lld ino %llu file offset %llu",
1010 			   btrfs_root_id(inode->root), btrfs_ino(inode), start);
1011 		return -ENOENT;
1012 	}
1013 	*in_folio_ret = in_folio;
1014 	return 0;
1015 }
1016 
1017 /*
1018  * Given an address space and start and length, compress the page cache
1019  * contents into @cb.
1020  *
1021  * @type:            the compression algorithm (one of the BTRFS_COMPRESS_* values)
1022  * @level:           the compression level, where level 0 means whatever
1023  *                   default the algorithm chooses and is opaque here;
1024  *                   out-of-range levels are clamped to the supported range
1025  *
1026  * @cb->bbio.bio.bi_iter.bi_size will indicate the compressed data size.
1027  * The bi_size may not be sectorsize aligned, thus the caller still needs
1028  * to do the round up before submission.
1029  *
1030  * This function will allocate compressed folios with btrfs_alloc_compr_folio(),
1031  * thus callers must make sure the endio function and error handling are using
1032  * btrfs_free_compr_folio() to release those folios.
1033  * This is already done in end_bbio_compressed_write() and cleanup_compressed_bio().
1034  */
1035 struct compressed_bio *btrfs_compress_bio(struct btrfs_inode *inode,
1036 					  u64 start, u32 len, unsigned int type,
1037 					  int level, blk_opf_t write_flags)
1038 {
1039 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1040 	struct list_head *workspace;
1041 	struct compressed_bio *cb;
1042 	int ret;
1043 
1044 	cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags,
1045 				  end_bbio_compressed_write);
1046 	cb->start = start;
1047 	cb->len = len;
1048 	cb->writeback = true;
1049 	cb->compress_type = type;
1050 
1051 	level = btrfs_compress_set_level(type, level);
1052 	workspace = get_workspace(fs_info, type, level);
1053 	switch (type) {
1054 	case BTRFS_COMPRESS_ZLIB:
1055 		ret = zlib_compress_bio(workspace, cb);
1056 		break;
1057 	case BTRFS_COMPRESS_LZO:
1058 		ret = lzo_compress_bio(workspace, cb);
1059 		break;
1060 	case BTRFS_COMPRESS_ZSTD:
1061 		ret = zstd_compress_bio(workspace, cb);
1062 		break;
1063 	case BTRFS_COMPRESS_NONE:
1064 	default:
1065 		/*
1066 		 * This can happen when compression races with remount setting
1067 		 * it to 'no compress', while caller doesn't call
1068 		 * inode_need_compress() to check if we really need to
1069 		 * compress.
1070 		 *
1071 		 * Not a big deal, just need to inform caller that we
1072 		 * haven't allocated any pages yet.
1073 		 */
1074 		ret = -E2BIG;
1075 	}
1076 
1077 	put_workspace(fs_info, type, workspace);
1078 	if (ret < 0) {
1079 		cleanup_compressed_bio(cb);
1080 		return ERR_PTR(ret);
1081 	}
1082 	return cb;
1083 }
1084 
1085 static int btrfs_decompress_bio(struct compressed_bio *cb)
1086 {
1087 	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
1088 	struct list_head *workspace;
1089 	int ret;
1090 	int type = cb->compress_type;
1091 
1092 	workspace = get_workspace(fs_info, type, 0);
1093 	ret = compression_decompress_bio(workspace, cb);
1094 	put_workspace(fs_info, type, workspace);
1095 
1096 	if (!ret)
1097 		zero_fill_bio(&cb->orig_bbio->bio);
1098 	return ret;
1099 }
1100 
1101 /*
1102  * a less complex decompression routine.  Our compressed data fits in a
1103  * single page, and we want to read a single page out of it.
1104  * dest_pgoff tells us the offset into the destination folio where we write the
1105  * decompressed data.
1106  */
1107 int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
1108 		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
1109 {
1110 	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
1111 	struct list_head *workspace;
1112 	const u32 sectorsize = fs_info->sectorsize;
1113 	int ret;
1114 
1115 	/*
1116 	 * The full destination folio range should not exceed the folio size.
1117 	 * And the @destlen should not exceed sectorsize, as this is only called for
1118 	 * inline file extents, which should not exceed sectorsize.
1119 	 */
1120 	ASSERT(dest_pgoff + destlen <= folio_size(dest_folio) && destlen <= sectorsize);
1121 
1122 	workspace = get_workspace(fs_info, type, 0);
1123 	ret = compression_decompress(type, workspace, data_in, dest_folio,
1124 				     dest_pgoff, srclen, destlen);
1125 	put_workspace(fs_info, type, workspace);
1126 
1127 	return ret;
1128 }
1129 
1130 int btrfs_alloc_compress_wsm(struct btrfs_fs_info *fs_info)
1131 {
1132 	int ret;
1133 
1134 	ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
1135 	if (ret < 0)
1136 		goto error;
1137 	ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
1138 	if (ret < 0)
1139 		goto error;
1140 	ret = alloc_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
1141 	if (ret < 0)
1142 		goto error;
1143 	ret = zstd_alloc_workspace_manager(fs_info);
1144 	if (ret < 0)
1145 		goto error;
1146 	return 0;
1147 error:
1148 	btrfs_free_compress_wsm(fs_info);
1149 	return ret;
1150 }
1151 
1152 void btrfs_free_compress_wsm(struct btrfs_fs_info *fs_info)
1153 {
1154 	free_workspace_manager(fs_info, BTRFS_COMPRESS_NONE);
1155 	free_workspace_manager(fs_info, BTRFS_COMPRESS_ZLIB);
1156 	free_workspace_manager(fs_info, BTRFS_COMPRESS_LZO);
1157 	zstd_free_workspace_manager(fs_info);
1158 }
1159 
1160 int __init btrfs_init_compress(void)
1161 {
1162 	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
1163 			offsetof(struct compressed_bio, bbio.bio),
1164 			BIOSET_NEED_BVECS))
1165 		return -ENOMEM;
1166 
1167 	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
1168 	if (!compr_pool.shrinker)
1169 		return -ENOMEM;
1170 
1171 	spin_lock_init(&compr_pool.lock);
1172 	INIT_LIST_HEAD(&compr_pool.list);
1173 	compr_pool.count = 0;
1174 	/* 128K / 4K = 32 pages per compressed extent, for 8 threads that is 256 pages. */
1175 	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
1176 	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
1177 	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
1178 	compr_pool.shrinker->batch = 32;
1179 	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
1180 	shrinker_register(compr_pool.shrinker);
1181 
1182 	return 0;
1183 }
1184 
1185 void __cold btrfs_exit_compress(void)
1186 {
1187 	/* For now scan drains all pages and does not touch the parameters. */
1188 	btrfs_compr_pool_scan(NULL, NULL);
1189 	shrinker_free(compr_pool.shrinker);
1190 
1191 	bioset_exit(&btrfs_compressed_bioset);
1192 }
1193 
1194 /*
1195  * The bvec is a single page bvec from a bio that contains folios from a filemap.
1196  *
1197  * The folio may be a large one, and if the bv_page is not the head page of
1198  * a large folio, then page->index is unreliable.
1199  *
1200  * Thus we need this helper to grab the proper file offset.
1201  */
1202 static u64 file_offset_from_bvec(const struct bio_vec *bvec)
1203 {
1204 	const struct page *page = bvec->bv_page;
1205 	const struct folio *folio = page_folio(page);
1206 
1207 	return (page_pgoff(folio, page) << PAGE_SHIFT) + bvec->bv_offset;
1208 }
1209 
1210 /*
1211  * Copy decompressed data from working buffer to pages.
1212  *
1213  * @buf:		The decompressed data buffer
1214  * @buf_len:		The decompressed data length
1215  * @decompressed:	Number of bytes that are already decompressed inside the
1216  * 			compressed extent
1217  * @cb:			The compressed extent descriptor
1218  * @orig_bio:		The original bio that the caller wants to read for
1219  *
1220  * An easier to understand graph is like below:
1221  *
1222  * 		|<- orig_bio ->|     |<- orig_bio->|
1223  * 	|<-------      full decompressed extent      ----->|
1224  * 	|<-----------    @cb range   ---->|
1225  * 	|			|<-- @buf_len -->|
1226  * 	|<--- @decompressed --->|
1227  *
1228  * Note that @cb can be a subpage of the full decompressed extent, but
1229  * @cb->start always matches the file offset of the full
1230  * decompressed extent.
1231  *
1232  * When reading a compressed extent, we have to read the full compressed extent,
1233  * while @orig_bio may only want part of the range.
1234  * Thus this function will ensure only data covered by @orig_bio will be copied
1235  * to it.
1236  *
1237  * Return 0 if we have copied all needed contents for @orig_bio.
1238  * Return >0 if we need to continue decompressing.
1239  */
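/*
 * A worked example with hypothetical numbers: cb->start = 0 and @orig_bio
 * wants file range [4K, 8K) as a single 4K bvec.  The first call with
 * decompressed = 0 and buf_len = 4K hits the "haven't reached the bvec range"
 * check (0 + 4K <= 4K) and returns 1 without copying anything.  The second
 * call with decompressed = 4K copies all 4K (copy_start = 4K, copy_len = 4K),
 * advances the bio to completion and returns 0.
 */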
1240 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1241 			      struct compressed_bio *cb, u32 decompressed)
1242 {
1243 	struct bio *orig_bio = &cb->orig_bbio->bio;
1244 	/* Offset inside the full decompressed extent */
1245 	u32 cur_offset;
1246 
1247 	cur_offset = decompressed;
1248 	/* The main loop to do the copy */
1249 	while (cur_offset < decompressed + buf_len) {
1250 		struct bio_vec bvec;
1251 		size_t copy_len;
1252 		u32 copy_start;
1253 		/* Offset inside the full decompressed extent */
1254 		u32 bvec_offset;
1255 		void *kaddr;
1256 
1257 		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1258 		/*
1259 		 * cb->start may underflow, but subtracting that value can still
1260 		 * give us correct offset inside the full decompressed extent.
1261 		 */
1262 		bvec_offset = file_offset_from_bvec(&bvec) - cb->start;
1263 
1264 		/* Haven't reached the bvec range, exit */
1265 		if (decompressed + buf_len <= bvec_offset)
1266 			return 1;
1267 
1268 		copy_start = max(cur_offset, bvec_offset);
1269 		copy_len = min(bvec_offset + bvec.bv_len,
1270 			       decompressed + buf_len) - copy_start;
1271 		ASSERT(copy_len);
1272 
1273 		/*
1274 		 * Extra range check to ensure we didn't go beyond
1275 		 * @buf + @buf_len.
1276 		 */
1277 		ASSERT(copy_start - decompressed < buf_len);
1278 
1279 		kaddr = bvec_kmap_local(&bvec);
1280 		memcpy(kaddr, buf + copy_start - decompressed, copy_len);
1281 		kunmap_local(kaddr);
1282 
1283 		cur_offset += copy_len;
1284 		bio_advance(orig_bio, copy_len);
1285 		/* Finished the bio */
1286 		if (!orig_bio->bi_iter.bi_size)
1287 			return 0;
1288 	}
1289 	return 1;
1290 }
1291 
1292 /*
1293  * Shannon Entropy calculation
1294  *
1295  * Pure byte distribution analysis fails to determine compressibility of data.
1296  * Try calculating entropy to estimate the average minimum number of bits
1297  * needed to encode the sampled data.
1298  *
1299  * For convenience, return the percentage of needed bits, instead of amount of
1300  * bits directly.
1301  *
1302  * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1303  *			    and can be compressible with high probability
1304  *
1305  * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1306  *
1307  * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1308  */
1309 #define ENTROPY_LVL_ACEPTABLE		(65)
1310 #define ENTROPY_LVL_HIGH		(80)
1311 
1312 /*
1313  * For increased precision in shannon_entropy calculation,
1314  * let's do pow(n, M) to save more digits after the decimal point:
1315  *
1316  * - maximum int bit length is 64
1317  * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1318  * - 13 * 4 = 52 < 64		-> M = 4
1319  *
1320  * So use pow(n, 4).
1321  */
1322 static inline u32 ilog2_w(u64 n)
1323 {
1324 	return ilog2(n * n * n * n);
1325 }
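/*
 * Example of the precision gain: for n = 1000, ilog2_w(1000) = ilog2(10^12) = 39,
 * close to the exact 4 * log2(1000) ~= 39.86, whereas 4 * ilog2(1000) would
 * only give 36.  The result is therefore log2(n) scaled by 4 with extra
 * integer precision.
 */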
1326 
1327 static u32 shannon_entropy(struct heuristic_ws *ws)
1328 {
1329 	const u32 entropy_max = 8 * ilog2_w(2);
1330 	u32 entropy_sum = 0;
1331 	u32 p, p_base, sz_base;
1332 	u32 i;
1333 
1334 	sz_base = ilog2_w(ws->sample_size);
1335 	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1336 		p = ws->bucket[i].count;
1337 		p_base = ilog2_w(p);
1338 		entropy_sum += p * (sz_base - p_base);
1339 	}
1340 
1341 	entropy_sum /= ws->sample_size;
1342 	return entropy_sum * 100 / entropy_max;
1343 }
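/*
 * How the percentage above works out: each bucket contributes roughly
 * p * 4 * log2(sample_size / p) thanks to ilog2_w(), so entropy_sum divided by
 * sample_size is about 4 * H, where H is the Shannon entropy in bits per byte.
 * With entropy_max = 8 * ilog2_w(2) = 32 the return value is 4 * H * 100 / 32,
 * i.e. H as a percentage of the maximal 8 bits per byte.
 */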
1344 
1345 #define RADIX_BASE		4U
1346 #define COUNTERS_SIZE		(1U << RADIX_BASE)
1347 
1348 static u8 get4bits(u64 num, int shift) {
1349 	u8 low4bits;
1350 
1351 	num >>= shift;
1352 	/* Reverse order */
1353 	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1354 	return low4bits;
1355 }
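/*
 * Because get4bits() returns the complement of each 4-bit digit, radix_sort()
 * below effectively sorts the buckets in descending order of count (e.g.
 * counts 3, 40, 7 end up as 40, 7, 3), which is what byte_core_set_size()
 * relies on ("Sort in reverse order").
 */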
1356 
1357 /*
1358  * Use 4 bits as radix base
1359  * Use 16 u32 counters for calculating new position in buf array
1360  *
1361  * @array     - array that will be sorted
1362  * @array_buf - buffer array to store sorting results
1363  *              must be equal in size to @array
1364  * @num       - array size
1365  */
1366 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1367 		       int num)
1368 {
1369 	u64 max_num;
1370 	u64 buf_num;
1371 	u32 counters[COUNTERS_SIZE];
1372 	u32 new_addr;
1373 	u32 addr;
1374 	int bitlen;
1375 	int shift;
1376 	int i;
1377 
1378 	/*
1379 	 * Try to avoid useless loop iterations for small numbers stored in big
1380 	 * counters.  Example: 48 33 4 ... in 64bit array
1381 	 */
1382 	max_num = array[0].count;
1383 	for (i = 1; i < num; i++) {
1384 		buf_num = array[i].count;
1385 		if (buf_num > max_num)
1386 			max_num = buf_num;
1387 	}
1388 
1389 	buf_num = ilog2(max_num);
1390 	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1391 
1392 	shift = 0;
1393 	while (shift < bitlen) {
1394 		memset(counters, 0, sizeof(counters));
1395 
1396 		for (i = 0; i < num; i++) {
1397 			buf_num = array[i].count;
1398 			addr = get4bits(buf_num, shift);
1399 			counters[addr]++;
1400 		}
1401 
1402 		for (i = 1; i < COUNTERS_SIZE; i++)
1403 			counters[i] += counters[i - 1];
1404 
1405 		for (i = num - 1; i >= 0; i--) {
1406 			buf_num = array[i].count;
1407 			addr = get4bits(buf_num, shift);
1408 			counters[addr]--;
1409 			new_addr = counters[addr];
1410 			array_buf[new_addr] = array[i];
1411 		}
1412 
1413 		shift += RADIX_BASE;
1414 
1415 		/*
1416 		 * Normal radix expects to move data from a temporary array to
1417 		 * the main one.  But that requires some CPU time. Avoid that
1418 		 * by doing another sort iteration into the original array instead
1419 		 * of a memcpy().
1420 		 */
1421 		memset(counters, 0, sizeof(counters));
1422 
1423 		for (i = 0; i < num; i ++) {
1424 			buf_num = array_buf[i].count;
1425 			addr = get4bits(buf_num, shift);
1426 			counters[addr]++;
1427 		}
1428 
1429 		for (i = 1; i < COUNTERS_SIZE; i++)
1430 			counters[i] += counters[i - 1];
1431 
1432 		for (i = num - 1; i >= 0; i--) {
1433 			buf_num = array_buf[i].count;
1434 			addr = get4bits(buf_num, shift);
1435 			counters[addr]--;
1436 			new_addr = counters[addr];
1437 			array[new_addr] = array_buf[i];
1438 		}
1439 
1440 		shift += RADIX_BASE;
1441 	}
1442 }
1443 
1444 /*
1445  * Size of the core byte set - how many bytes cover 90% of the sample
1446  *
1447  * There are several types of structured binary data that use nearly all byte
1448  * values. The distribution can be uniform and counts in all buckets will be
1449  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1450  *
1451  * Another possibility is a normal (Gaussian) distribution, where the data could
1452  * be potentially compressible, but we have to take a few more steps to decide
1453  * how much.
1454  *
1455  * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1456  *                       compression algo can easily handle that
1457  * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1458  *                       probability is not compressible
1459  */
1460 #define BYTE_CORE_SET_LOW		(64)
1461 #define BYTE_CORE_SET_HIGH		(200)
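/*
 * Worked out for the maximal sample: with sample_size = 8192 the 90% threshold
 * is 8192 * 90 / 100 = 7372 bytes.  If the 64 (BYTE_CORE_SET_LOW) most frequent
 * byte values already cover that many sampled bytes, the data is considered
 * easily compressible; needing more than 200 (BYTE_CORE_SET_HIGH) values means
 * it is likely not compressible.
 */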
1462 
1463 static int byte_core_set_size(struct heuristic_ws *ws)
1464 {
1465 	u32 i;
1466 	u32 coreset_sum = 0;
1467 	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1468 	struct bucket_item *bucket = ws->bucket;
1469 
1470 	/* Sort in reverse order */
1471 	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1472 
1473 	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1474 		coreset_sum += bucket[i].count;
1475 
1476 	if (coreset_sum > core_set_threshold)
1477 		return i;
1478 
1479 	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1480 		coreset_sum += bucket[i].count;
1481 		if (coreset_sum > core_set_threshold)
1482 			break;
1483 	}
1484 
1485 	return i;
1486 }
1487 
1488 /*
1489  * Count byte values in buckets.
1490  * This heuristic can detect textual data (configs, xml, json, html, etc).
1491  * Because in most text-like data the byte set is restricted to a limited number
1492  * of possible characters, and that restriction in most cases makes data easy to
1493  * compress.
1494  *
1495  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1496  *	less - compressible
1497  *	more - need additional analysis
1498  */
1499 #define BYTE_SET_THRESHOLD		(64)
1500 
1501 static u32 byte_set_size(const struct heuristic_ws *ws)
1502 {
1503 	u32 i;
1504 	u32 byte_set_size = 0;
1505 
1506 	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1507 		if (ws->bucket[i].count > 0)
1508 			byte_set_size++;
1509 	}
1510 
1511 	/*
1512 	 * Continue collecting count of byte values in buckets.  If the byte
1513 	 * set size is bigger than the threshold, it's pointless to continue as
1514 	 * the detection technique would fail for this type of data.
1515 	 */
1516 	for (; i < BUCKET_SIZE; i++) {
1517 		if (ws->bucket[i].count > 0) {
1518 			byte_set_size++;
1519 			if (byte_set_size > BYTE_SET_THRESHOLD)
1520 				return byte_set_size;
1521 		}
1522 	}
1523 
1524 	return byte_set_size;
1525 }
1526 
1527 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1528 {
1529 	const u32 half_of_sample = ws->sample_size / 2;
1530 	const u8 *data = ws->sample;
1531 
1532 	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1533 }
1534 
1535 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1536 				     struct heuristic_ws *ws)
1537 {
1538 	struct page *page;
1539 	pgoff_t index, index_end;
1540 	u32 i, curr_sample_pos;
1541 	u8 *in_data;
1542 
1543 	/*
1544 	 * Compression handles the input data by chunks of 128KiB
1545 	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1546 	 *
1547 	 * We do the same for the heuristic and loop over the whole range.
1548 	 *
1549 	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1550 	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1551 	 */
1552 	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1553 		end = start + BTRFS_MAX_UNCOMPRESSED;
1554 
1555 	index = start >> PAGE_SHIFT;
1556 	index_end = end >> PAGE_SHIFT;
1557 
1558 	/* Don't miss unaligned end */
1559 	if (!PAGE_ALIGNED(end))
1560 		index_end++;
1561 
1562 	curr_sample_pos = 0;
1563 	while (index < index_end) {
1564 		page = find_get_page(inode->i_mapping, index);
1565 		in_data = kmap_local_page(page);
1566 		/* Handle case where the start is not aligned to PAGE_SIZE */
1567 		i = start % PAGE_SIZE;
1568 		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1569 			/* Don't sample any garbage from the last page */
1570 			if (start > end - SAMPLING_READ_SIZE)
1571 				break;
1572 			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1573 					SAMPLING_READ_SIZE);
1574 			i += SAMPLING_INTERVAL;
1575 			start += SAMPLING_INTERVAL;
1576 			curr_sample_pos += SAMPLING_READ_SIZE;
1577 		}
1578 		kunmap_local(in_data);
1579 		put_page(page);
1580 
1581 		index++;
1582 	}
1583 
1584 	ws->sample_size = curr_sample_pos;
1585 }
1586 
1587 /*
1588  * Compression heuristic.
1589  *
1590  * The following types of analysis can be performed:
1591  * - detect mostly zero data
1592  * - detect data with low "byte set" size (text, etc)
1593  * - detect data with low/high "core byte" set
1594  *
1595  * Return non-zero if the compression should be done, 0 otherwise.
1596  */
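/*
 * As implemented below, the non-zero return values encode which test fired:
 * 1 - the two sample halves are identical (repeated pattern),
 * 2 - byte set smaller than BYTE_SET_THRESHOLD,
 * 3 - core byte set not bigger than BYTE_CORE_SET_LOW,
 * 4 - entropy at or below ENTROPY_LVL_ACEPTABLE,
 * 5 - entropy below ENTROPY_LVL_HIGH (compress, can be defragged later).
 */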
1597 int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
1598 {
1599 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1600 	struct list_head *ws_list = get_workspace(fs_info, 0, 0);
1601 	struct heuristic_ws *ws;
1602 	u32 i;
1603 	u8 byte;
1604 	int ret = 0;
1605 
1606 	ws = list_entry(ws_list, struct heuristic_ws, list);
1607 
1608 	heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
1609 
1610 	if (sample_repeated_patterns(ws)) {
1611 		ret = 1;
1612 		goto out;
1613 	}
1614 
1615 	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1616 
1617 	for (i = 0; i < ws->sample_size; i++) {
1618 		byte = ws->sample[i];
1619 		ws->bucket[byte].count++;
1620 	}
1621 
1622 	i = byte_set_size(ws);
1623 	if (i < BYTE_SET_THRESHOLD) {
1624 		ret = 2;
1625 		goto out;
1626 	}
1627 
1628 	i = byte_core_set_size(ws);
1629 	if (i <= BYTE_CORE_SET_LOW) {
1630 		ret = 3;
1631 		goto out;
1632 	}
1633 
1634 	if (i >= BYTE_CORE_SET_HIGH) {
1635 		ret = 0;
1636 		goto out;
1637 	}
1638 
1639 	i = shannon_entropy(ws);
1640 	if (i <= ENTROPY_LVL_ACEPTABLE) {
1641 		ret = 4;
1642 		goto out;
1643 	}
1644 
1645 	/*
1646 	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1647 	 * needed to give green light to compression.
1648 	 *
1649 	 * For now just assume that compression at that level is not worth the
1650 	 * resources because:
1651 	 *
1652 	 * 1. it is possible to defrag the data later
1653 	 *
1654 	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1655 	 * values, every bucket has counter at level ~54. The heuristic would
1656 	 * be confused. This can happen when data have some internal repeated
1657 	 * patterns like "abbacbbc...". This can be detected by analyzing
1658 	 * pairs of bytes, which is too costly.
1659 	 */
1660 	if (i < ENTROPY_LVL_HIGH) {
1661 		ret = 5;
1662 		goto out;
1663 	} else {
1664 		ret = 0;
1665 		goto out;
1666 	}
1667 
1668 out:
1669 	put_workspace(fs_info, 0, ws_list);
1670 	return ret;
1671 }
1672 
1673 /*
1674  * Convert the compression suffix (eg. after "zlib" starting with ":") to level.
1675  *
1676  * If the resulting level exceeds the algo's supported levels, it will be clamped.
1677  *
1678  * Return <0 if no valid string can be found.
1679  * Return 0 if everything is fine.
1680  */
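/*
 * For example (per the parsing below): the suffix ":9" yields level 9 (clamped
 * to the algorithm's supported range), while an empty suffix keeps level 0 and
 * thus maps to the algorithm's default level.
 */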
1681 int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret)
1682 {
1683 	int level = 0;
1684 	int ret;
1685 
1686 	if (!type) {
1687 		*level_ret = btrfs_compress_set_level(type, level);
1688 		return 0;
1689 	}
1690 
1691 	if (str[0] == ':') {
1692 		ret = kstrtoint(str + 1, 10, &level);
1693 		if (ret)
1694 			return ret;
1695 	}
1696 
1697 	*level_ret = btrfs_compress_set_level(type, level);
1698 	return 0;
1699 }
1700