xref: /linux/fs/btrfs/compression.c (revision 41e0d49104dbff888ef6446ea46842fde66c0a76)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2008 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/file.h>
9 #include <linux/fs.h>
10 #include <linux/pagemap.h>
11 #include <linux/pagevec.h>
12 #include <linux/highmem.h>
13 #include <linux/kthread.h>
14 #include <linux/time.h>
15 #include <linux/init.h>
16 #include <linux/string.h>
17 #include <linux/backing-dev.h>
18 #include <linux/writeback.h>
19 #include <linux/psi.h>
20 #include <linux/slab.h>
21 #include <linux/sched/mm.h>
22 #include <linux/log2.h>
23 #include <crypto/hash.h>
24 #include "misc.h"
25 #include "ctree.h"
26 #include "fs.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "btrfs_inode.h"
30 #include "bio.h"
31 #include "ordered-data.h"
32 #include "compression.h"
33 #include "extent_io.h"
34 #include "extent_map.h"
35 #include "subpage.h"
36 #include "zoned.h"
37 #include "file-item.h"
38 #include "super.h"
39 
40 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
41 
42 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
43 {
44 	switch (type) {
45 	case BTRFS_COMPRESS_ZLIB:
46 	case BTRFS_COMPRESS_LZO:
47 	case BTRFS_COMPRESS_ZSTD:
48 	case BTRFS_COMPRESS_NONE:
49 		return btrfs_compress_types[type];
50 	default:
51 		break;
52 	}
53 
54 	return NULL;
55 }
56 
57 bool btrfs_compress_is_valid_type(const char *str, size_t len)
58 {
59 	int i;
60 
61 	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
62 		size_t comp_len = strlen(btrfs_compress_types[i]);
63 
64 		if (len < comp_len)
65 			continue;
66 
67 		if (!strncmp(btrfs_compress_types[i], str, comp_len))
68 			return true;
69 	}
70 	return false;
71 }
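
/*
 * Editorial note (not part of the original file): the prefix match above
 * deliberately accepts a trailing level suffix, e.g.:
 *
 *	btrfs_compress_is_valid_type("zlib", 4)   -> true
 *	btrfs_compress_is_valid_type("zstd:3", 6) -> true, matches "zstd"
 *	btrfs_compress_is_valid_type("zl", 2)     -> false, shorter than any entry
 */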
72 
73 static int compression_compress_pages(int type, struct list_head *ws,
74                struct address_space *mapping, u64 start, struct page **pages,
75                unsigned long *out_pages, unsigned long *total_in,
76                unsigned long *total_out)
77 {
78 	switch (type) {
79 	case BTRFS_COMPRESS_ZLIB:
80 		return zlib_compress_pages(ws, mapping, start, pages,
81 				out_pages, total_in, total_out);
82 	case BTRFS_COMPRESS_LZO:
83 		return lzo_compress_pages(ws, mapping, start, pages,
84 				out_pages, total_in, total_out);
85 	case BTRFS_COMPRESS_ZSTD:
86 		return zstd_compress_pages(ws, mapping, start, pages,
87 				out_pages, total_in, total_out);
88 	case BTRFS_COMPRESS_NONE:
89 	default:
90 		/*
91 		 * This can happen when compression races with a remount that
92 		 * sets it to 'no compress', while the caller doesn't call
93 		 * inode_need_compress() to check if we really need to
94 		 * compress.
95 		 *
96 		 * Not a big deal, just need to inform caller that we
97 		 * haven't allocated any pages yet.
98 		 */
99 		*out_pages = 0;
100 		return -E2BIG;
101 	}
102 }
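
/*
 * Editorial note (not part of the original file): callers are expected to
 * treat any error here, including the -E2BIG above, as "write this range
 * uncompressed", and to release the first *out_pages entries of @pages.
 */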
103 
104 static int compression_decompress_bio(struct list_head *ws,
105 				      struct compressed_bio *cb)
106 {
107 	switch (cb->compress_type) {
108 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
109 	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
110 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
111 	case BTRFS_COMPRESS_NONE:
112 	default:
113 		/*
114 		 * This can't happen, the type is validated several times
115 		 * before we get here.
116 		 */
117 		BUG();
118 	}
119 }
120 
121 static int compression_decompress(int type, struct list_head *ws,
122                const u8 *data_in, struct page *dest_page,
123                unsigned long start_byte, size_t srclen, size_t destlen)
124 {
125 	switch (type) {
126 	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
127 						start_byte, srclen, destlen);
128 	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
129 						start_byte, srclen, destlen);
130 	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
131 						start_byte, srclen, destlen);
132 	case BTRFS_COMPRESS_NONE:
133 	default:
134 		/*
135 		 * This can't happen, the type is validated several times
136 		 * before we get here.
137 		 */
138 		BUG();
139 	}
140 }
141 
142 static int btrfs_decompress_bio(struct compressed_bio *cb);
143 
144 static void finish_compressed_bio_read(struct compressed_bio *cb)
145 {
146 	unsigned int index;
147 	struct page *page;
148 
149 	if (cb->status == BLK_STS_OK)
150 		cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));
151 
152 	/* Release the compressed pages */
153 	for (index = 0; index < cb->nr_pages; index++) {
154 		page = cb->compressed_pages[index];
155 		page->mapping = NULL;
156 		put_page(page);
157 	}
158 
159 	/* Do io completion on the original bio */
160 	btrfs_bio_end_io(btrfs_bio(cb->orig_bio), cb->status);
161 
162 	/* Finally free the cb struct */
163 	kfree(cb->compressed_pages);
164 	kfree(cb);
165 }
166 
167 /*
168  * Verify the checksums and kick off repair if needed on the compressed data
169  * before decompressing it into the original bio and freeing the compressed
170  * pages.
171  */
172 static void end_compressed_bio_read(struct btrfs_bio *bbio)
173 {
174 	struct compressed_bio *cb = bbio->private;
175 	struct inode *inode = cb->inode;
176 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
177 	struct btrfs_inode *bi = BTRFS_I(inode);
178 	bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
179 		    !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
180 	blk_status_t status = bbio->bio.bi_status;
181 	struct bvec_iter iter;
182 	struct bio_vec bv;
183 	u32 offset;
184 
185 	btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
186 		u64 start = bbio->file_offset + offset;
187 
188 		if (!status &&
189 		    (!csum || !btrfs_check_data_csum(bi, bbio, offset,
190 						     bv.bv_page, bv.bv_offset))) {
191 			btrfs_clean_io_failure(bi, start, bv.bv_page,
192 					       bv.bv_offset);
193 		} else {
194 			int ret;
195 
196 			refcount_inc(&cb->pending_ios);
197 			ret = btrfs_repair_one_sector(BTRFS_I(inode), bbio, offset,
198 						      bv.bv_page, bv.bv_offset,
199 						      true);
200 			if (ret) {
201 				refcount_dec(&cb->pending_ios);
202 				status = errno_to_blk_status(ret);
203 			}
204 		}
205 	}
206 
207 	if (status)
208 		cb->status = status;
209 
210 	if (refcount_dec_and_test(&cb->pending_ios))
211 		finish_compressed_bio_read(cb);
212 	btrfs_bio_free_csum(bbio);
213 	bio_put(&bbio->bio);
214 }
215 
216 /*
217  * Clear the writeback bits on all of the file
218  * pages for a compressed write
219  */
220 static noinline void end_compressed_writeback(struct inode *inode,
221 					      const struct compressed_bio *cb)
222 {
223 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
224 	unsigned long index = cb->start >> PAGE_SHIFT;
225 	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
226 	struct folio_batch fbatch;
227 	const int errno = blk_status_to_errno(cb->status);
228 	int i;
229 	int ret;
230 
231 	if (errno)
232 		mapping_set_error(inode->i_mapping, errno);
233 
234 	folio_batch_init(&fbatch);
235 	while (index <= end_index) {
236 		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
237 				&fbatch);
238 
239 		if (ret == 0)
240 			return;
241 
242 		for (i = 0; i < ret; i++) {
243 			struct folio *folio = fbatch.folios[i];
244 
245 			if (errno)
246 				folio_set_error(folio);
247 			btrfs_page_clamp_clear_writeback(fs_info, &folio->page,
248 							 cb->start, cb->len);
249 		}
250 		folio_batch_release(&fbatch);
251 	}
252 	/* the inode may be gone now */
253 }
254 
255 static void finish_compressed_bio_write(struct compressed_bio *cb)
256 {
257 	struct inode *inode = cb->inode;
258 	unsigned int index;
259 
260 	/*
261 	 * Ok, we're the last bio for this extent, step one is to call back
262 	 * into the FS and do all the end_io operations.
263 	 */
264 	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
265 			cb->start, cb->start + cb->len - 1,
266 			cb->status == BLK_STS_OK);
267 
268 	if (cb->writeback)
269 		end_compressed_writeback(inode, cb);
270 	/* Note, our inode could be gone now */
271 
272 	/*
273 	 * Release the compressed pages; these came from alloc_page() and
274 	 * are not attached to the inode at all
275 	 */
276 	for (index = 0; index < cb->nr_pages; index++) {
277 		struct page *page = cb->compressed_pages[index];
278 
279 		page->mapping = NULL;
280 		put_page(page);
281 	}
282 
283 	/* Finally free the cb struct */
284 	kfree(cb->compressed_pages);
285 	kfree(cb);
286 }
287 
288 static void btrfs_finish_compressed_write_work(struct work_struct *work)
289 {
290 	struct compressed_bio *cb =
291 		container_of(work, struct compressed_bio, write_end_work);
292 
293 	finish_compressed_bio_write(cb);
294 }
295 
296 /*
297  * Do the cleanup once all the compressed pages hit the disk.  This will clear
298  * writeback on the file pages and free the compressed pages.
299  *
300  * This also calls the writeback end hooks for the file pages so that metadata
301  * and checksums can be updated in the file.
302  */
303 static void end_compressed_bio_write(struct btrfs_bio *bbio)
304 {
305 	struct compressed_bio *cb = bbio->private;
306 
307 	if (bbio->bio.bi_status)
308 		cb->status = bbio->bio.bi_status;
309 
310 	if (refcount_dec_and_test(&cb->pending_ios)) {
311 		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
312 
313 		btrfs_record_physical_zoned(cb->inode, cb->start, &bbio->bio);
314 		queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
315 	}
316 	bio_put(&bbio->bio);
317 }
318 
319 /*
320  * Allocate a compressed_bio, which will be used to read/write on-disk
321  * (aka, compressed) data.
322  *
323  * @cb:                 The compressed_bio structure, which records all the needed
324  *                      information to bind the compressed data to the uncompressed
325  *                      page cache.
326  * @disk_bytenr:        The logical bytenr where the compressed data will be read
327  *                      from or written to.
328  * @endio_func:         The endio function to call after the IO for compressed data
329  *                      is finished.
330  * @next_stripe_start:  Return value of logical bytenr of where next stripe starts.
331  *                      Let the caller know to only fill the bio up to the stripe
332  *                      boundary.
333  */
336 static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
337 					blk_opf_t opf,
338 					btrfs_bio_end_io_t endio_func,
339 					u64 *next_stripe_start)
340 {
341 	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
342 	struct btrfs_io_geometry geom;
343 	struct extent_map *em;
344 	struct bio *bio;
345 	int ret;
346 
347 	bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, endio_func, cb);
348 	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
349 
350 	em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
351 	if (IS_ERR(em)) {
352 		bio_put(bio);
353 		return ERR_CAST(em);
354 	}
355 
356 	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
357 		bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);
358 
359 	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
360 	free_extent_map(em);
361 	if (ret < 0) {
362 		bio_put(bio);
363 		return ERR_PTR(ret);
364 	}
365 	*next_stripe_start = disk_bytenr + geom.len;
366 	refcount_inc(&cb->pending_ios);
367 	return bio;
368 }
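
/*
 * Editorial note (not part of the original file): a sketch of the pattern
 * the callers below follow with @next_stripe_start:
 *
 *	bio = alloc_compressed_bio(cb, cur, opf, endio, &next_stripe_start);
 *	while (cur < next_stripe_start && data left)
 *		cur += bio_add_page(bio, ...);
 *	btrfs_submit_bio(fs_info, bio, mirror_num);
 *
 * so a single bio never crosses a stripe boundary.
 */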
369 
370 /*
371  * worker function to build and submit bios for previously compressed pages.
372  * The corresponding pages in the inode should be marked for writeback
373  * and the compressed pages should have a reference on them for dropping
374  * when the IO is complete.
375  *
376  * This also checksums the compressed data and gets things ready for
377  * the end io hooks.
378  */
379 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
380 				 unsigned int len, u64 disk_start,
381 				 unsigned int compressed_len,
382 				 struct page **compressed_pages,
383 				 unsigned int nr_pages,
384 				 blk_opf_t write_flags,
385 				 struct cgroup_subsys_state *blkcg_css,
386 				 bool writeback)
387 {
388 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
389 	struct bio *bio = NULL;
390 	struct compressed_bio *cb;
391 	u64 cur_disk_bytenr = disk_start;
392 	u64 next_stripe_start;
393 	blk_status_t ret = BLK_STS_OK;
394 	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
395 	const bool use_append = btrfs_use_zone_append(inode, disk_start);
396 	const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
397 
398 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
399 	       IS_ALIGNED(len, fs_info->sectorsize));
400 	cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
401 	if (!cb)
402 		return BLK_STS_RESOURCE;
403 	refcount_set(&cb->pending_ios, 1);
404 	cb->status = BLK_STS_OK;
405 	cb->inode = &inode->vfs_inode;
406 	cb->start = start;
407 	cb->len = len;
408 	cb->compressed_pages = compressed_pages;
409 	cb->compressed_len = compressed_len;
410 	cb->writeback = writeback;
411 	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
412 	cb->nr_pages = nr_pages;
413 
414 	if (blkcg_css)
415 		kthread_associate_blkcg(blkcg_css);
416 
417 	while (cur_disk_bytenr < disk_start + compressed_len) {
418 		u64 offset = cur_disk_bytenr - disk_start;
419 		unsigned int index = offset >> PAGE_SHIFT;
420 		unsigned int real_size;
421 		unsigned int added;
422 		struct page *page = compressed_pages[index];
423 		bool submit = false;
424 
425 		/* Allocate a new bio if none exists or the last one was submitted */
426 		if (!bio) {
427 			bio = alloc_compressed_bio(cb, cur_disk_bytenr,
428 				bio_op | write_flags, end_compressed_bio_write,
429 				&next_stripe_start);
430 			if (IS_ERR(bio)) {
431 				ret = errno_to_blk_status(PTR_ERR(bio));
432 				break;
433 			}
434 			if (blkcg_css)
435 				bio->bi_opf |= REQ_CGROUP_PUNT;
436 		}
437 		/*
438 		 * We should never reach next_stripe_start, as we submit the
439 		 * bio immediately when we reach the boundary.
440 		 */
441 		ASSERT(cur_disk_bytenr != next_stripe_start);
442 
443 		/*
444 		 * We have various limits on the real write size:
445 		 * - stripe boundary
446 		 * - page boundary
447 		 * - compressed length boundary
448 		 */
449 		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
450 		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
451 		real_size = min_t(u64, real_size, compressed_len - offset);
452 		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
453 
454 		if (use_append)
455 			added = bio_add_zone_append_page(bio, page, real_size,
456 					offset_in_page(offset));
457 		else
458 			added = bio_add_page(bio, page, real_size,
459 					offset_in_page(offset));
460 		/* Reached zoned boundary */
461 		if (added == 0)
462 			submit = true;
463 
464 		cur_disk_bytenr += added;
465 		/* Reached stripe boundary */
466 		if (cur_disk_bytenr == next_stripe_start)
467 			submit = true;
468 
469 		/* Finished the range */
470 		if (cur_disk_bytenr == disk_start + compressed_len)
471 			submit = true;
472 
473 		if (submit) {
474 			if (!skip_sum) {
475 				ret = btrfs_csum_one_bio(inode, bio, start, true);
476 				if (ret) {
477 					btrfs_bio_end_io(btrfs_bio(bio), ret);
478 					break;
479 				}
480 			}
481 
482 			ASSERT(bio->bi_iter.bi_size);
483 			btrfs_submit_bio(fs_info, bio, 0);
484 			bio = NULL;
485 		}
486 		cond_resched();
487 	}
488 
489 	if (blkcg_css)
490 		kthread_associate_blkcg(NULL);
491 
492 	if (refcount_dec_and_test(&cb->pending_ios))
493 		finish_compressed_bio_write(cb);
494 	return ret;
495 }
496 
497 static u64 bio_end_offset(struct bio *bio)
498 {
499 	struct bio_vec *last = bio_last_bvec_all(bio);
500 
501 	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
502 }
503 
504 /*
505  * Add extra pages in the same compressed file extent so that we don't need to
506  * re-read the same extent again and again.
507  *
508  * NOTE: this won't work well for subpage, as for a subpage read we lock the
509  * full page and then submit a bio for each compressed/regular extent.
510  *
511  * This means that if several sectors in the same page point to the same
512  * on-disk compressed data, we will re-read the same extent many times and
513  * this function can only help for the next page.
514  */
515 static noinline int add_ra_bio_pages(struct inode *inode,
516 				     u64 compressed_end,
517 				     struct compressed_bio *cb,
518 				     int *memstall, unsigned long *pflags)
519 {
520 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
521 	unsigned long end_index;
522 	u64 cur = bio_end_offset(cb->orig_bio);
523 	u64 isize = i_size_read(inode);
524 	int ret;
525 	struct page *page;
526 	struct extent_map *em;
527 	struct address_space *mapping = inode->i_mapping;
528 	struct extent_map_tree *em_tree;
529 	struct extent_io_tree *tree;
530 	int sectors_missed = 0;
531 
532 	em_tree = &BTRFS_I(inode)->extent_tree;
533 	tree = &BTRFS_I(inode)->io_tree;
534 
535 	if (isize == 0)
536 		return 0;
537 
538 	/*
539 	 * For current subpage support, we only support 64K page size,
540 	 * which means the maximum compressed extent size (128K) is just 2x
541 	 * the page size.
542 	 * This makes readahead less effective, so disable readahead for
543 	 * subpage for now, until full compressed write is supported.
544 	 */
545 	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
546 		return 0;
547 
548 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
549 
550 	while (cur < compressed_end) {
551 		u64 page_end;
552 		u64 pg_index = cur >> PAGE_SHIFT;
553 		u32 add_size;
554 
555 		if (pg_index > end_index)
556 			break;
557 
558 		page = xa_load(&mapping->i_pages, pg_index);
559 		if (page && !xa_is_value(page)) {
560 			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
561 					  fs_info->sectorsize_bits;
562 
563 			/* Beyond threshold, no need to continue */
564 			if (sectors_missed > 4)
565 				break;
566 
567 			/*
568 			 * Jump to the next page start as we already have a page
569 			 * for the current offset.
570 			 */
571 			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
572 			continue;
573 		}
574 
575 		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
576 								 ~__GFP_FS));
577 		if (!page)
578 			break;
579 
580 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
581 			put_page(page);
582 			/* There is already a page, skip to page end */
583 			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
584 			continue;
585 		}
586 
587 		if (!*memstall && PageWorkingset(page)) {
588 			psi_memstall_enter(pflags);
589 			*memstall = 1;
590 		}
591 
592 		ret = set_page_extent_mapped(page);
593 		if (ret < 0) {
594 			unlock_page(page);
595 			put_page(page);
596 			break;
597 		}
598 
599 		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
600 		lock_extent(tree, cur, page_end, NULL);
601 		read_lock(&em_tree->lock);
602 		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
603 		read_unlock(&em_tree->lock);
604 
605 		/*
606 		 * At this point, we have a locked page in the page cache for
607 		 * these bytes in the file.  But, we have to make sure they map
608 		 * to this compressed extent on disk.
609 		 */
610 		if (!em || cur < em->start ||
611 		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
612 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
613 			free_extent_map(em);
614 			unlock_extent(tree, cur, page_end, NULL);
615 			unlock_page(page);
616 			put_page(page);
617 			break;
618 		}
619 		/* Capture the fields of @em before dropping our reference */
620 		add_size = min(em->start + em->len, page_end + 1) - cur;
621 		free_extent_map(em);
622 
623 		if (page->index == end_index) {
624 			size_t zero_offset = offset_in_page(isize);
625 
626 			if (zero_offset) {
627 				int zeros;
628 				zeros = PAGE_SIZE - zero_offset;
629 				memzero_page(page, zero_offset, zeros);
630 			}
631 		}
632 		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
633 		if (ret != add_size) {
634 			unlock_extent(tree, cur, page_end, NULL);
635 			unlock_page(page);
636 			put_page(page);
637 			break;
638 		}
639 		/*
640 		 * If it's subpage, we also need to increase its
641 		 * subpage::readers number, as at endio we will decrease
642 		 * subpage::readers and unlock the page.
643 		 */
644 		if (fs_info->sectorsize < PAGE_SIZE)
645 			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
646 		put_page(page);
647 		cur += add_size;
648 	}
649 	return 0;
650 }
651 
652 /*
653  * for a compressed read, the bio we get passed has all the inode pages
654  * in it.  We don't actually do IO on those pages but allocate new ones
655  * to hold the compressed pages on disk.
656  *
657  * bio->bi_iter.bi_sector points to the compressed extent on disk
658  * bio->bi_io_vec points to all of the inode pages
659  *
660  * After the compressed pages are read, we copy the bytes into the
661  * bio we were passed and then call the bio end_io handlers
662  */
663 void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
664 				  int mirror_num)
665 {
666 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
667 	struct extent_map_tree *em_tree;
668 	struct compressed_bio *cb;
669 	unsigned int compressed_len;
670 	struct bio *comp_bio = NULL;
671 	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
672 	u64 cur_disk_byte = disk_bytenr;
673 	u64 next_stripe_start;
674 	u64 file_offset;
675 	u64 em_len;
676 	u64 em_start;
677 	struct extent_map *em;
678 	unsigned long pflags;
679 	int memstall = 0;
680 	blk_status_t ret;
681 	int ret2;
682 	int i;
683 
684 	em_tree = &BTRFS_I(inode)->extent_tree;
685 
686 	file_offset = bio_first_bvec_all(bio)->bv_offset +
687 		      page_offset(bio_first_page_all(bio));
688 
689 	/* we need the actual starting offset of this extent in the file */
690 	read_lock(&em_tree->lock);
691 	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
692 	read_unlock(&em_tree->lock);
693 	if (!em) {
694 		ret = BLK_STS_IOERR;
695 		goto out;
696 	}
697 
698 	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
699 	compressed_len = em->block_len;
700 	cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
701 	if (!cb) {
702 		ret = BLK_STS_RESOURCE;
703 		goto out;
704 	}
705 
706 	refcount_set(&cb->pending_ios, 1);
707 	cb->status = BLK_STS_OK;
708 	cb->inode = inode;
709 
710 	cb->start = em->orig_start;
711 	em_len = em->len;
712 	em_start = em->start;
713 
714 	cb->len = bio->bi_iter.bi_size;
715 	cb->compressed_len = compressed_len;
716 	cb->compress_type = em->compress_type;
717 	cb->orig_bio = bio;
718 
719 	free_extent_map(em);
720 	em = NULL;
721 
722 	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
723 	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
724 	if (!cb->compressed_pages) {
725 		ret = BLK_STS_RESOURCE;
726 		goto fail;
727 	}
728 
729 	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
730 	if (ret2) {
731 		ret = BLK_STS_RESOURCE;
732 		goto fail;
733 	}
734 
735 	add_ra_bio_pages(inode, em_start + em_len, cb, &memstall, &pflags);
736 
737 	/* include any pages we added in add_ra_bio_pages */
738 	cb->len = bio->bi_iter.bi_size;
739 
740 	while (cur_disk_byte < disk_bytenr + compressed_len) {
741 		u64 offset = cur_disk_byte - disk_bytenr;
742 		unsigned int index = offset >> PAGE_SHIFT;
743 		unsigned int real_size;
744 		unsigned int added;
745 		struct page *page = cb->compressed_pages[index];
746 		bool submit = false;
747 
748 		/* Allocate a new bio if none exists or the last one was submitted */
749 		if (!comp_bio) {
750 			comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
751 					REQ_OP_READ, end_compressed_bio_read,
752 					&next_stripe_start);
753 			if (IS_ERR(comp_bio)) {
754 				cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
755 				break;
756 			}
757 		}
758 		/*
759 		 * We should never reach next_stripe_start, as we submit
760 		 * comp_bio immediately when we reach the boundary.
761 		 */
762 		ASSERT(cur_disk_byte != next_stripe_start);
763 		/*
764 		 * We have various limits on the real read size:
765 		 * - stripe boundary
766 		 * - page boundary
767 		 * - compressed length boundary
768 		 */
769 		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
770 		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
771 		real_size = min_t(u64, real_size, compressed_len - offset);
772 		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
773 
774 		added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
775 		/*
776 		 * The maximum compressed extent is smaller than the bio size
777 		 * limit, thus bio_add_page() should always succeed.
778 		 */
779 		ASSERT(added == real_size);
780 		cur_disk_byte += added;
781 
782 		/* Reached stripe boundary, need to submit */
783 		if (cur_disk_byte == next_stripe_start)
784 			submit = true;
785 
786 		/* Has finished the range, need to submit */
787 		if (cur_disk_byte == disk_bytenr + compressed_len)
788 			submit = true;
789 
790 		if (submit) {
791 			/* Save the original iter for read repair */
792 			if (bio_op(comp_bio) == REQ_OP_READ)
793 				btrfs_bio(comp_bio)->iter = comp_bio->bi_iter;
794 
795 			/*
796 			 * Save the initial offset of this chunk, as there
797 			 * is no direct correlation between compressed pages and
798 			 * the original file offset.  The field is only used for
799 			 * printing error messages.
800 			 */
801 			btrfs_bio(comp_bio)->file_offset = file_offset;
802 
803 			ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
804 			if (ret) {
805 				btrfs_bio_end_io(btrfs_bio(comp_bio), ret);
806 				break;
807 			}
808 
809 			ASSERT(comp_bio->bi_iter.bi_size);
810 			btrfs_submit_bio(fs_info, comp_bio, mirror_num);
811 			comp_bio = NULL;
812 		}
813 	}
814 
815 	if (memstall)
816 		psi_memstall_leave(&pflags);
817 
818 	if (refcount_dec_and_test(&cb->pending_ios))
819 		finish_compressed_bio_read(cb);
820 	return;
821 
822 fail:
823 	if (cb->compressed_pages) {
824 		for (i = 0; i < cb->nr_pages; i++) {
825 			if (cb->compressed_pages[i])
826 				__free_page(cb->compressed_pages[i]);
827 		}
828 	}
829 
830 	kfree(cb->compressed_pages);
831 	kfree(cb);
832 out:
833 	free_extent_map(em);
834 	btrfs_bio_end_io(btrfs_bio(bio), ret);
835 	return;
836 }
837 
838 /*
839  * Heuristic uses systematic sampling to collect data from the input data
840  * range, the logic can be tuned by the following constants:
841  *
842  * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
843  * @SAMPLING_INTERVAL  - distance between the starts of consecutive samples
844  */
845 #define SAMPLING_READ_SIZE	(16)
846 #define SAMPLING_INTERVAL	(256)
847 
848 /*
849  * For statistical analysis of the input data we consider bytes that form a
850  * Galois Field of 256 objects. Each object has an attribute count, ie. how
851  * many times the object appeared in the sample.
852  */
853 #define BUCKET_SIZE		(256)
854 
855 /*
856  * The size of the sample is based on a statistical sampling rule of thumb.
857  * The common way is to perform sampling tests as long as the number of
858  * elements in each cell is at least 5.
859  *
860  * Instead of 5, we choose 32 to obtain more accurate results.
861  * If the data contain the maximum number of symbols, which is 256, we obtain a
862  * sample size bound by 8192.
863  *
864  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
865  * from up to 512 locations.
866  */
867 #define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
868 				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
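
/*
 * Editorial note (not part of the original file), worked out from the
 * constants above with BTRFS_MAX_UNCOMPRESSED = 128K:
 *
 *	131072 * 16 / 256 = 8192
 *
 * i.e. at most 8KiB of sample data taken from up to 512 positions.
 */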
869 
870 struct bucket_item {
871 	u32 count;
872 };
873 
874 struct heuristic_ws {
875 	/* Partial copy of input data */
876 	u8 *sample;
877 	u32 sample_size;
878 	/* Buckets store counters for each byte value */
879 	struct bucket_item *bucket;
880 	/* Sorting buffer */
881 	struct bucket_item *bucket_b;
882 	struct list_head list;
883 };
884 
885 static struct workspace_manager heuristic_wsm;
886 
887 static void free_heuristic_ws(struct list_head *ws)
888 {
889 	struct heuristic_ws *workspace;
890 
891 	workspace = list_entry(ws, struct heuristic_ws, list);
892 
893 	kvfree(workspace->sample);
894 	kfree(workspace->bucket);
895 	kfree(workspace->bucket_b);
896 	kfree(workspace);
897 }
898 
899 static struct list_head *alloc_heuristic_ws(unsigned int level)
900 {
901 	struct heuristic_ws *ws;
902 
903 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
904 	if (!ws)
905 		return ERR_PTR(-ENOMEM);
906 
907 	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
908 	if (!ws->sample)
909 		goto fail;
910 
911 	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
912 	if (!ws->bucket)
913 		goto fail;
914 
915 	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
916 	if (!ws->bucket_b)
917 		goto fail;
918 
919 	INIT_LIST_HEAD(&ws->list);
920 	return &ws->list;
921 fail:
922 	free_heuristic_ws(&ws->list);
923 	return ERR_PTR(-ENOMEM);
924 }
925 
926 const struct btrfs_compress_op btrfs_heuristic_compress = {
927 	.workspace_manager = &heuristic_wsm,
928 };
929 
930 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
931 	/* The heuristic is represented as compression type 0 */
932 	&btrfs_heuristic_compress,
933 	&btrfs_zlib_compress,
934 	&btrfs_lzo_compress,
935 	&btrfs_zstd_compress,
936 };
937 
938 static struct list_head *alloc_workspace(int type, unsigned int level)
939 {
940 	switch (type) {
941 	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
942 	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
943 	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
944 	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
945 	default:
946 		/*
947 		 * This can't happen, the type is validated several times
948 		 * before we get here.
949 		 */
950 		BUG();
951 	}
952 }
953 
954 static void free_workspace(int type, struct list_head *ws)
955 {
956 	switch (type) {
957 	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
958 	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
959 	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
960 	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
961 	default:
962 		/*
963 		 * This can't happen, the type is validated several times
964 		 * before we get here.
965 		 */
966 		BUG();
967 	}
968 }
969 
970 static void btrfs_init_workspace_manager(int type)
971 {
972 	struct workspace_manager *wsm;
973 	struct list_head *workspace;
974 
975 	wsm = btrfs_compress_op[type]->workspace_manager;
976 	INIT_LIST_HEAD(&wsm->idle_ws);
977 	spin_lock_init(&wsm->ws_lock);
978 	atomic_set(&wsm->total_ws, 0);
979 	init_waitqueue_head(&wsm->ws_wait);
980 
981 	/*
982 	 * Preallocate one workspace for each compression type so we can
983 	 * guarantee forward progress in the worst case
984 	 */
985 	workspace = alloc_workspace(type, 0);
986 	if (IS_ERR(workspace)) {
987 		pr_warn(
988 	"BTRFS: cannot preallocate compression workspace, will try later\n");
989 	} else {
990 		atomic_set(&wsm->total_ws, 1);
991 		wsm->free_ws = 1;
992 		list_add(workspace, &wsm->idle_ws);
993 	}
994 }
995 
996 static void btrfs_cleanup_workspace_manager(int type)
997 {
998 	struct workspace_manager *wsman;
999 	struct list_head *ws;
1000 
1001 	wsman = btrfs_compress_op[type]->workspace_manager;
1002 	while (!list_empty(&wsman->idle_ws)) {
1003 		ws = wsman->idle_ws.next;
1004 		list_del(ws);
1005 		free_workspace(type, ws);
1006 		atomic_dec(&wsman->total_ws);
1007 	}
1008 }
1009 
1010 /*
1011  * This finds an available workspace or allocates a new one.
1012  * If it's not possible to allocate a new one, wait until there is one.
1013  * The preallocated workspaces make a forward progress guarantee, so we do
1014  * not return errors.
1015  */
1016 struct list_head *btrfs_get_workspace(int type, unsigned int level)
1017 {
1018 	struct workspace_manager *wsm;
1019 	struct list_head *workspace;
1020 	int cpus = num_online_cpus();
1021 	unsigned nofs_flag;
1022 	struct list_head *idle_ws;
1023 	spinlock_t *ws_lock;
1024 	atomic_t *total_ws;
1025 	wait_queue_head_t *ws_wait;
1026 	int *free_ws;
1027 
1028 	wsm = btrfs_compress_op[type]->workspace_manager;
1029 	idle_ws	 = &wsm->idle_ws;
1030 	ws_lock	 = &wsm->ws_lock;
1031 	total_ws = &wsm->total_ws;
1032 	ws_wait	 = &wsm->ws_wait;
1033 	free_ws	 = &wsm->free_ws;
1034 
1035 again:
1036 	spin_lock(ws_lock);
1037 	if (!list_empty(idle_ws)) {
1038 		workspace = idle_ws->next;
1039 		list_del(workspace);
1040 		(*free_ws)--;
1041 		spin_unlock(ws_lock);
1042 		return workspace;
1043 
1044 	}
1045 	if (atomic_read(total_ws) > cpus) {
1046 		DEFINE_WAIT(wait);
1047 
1048 		spin_unlock(ws_lock);
1049 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1050 		if (atomic_read(total_ws) > cpus && !*free_ws)
1051 			schedule();
1052 		finish_wait(ws_wait, &wait);
1053 		goto again;
1054 	}
1055 	atomic_inc(total_ws);
1056 	spin_unlock(ws_lock);
1057 
1058 	/*
1059 	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1060 	 * to turn it off here because we might get called from the restricted
1061 	 * context of btrfs_compress_bio/btrfs_compress_pages
1062 	 */
1063 	nofs_flag = memalloc_nofs_save();
1064 	workspace = alloc_workspace(type, level);
1065 	memalloc_nofs_restore(nofs_flag);
1066 
1067 	if (IS_ERR(workspace)) {
1068 		atomic_dec(total_ws);
1069 		wake_up(ws_wait);
1070 
1071 		/*
1072 		 * Do not return the error but go back to waiting. There's a
1073 		 * workspace preallocated for each type and the compression
1074 		 * time is bounded so we get to a workspace eventually. This
1075 		 * makes our caller's life easier.
1076 		 *
1077 		 * To prevent silent and low-probability deadlocks (when the
1078 		 * initial preallocation fails), check if there are any
1079 		 * workspaces at all.
1080 		 */
1081 		if (atomic_read(total_ws) == 0) {
1082 			static DEFINE_RATELIMIT_STATE(_rs,
1083 					/* once per minute */ 60 * HZ,
1084 					/* no burst */ 1);
1085 
1086 			if (__ratelimit(&_rs)) {
1087 				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1088 			}
1089 		}
1090 		goto again;
1091 	}
1092 	return workspace;
1093 }
1094 
1095 static struct list_head *get_workspace(int type, int level)
1096 {
1097 	switch (type) {
1098 	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1099 	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1100 	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1101 	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1102 	default:
1103 		/*
1104 		 * This can't happen, the type is validated several times
1105 		 * before we get here.
1106 		 */
1107 		BUG();
1108 	}
1109 }
1110 
1111 /*
1112  * put a workspace struct back on the list or free it if we have enough
1113  * idle ones sitting around
1114  */
1115 void btrfs_put_workspace(int type, struct list_head *ws)
1116 {
1117 	struct workspace_manager *wsm;
1118 	struct list_head *idle_ws;
1119 	spinlock_t *ws_lock;
1120 	atomic_t *total_ws;
1121 	wait_queue_head_t *ws_wait;
1122 	int *free_ws;
1123 
1124 	wsm = btrfs_compress_op[type]->workspace_manager;
1125 	idle_ws	 = &wsm->idle_ws;
1126 	ws_lock	 = &wsm->ws_lock;
1127 	total_ws = &wsm->total_ws;
1128 	ws_wait	 = &wsm->ws_wait;
1129 	free_ws	 = &wsm->free_ws;
1130 
1131 	spin_lock(ws_lock);
1132 	if (*free_ws <= num_online_cpus()) {
1133 		list_add(ws, idle_ws);
1134 		(*free_ws)++;
1135 		spin_unlock(ws_lock);
1136 		goto wake;
1137 	}
1138 	spin_unlock(ws_lock);
1139 
1140 	free_workspace(type, ws);
1141 	atomic_dec(total_ws);
1142 wake:
1143 	cond_wake_up(ws_wait);
1144 }
1145 
1146 static void put_workspace(int type, struct list_head *ws)
1147 {
1148 	switch (type) {
1149 	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1150 	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1151 	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1152 	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1153 	default:
1154 		/*
1155 		 * This can't happen, the type is validated several times
1156 		 * before we get here.
1157 		 */
1158 		BUG();
1159 	}
1160 }
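
/*
 * Editorial note (not part of the original file): a sketch of the
 * acquire/use/release pattern that all compression code paths follow:
 *
 *	struct list_head *ws = get_workspace(BTRFS_COMPRESS_ZSTD, 3);
 *
 *	... compress or decompress using the workspace ...
 *
 *	put_workspace(BTRFS_COMPRESS_ZSTD, ws);
 *
 * get_workspace() never fails, it may only block until a preallocated
 * workspace is released.
 */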
1161 
1162 /*
1163  * Adjust @level according to the limits of the compression algorithm or
1164  * fall back to the default
1165  */
1166 static unsigned int btrfs_compress_set_level(int type, unsigned level)
1167 {
1168 	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1169 
1170 	if (level == 0)
1171 		level = ops->default_level;
1172 	else
1173 		level = min(level, ops->max_level);
1174 
1175 	return level;
1176 }
1177 
1178 /*
1179  * Given an address space and start and length, compress the bytes into @pages
1180  * that are allocated on demand.
1181  *
1182  * @type_level is the encoded algorithm and level, where level 0 means whatever
1183  * default the algorithm chooses and is opaque here;
1184  * - the compression algorithm is in bits 0-3
1185  * - the level is in bits 4-7
1186  *
1187  * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1188  * and returns number of actually allocated pages
1189  *
1190  * @total_in is used to return the number of bytes actually read.  It
1191  * may be smaller than the input length if we had to exit early because we
1192  * ran out of room in the pages array or because we crossed the
1193  * max_out threshold.
1194  *
1195  * @total_out is an in/out parameter, must be set to the input length and will
1196  * be also used to return the total number of compressed bytes
1197  */
1198 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1199 			 u64 start, struct page **pages,
1200 			 unsigned long *out_pages,
1201 			 unsigned long *total_in,
1202 			 unsigned long *total_out)
1203 {
1204 	int type = btrfs_compress_type(type_level);
1205 	int level = btrfs_compress_level(type_level);
1206 	struct list_head *workspace;
1207 	int ret;
1208 
1209 	level = btrfs_compress_set_level(type, level);
1210 	workspace = get_workspace(type, level);
1211 	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1212 					 out_pages, total_in, total_out);
1213 	put_workspace(type, workspace);
1214 	return ret;
1215 }
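
/*
 * Editorial note (not part of the original file): a sketch of the
 * @type_level encoding, assuming the btrfs_compress_type() and
 * btrfs_compress_level() helpers from compression.h:
 *
 *	unsigned int type_level = BTRFS_COMPRESS_ZLIB | (3 << 4);
 *
 * selects zlib at level 3; a level nibble of 0 picks the algorithm's
 * default via btrfs_compress_set_level().
 */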
1216 
1217 static int btrfs_decompress_bio(struct compressed_bio *cb)
1218 {
1219 	struct list_head *workspace;
1220 	int ret;
1221 	int type = cb->compress_type;
1222 
1223 	workspace = get_workspace(type, 0);
1224 	ret = compression_decompress_bio(workspace, cb);
1225 	put_workspace(type, workspace);
1226 
1227 	return ret;
1228 }
1229 
1230 /*
1231  * a less complex decompression routine.  Our compressed data fits in a
1232  * single page, and we want to read a single page out of it.
1233  * start_byte tells us the offset into the decompressed data we're interested in
1234  */
1235 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
1236 		     unsigned long start_byte, size_t srclen, size_t destlen)
1237 {
1238 	struct list_head *workspace;
1239 	int ret;
1240 
1241 	workspace = get_workspace(type, 0);
1242 	ret = compression_decompress(type, workspace, data_in, dest_page,
1243 				     start_byte, srclen, destlen);
1244 	put_workspace(type, workspace);
1245 
1246 	return ret;
1247 }
1248 
1249 int __init btrfs_init_compress(void)
1250 {
1251 	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1252 	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1253 	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1254 	zstd_init_workspace_manager();
1255 	return 0;
1256 }
1257 
1258 void __cold btrfs_exit_compress(void)
1259 {
1260 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1261 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1262 	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1263 	zstd_cleanup_workspace_manager();
1264 }
1265 
1266 /*
1267  * Copy decompressed data from working buffer to pages.
1268  *
1269  * @buf:		The decompressed data buffer
1270  * @buf_len:		The decompressed data length
1271  * @decompressed:	Number of bytes that are already decompressed inside the
1272  * 			compressed extent
1273  * @cb:			The compressed extent descriptor
1274  * @orig_bio:		The original bio that the caller wants to read for
1275  *
1276  * An easier to understand graph is like below:
1277  *
1278  * 		|<- orig_bio ->|     |<- orig_bio->|
1279  * 	|<-------      full decompressed extent      ----->|
1280  * 	|<-----------    @cb range   ---->|
1281  * 	|			|<-- @buf_len -->|
1282  * 	|<--- @decompressed --->|
1283  *
1284  * Note that @cb can be a subpage of the full decompressed extent, but
1285  * @cb->start always has the same value as the file offset of the full
1286  * decompressed extent.
1287  *
1288  * When reading a compressed extent, we have to read the full compressed extent,
1289  * while @orig_bio may only want part of the range.
1290  * Thus this function will ensure only data covered by @orig_bio will be copied
1291  * to it.
1292  *
1293  * Return 0 if we have copied all needed contents for @orig_bio.
1294  * Return >0 if we need to continue decompressing.
1295  */
1296 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1297 			      struct compressed_bio *cb, u32 decompressed)
1298 {
1299 	struct bio *orig_bio = cb->orig_bio;
1300 	/* Offset inside the full decompressed extent */
1301 	u32 cur_offset;
1302 
1303 	cur_offset = decompressed;
1304 	/* The main loop to do the copy */
1305 	while (cur_offset < decompressed + buf_len) {
1306 		struct bio_vec bvec;
1307 		size_t copy_len;
1308 		u32 copy_start;
1309 		/* Offset inside the full decompressed extent */
1310 		u32 bvec_offset;
1311 
1312 		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1313 		/*
1314 		 * The subtraction of cb->start may underflow, but the result
1315 		 * is still the correct offset in the full decompressed extent.
1316 		 */
1317 		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1318 
1319 		/* Haven't reached the bvec range, exit */
1320 		if (decompressed + buf_len <= bvec_offset)
1321 			return 1;
1322 
1323 		copy_start = max(cur_offset, bvec_offset);
1324 		copy_len = min(bvec_offset + bvec.bv_len,
1325 			       decompressed + buf_len) - copy_start;
1326 		ASSERT(copy_len);
1327 
1328 		/*
1329 		 * Extra range check to ensure we didn't go beyond
1330 		 * @buf + @buf_len.
1331 		 */
1332 		ASSERT(copy_start - decompressed < buf_len);
1333 		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1334 			       buf + copy_start - decompressed, copy_len);
1335 		cur_offset += copy_len;
1336 
1337 		bio_advance(orig_bio, copy_len);
1338 		/* Finished the bio */
1339 		if (!orig_bio->bi_iter.bi_size)
1340 			return 0;
1341 	}
1342 	return 1;
1343 }
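
/*
 * Editorial note (not part of the original file), a worked example for the
 * copy loop above: with cb->start == 0, @decompressed == 8K, @buf_len == 4K
 * and the current bvec covering file range [10K, 12K):
 *
 *	bvec_offset = 10K
 *	copy_start  = max(8K, 10K)        = 10K
 *	copy_len    = min(12K, 12K) - 10K = 2K
 *
 * so only the overlap between the bvec and the decompressed buffer is
 * copied.
 */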
1344 
1345 /*
1346  * Shannon Entropy calculation
1347  *
1348  * Pure byte distribution analysis fails to determine compressibility of data.
1349  * Try calculating entropy to estimate the average minimum number of bits
1350  * needed to encode the sampled data.
1351  *
1352  * For convenience, return the percentage of needed bits, instead of amount of
1353  * bits directly.
1354  *
1355  * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
1356  *			    entropy and is compressible with high probability
1357  *
1358  * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1359  *
1360  * Use of ilog2() decreases precision; we lower the LVL by 5 to compensate.
1361  */
1362 #define ENTROPY_LVL_ACEPTABLE		(65)
1363 #define ENTROPY_LVL_HIGH		(80)
1364 
1365 /*
1366  * For increased precision in the shannon_entropy calculation,
1367  * let's do pow(n, M) to save more digits after the decimal point:
1368  *
1369  * - maximum int bit length is 64
1370  * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1371  * - 13 * 4 = 52 < 64		-> M = 4
1372  *
1373  * So use pow(n, 4).
1374  */
1375 static inline u32 ilog2_w(u64 n)
1376 {
1377 	return ilog2(n * n * n * n);
1378 }
1379 
1380 static u32 shannon_entropy(struct heuristic_ws *ws)
1381 {
1382 	const u32 entropy_max = 8 * ilog2_w(2);
1383 	u32 entropy_sum = 0;
1384 	u32 p, p_base, sz_base;
1385 	u32 i;
1386 
1387 	sz_base = ilog2_w(ws->sample_size);
1388 	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1389 		p = ws->bucket[i].count;
1390 		p_base = ilog2_w(p);
1391 		entropy_sum += p * (sz_base - p_base);
1392 	}
1393 
1394 	entropy_sum /= ws->sample_size;
1395 	return entropy_sum * 100 / entropy_max;
1396 }
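
/*
 * Editorial note (not part of the original file): the loop above is a
 * fixed-point version of Shannon entropy. With p_i = count_i / sample_size:
 *
 *	H = -sum(p_i * log2(p_i))
 *	  = sum(count_i * (log2(sample_size) - log2(count_i))) / sample_size
 *
 * ilog2_w(n) = ilog2(n^4) ~= 4 * log2(n) keeps two extra fractional bits
 * of precision, and entropy_max (8 bits per byte, times the same scale)
 * turns the result into a percentage.
 */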
1397 
1398 #define RADIX_BASE		4U
1399 #define COUNTERS_SIZE		(1U << RADIX_BASE)
1400 
1401 static u8 get4bits(u64 num, int shift) {
1402 	u8 low4bits;
1403 
1404 	num >>= shift;
1405 	/* Reverse order */
1406 	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1407 	return low4bits;
1408 }
1409 
1410 /*
1411  * Use 4 bits as radix base
1412  * Use 16 u32 counters for calculating new position in buf array
1413  *
1414  * @array     - array that will be sorted
1415  * @array_buf - buffer array to store sorting results
1416  *              must be equal in size to @array
1417  * @num       - array size
1418  */
1419 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1420 		       int num)
1421 {
1422 	u64 max_num;
1423 	u64 buf_num;
1424 	u32 counters[COUNTERS_SIZE];
1425 	u32 new_addr;
1426 	u32 addr;
1427 	int bitlen;
1428 	int shift;
1429 	int i;
1430 
1431 	/*
1432 	 * Try to avoid useless loop iterations for small numbers stored in big
1433 	 * counters.  Example: 48 33 4 ... in a 64bit array
1434 	 */
1435 	max_num = array[0].count;
1436 	for (i = 1; i < num; i++) {
1437 		buf_num = array[i].count;
1438 		if (buf_num > max_num)
1439 			max_num = buf_num;
1440 	}
1441 
1442 	buf_num = ilog2(max_num);
1443 	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1444 
1445 	shift = 0;
1446 	while (shift < bitlen) {
1447 		memset(counters, 0, sizeof(counters));
1448 
1449 		for (i = 0; i < num; i++) {
1450 			buf_num = array[i].count;
1451 			addr = get4bits(buf_num, shift);
1452 			counters[addr]++;
1453 		}
1454 
1455 		for (i = 1; i < COUNTERS_SIZE; i++)
1456 			counters[i] += counters[i - 1];
1457 
1458 		for (i = num - 1; i >= 0; i--) {
1459 			buf_num = array[i].count;
1460 			addr = get4bits(buf_num, shift);
1461 			counters[addr]--;
1462 			new_addr = counters[addr];
1463 			array_buf[new_addr] = array[i];
1464 		}
1465 
1466 		shift += RADIX_BASE;
1467 
1468 		/*
1469 		 * Normal radix expects to move data from a temporary array to
1470 		 * the main one.  But that requires some CPU time. Avoid that
1471 		 * by doing another sort iteration into the original array
1472 		 * instead of a memcpy()
1473 		 */
1474 		memset(counters, 0, sizeof(counters));
1475 
1476 		for (i = 0; i < num; i++) {
1477 			buf_num = array_buf[i].count;
1478 			addr = get4bits(buf_num, shift);
1479 			counters[addr]++;
1480 		}
1481 
1482 		for (i = 1; i < COUNTERS_SIZE; i++)
1483 			counters[i] += counters[i - 1];
1484 
1485 		for (i = num - 1; i >= 0; i--) {
1486 			buf_num = array_buf[i].count;
1487 			addr = get4bits(buf_num, shift);
1488 			counters[addr]--;
1489 			new_addr = counters[addr];
1490 			array[new_addr] = array_buf[i];
1491 		}
1492 
1493 		shift += RADIX_BASE;
1494 	}
1495 }
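
/*
 * Editorial note (not part of the original file): because get4bits()
 * reverses the digit values, the result is sorted in descending order,
 * e.g. counts {3, 200, 0, 41} come out as {200, 41, 3, 0}, which is the
 * order byte_core_set_size() below depends on.
 */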
1496 
1497 /*
1498  * Size of the core byte set - how many bytes cover 90% of the sample
1499  *
1500  * There are several types of structured binary data that use nearly all byte
1501  * values. The distribution can be uniform and counts in all buckets will be
1502  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1503  *
1504  * Another possibility is a normal (Gaussian) distribution, where the data could
1505  * be potentially compressible, but we have to take a few more steps to decide
1506  * how much.
1507  *
1508  * @BYTE_CORE_SET_LOW  - the main part of byte values repeats frequently,
1509  *                       a compression algo can easily fix that
1510  * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and with high
1511  *                       probability are not compressible
1512  */
1513 #define BYTE_CORE_SET_LOW		(64)
1514 #define BYTE_CORE_SET_HIGH		(200)
1515 
1516 static int byte_core_set_size(struct heuristic_ws *ws)
1517 {
1518 	u32 i;
1519 	u32 coreset_sum = 0;
1520 	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1521 	struct bucket_item *bucket = ws->bucket;
1522 
1523 	/* Sort in reverse order */
1524 	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1525 
1526 	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1527 		coreset_sum += bucket[i].count;
1528 
1529 	if (coreset_sum > core_set_threshold)
1530 		return i;
1531 
1532 	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1533 		coreset_sum += bucket[i].count;
1534 		if (coreset_sum > core_set_threshold)
1535 			break;
1536 	}
1537 
1538 	return i;
1539 }
1540 
1541 /*
1542  * Count byte values in buckets.
1543  * This heuristic can detect textual data (configs, xml, json, html, etc).
1544  * In most text-like data the byte set is restricted to a limited number of
1545  * possible characters, and that restriction in most cases makes the data
1546  * easy to compress.
1547  *
1548  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1549  *	less - compressible
1550  *	more - need additional analysis
1551  */
1552 #define BYTE_SET_THRESHOLD		(64)
1553 
1554 static u32 byte_set_size(const struct heuristic_ws *ws)
1555 {
1556 	u32 i;
1557 	u32 byte_set_size = 0;
1558 
1559 	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1560 		if (ws->bucket[i].count > 0)
1561 			byte_set_size++;
1562 	}
1563 
1564 	/*
1565 	 * Continue collecting count of byte values in buckets.  If the byte
1566 	 * set size is bigger than the threshold, it's pointless to continue,
1567 	 * the detection technique would fail for this type of data.
1568 	 */
1569 	for (; i < BUCKET_SIZE; i++) {
1570 		if (ws->bucket[i].count > 0) {
1571 			byte_set_size++;
1572 			if (byte_set_size > BYTE_SET_THRESHOLD)
1573 				return byte_set_size;
1574 		}
1575 	}
1576 
1577 	return byte_set_size;
1578 }
1579 
1580 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1581 {
1582 	const u32 half_of_sample = ws->sample_size / 2;
1583 	const u8 *data = ws->sample;
1584 
1585 	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1586 }
1587 
1588 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1589 				     struct heuristic_ws *ws)
1590 {
1591 	struct page *page;
1592 	u64 index, index_end;
1593 	u32 i, curr_sample_pos;
1594 	u8 *in_data;
1595 
1596 	/*
1597 	 * Compression handles the input data by chunks of 128KiB
1598 	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1599 	 *
1600 	 * We do the same for the heuristic and loop over the whole range.
1601 	 *
1602 	 * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic
1603 	 * will process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1604 	 */
1605 	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1606 		end = start + BTRFS_MAX_UNCOMPRESSED;
1607 
1608 	index = start >> PAGE_SHIFT;
1609 	index_end = end >> PAGE_SHIFT;
1610 
1611 	/* Don't miss unaligned end */
1612 	if (!IS_ALIGNED(end, PAGE_SIZE))
1613 		index_end++;
1614 
1615 	curr_sample_pos = 0;
1616 	while (index < index_end) {
1617 		page = find_get_page(inode->i_mapping, index);
1618 		in_data = kmap_local_page(page);
1619 		/* Handle case where the start is not aligned to PAGE_SIZE */
1620 		i = start % PAGE_SIZE;
1621 		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1622 			/* Don't sample any garbage from the last page */
1623 			if (start > end - SAMPLING_READ_SIZE)
1624 				break;
1625 			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1626 					SAMPLING_READ_SIZE);
1627 			i += SAMPLING_INTERVAL;
1628 			start += SAMPLING_INTERVAL;
1629 			curr_sample_pos += SAMPLING_READ_SIZE;
1630 		}
1631 		kunmap_local(in_data);
1632 		put_page(page);
1633 
1634 		index++;
1635 	}
1636 
1637 	ws->sample_size = curr_sample_pos;
1638 }
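
/*
 * Editorial note (not part of the original file): for a page-aligned 128KiB
 * range the loop above copies 16 bytes starting at offsets 0, 256, 512, ...
 * so the sample covers 16/256 = 1/16 of the input, which is where the 8KiB
 * MAX_SAMPLE_SIZE bound comes from.
 */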
1639 
1640 /*
1641  * Compression heuristic.
1642  *
1643  * For now it's a naive and optimistic 'return true', we'll extend the logic to
1644  * quickly (compared to direct compression) detect data characteristics
1645  * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1646  * data.
1647  *
1648  * The following types of analysis can be performed:
1649  * - detect mostly zero data
1650  * - detect data with low "byte set" size (text, etc)
1651  * - detect data with low/high "core byte" set
1652  *
1653  * Return non-zero if the compression should be done, 0 otherwise.
1654  */
1655 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1656 {
1657 	struct list_head *ws_list = get_workspace(0, 0);
1658 	struct heuristic_ws *ws;
1659 	u32 i;
1660 	u8 byte;
1661 	int ret = 0;
1662 
1663 	ws = list_entry(ws_list, struct heuristic_ws, list);
1664 
1665 	heuristic_collect_sample(inode, start, end, ws);
1666 
1667 	if (sample_repeated_patterns(ws)) {
1668 		ret = 1;
1669 		goto out;
1670 	}
1671 
1672 	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1673 
1674 	for (i = 0; i < ws->sample_size; i++) {
1675 		byte = ws->sample[i];
1676 		ws->bucket[byte].count++;
1677 	}
1678 
1679 	i = byte_set_size(ws);
1680 	if (i < BYTE_SET_THRESHOLD) {
1681 		ret = 2;
1682 		goto out;
1683 	}
1684 
1685 	i = byte_core_set_size(ws);
1686 	if (i <= BYTE_CORE_SET_LOW) {
1687 		ret = 3;
1688 		goto out;
1689 	}
1690 
1691 	if (i >= BYTE_CORE_SET_HIGH) {
1692 		ret = 0;
1693 		goto out;
1694 	}
1695 
1696 	i = shannon_entropy(ws);
1697 	if (i <= ENTROPY_LVL_ACEPTABLE) {
1698 		ret = 4;
1699 		goto out;
1700 	}
1701 
1702 	/*
1703 	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1704 	 * needed to give green light to compression.
1705 	 *
1706 	 * For now just assume that compression at that level is not worth the
1707 	 * resources because:
1708 	 *
1709 	 * 1. it is possible to defrag the data later
1710 	 *
1711 	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1712 	 * values, every bucket has a counter at level ~54. The heuristic would
1713 	 * be confused. This can happen when data have some internal repeated
1714 	 * patterns like "abbacbbc...". This can be detected by analyzing
1715 	 * pairs of bytes, which is too costly.
1716 	 */
1717 	if (i < ENTROPY_LVL_HIGH) {
1718 		ret = 5;
1719 		goto out;
1720 	} else {
1721 		ret = 0;
1722 		goto out;
1723 	}
1724 
1725 out:
1726 	put_workspace(0, ws_list);
1727 	return ret;
1728 }
1729 
1730 /*
1731  * Convert the compression suffix (eg. after "zlib" starting with ":") to
1732  * level, an unrecognized string will set the default level
1733  */
1734 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1735 {
1736 	unsigned int level = 0;
1737 	int ret;
1738 
1739 	if (!type)
1740 		return 0;
1741 
1742 	if (str[0] == ':') {
1743 		ret = kstrtouint(str + 1, 10, &level);
1744 		if (ret)
1745 			level = 0;
1746 	}
1747 
1748 	level = btrfs_compress_set_level(type, level);
1749 
1750 	return level;
1751 }
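
/*
 * Editorial note (not part of the original file), example results:
 *
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":9") -> 9
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":x") -> zlib default
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, "")   -> zlib default
 */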
1752