/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
}
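/*
 * Worked example (illustrative values): with a 4KB sectorsize and a
 * 4-byte crc32c checksum, a 128KB compressed extent needs
 *
 *	DIV_ROUND_UP(131072, 4096) * 4 == 128
 *
 * extra bytes, i.e. room for 32 checksums appended after the struct
 * and addressed through the &cb->sums member above.
 */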
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
				   btrfs_ino(inode), disk_start, csum, *cb_sum,
				   cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}
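/*
 * Both end_io handlers below use the same last-one-out scheme: every
 * sub-bio submitted for a compressed extent holds a reference in
 * cb->pending_bios, and only the bio whose atomic_dec_and_test() drops
 * the count to zero does the expensive completion work (checksum
 * verification, decompression, page release).
 */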
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode, u64 start,
					      unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)),
					    pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);
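	/*
	 * The loop below packs as many compressed pages as possible into
	 * the current bio.  When the merge hook refuses a page or
	 * bio_add_page() can't take a full page, the bio is submitted
	 * and a fresh one is started at the current disk byte.
	 */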
	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret); /* -ENOMEM */

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len,
				   cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret); /* -ENOMEM */

	bio_put(bio);
	return 0;
}
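/*
 * Opportunistic readahead: the compressed extent on disk may cover more
 * of the file than the bio we were handed.  Since the whole extent has
 * to be read and decompressed anyway, try to add the not-yet-cached
 * file pages that fall inside it to the original bio so they get
 * populated for free.
 */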
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_CACHE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
					  ~__GFP_FS);
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index,
					  GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;
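	/*
	 * The extent map tells us what the inode bio can't: the length of
	 * the compressed data on disk (block_len) and the logical file
	 * offset the extent starts at (orig_start), which becomes
	 * cb->start for the decompression step.
	 */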
	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	/* In the parent-locked case, we only locked the range we are
	 * interested in.  In all other cases, we can opportunistically
	 * cache decompressed data that goes beyond the requested range.
	 */
	if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
		add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(READ, page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     root->sectorsize);

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			if (ret)
				bio_endio(comp_bio, ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
				  BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	if (ret)
		bio_endio(comp_bio, ret);

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
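/*
 * Compression workspaces hold the scratch state each algorithm needs
 * (for zlib, the deflate/inflate streams and their buffers).  They are
 * expensive to allocate, so idle ones are kept on a per-type free list
 * and handed out by find_workspace()/free_workspace() below.
 */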
static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];

static struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&comp_idle_workspace[i]);
		spin_lock_init(&comp_workspace_lock[i]);
		atomic_set(&comp_alloc_workspace[i], 0);
		init_waitqueue_head(&comp_workspace_wait[i]);
	}
}

/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];
again:
	spin_lock(workspace_lock);
	if (!list_empty(idle_workspace)) {
		workspace = idle_workspace->next;
		list_del(workspace);
		(*num_workspace)--;
		spin_unlock(workspace_lock);
		return workspace;
	}
	if (atomic_read(alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(workspace_lock);
		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
			schedule();
		finish_wait(workspace_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_workspace);
	spin_unlock(workspace_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_workspace);
		wake_up(workspace_wait);
	}
	return workspace;
}
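/*
 * The check against num_online_cpus() above throttles allocations:
 * once roughly one workspace per online CPU exists for a type, new
 * callers sleep on the waitqueue until free_workspace() either parks a
 * workspace on the idle list or drops the allocation count.
 */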
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];

	spin_lock(workspace_lock);
	if (*num_workspace < num_online_cpus()) {
		list_add(workspace, idle_workspace);
		(*num_workspace)++;
		spin_unlock(workspace_lock);
		goto wake;
	}
	spin_unlock(workspace_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_workspace);
wake:
	smp_mb();
	if (waitqueue_active(workspace_wait))
		wake_up(workspace_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&comp_idle_workspace[i])) {
			workspace = comp_idle_workspace[i].next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&comp_alloc_workspace[i]);
		}
	}
}
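/*
 * The exported helpers below all follow the same pattern (sketch):
 *
 *	workspace = find_workspace(type);
 *	if (IS_ERR(workspace))
 *		return PTR_ERR(workspace);
 *	ret = btrfs_compress_op[type - 1]-><method>(workspace, ...);
 *	free_workspace(type, workspace);
 *	return ret;
 *
 * so the per-algorithm ops never worry about workspace lifetime.
 */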
/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
							start, len, pages,
							nr_dest_pages,
							out_pages, total_in,
							total_out, max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							   disk_start,
							   bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						    dest_page, start_byte,
						    srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}
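/*
 * Worked example for the copy loop below (illustrative values,
 * disk_start == 0, 4KB pages): if the working buffer holds the
 * decompressed bytes [4096, 12288), then buf_start == 4096 and
 * total_out == 12288.  For the file page at offset 4096,
 * start_byte == 4096, so buf_offset == 0; the loop copies one full
 * page, steps to the page at offset 8192, copies the remaining 4096
 * bytes, and returns 1 once start_byte reaches total_out.
 */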
/*
 * Copy uncompressed data from the working buffer to pages.
 *
 * buf_start is the byte offset of the start of the working buffer,
 * relative to the start of the uncompressed data.
 *
 * total_out is the offset of the last byte of the working buffer,
 * again relative to the start of the uncompressed data.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_CACHE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}