/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

/* size of a compressed_bio plus its trailing array of per-sector checksums */
static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
}

/* first_byte is a byte offset on disk; bios address 512-byte sectors */
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}
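
/*
 * Verify the checksum of each compressed page against the sums that were
 * stored in the compressed_bio at submit time.  Returns 0 on success or
 * -EIO on the first mismatch.
 */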
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
				   btrfs_ino(inode), disk_start, csum, *cb_sum,
				   cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and this must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
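/*
 * Note that the submission paths below treat allocation failures as
 * fatal: -ENOMEM from the checksumming or mapping helpers hits a
 * BUG_ON() rather than being unwound.
 */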
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * increment the count before we submit the bio so
			 * the end IO handler can't run until we're done
			 * setting up the cb; otherwise the cb might be
			 * freed out from under us.
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, bio, 0, 1);
			BUG_ON(ret); /* -ENOMEM */

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, bio, 0, 1);
	BUG_ON(ret); /* -ENOMEM */

	bio_put(bio);
	return 0;
}
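
/*
 * Add readahead pages to the original read bio: pull contiguous pages
 * from the page cache (or allocate fresh ones) so that a single pass of
 * decompression can fill as much of the inode's range as possible.
 * Gives up once several pages are found already cached, or when a page
 * no longer maps to this compressed extent on disk.
 */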
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);
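
	/*
	 * Fill the compressed bio page by page; whenever the merge hook
	 * or bio_add_page() reports the bio is full, submit it and start
	 * a new one at the current disk byte.
	 */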
	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * increment the count before we submit the bio so
			 * the end IO handler can't run until we're done
			 * setting up the cb; otherwise the cb might be
			 * freed out from under us.
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     root->sectorsize);

			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
				  BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};
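
/*
 * Set up the workspace bookkeeping for each compression type.  Runs once
 * at module init time.
 */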
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			printk(KERN_WARNING
			       "BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation guarantees forward progress, so we do not return errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				printk(KERN_WARNING
				       "BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
							start, len, pages,
							nr_dest_pages, out_pages,
							total_in, total_out,
							max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							   disk_start,
							   bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						    dest_page, start_byte,
						    srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from the working buffer to pages.
 *
 * buf_start is the byte offset of the start of the working buffer,
 * relative to the start of the uncompressed data.
 *
 * total_out is the number of uncompressed bytes produced so far, i.e.
 * the offset just past the last byte of the working buffer.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start_byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
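	/*
	 * Worked example (all offsets relative to the uncompressed data):
	 * a destination page that begins 12288 bytes past disk_start with
	 * buf_start == 8192 and total_out == 16384 gives start_byte 12288,
	 * buf_offset 4096 and 4096 bytes to copy out of the working buffer.
	 */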
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_SIZE - *pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * When uncompressing data, we need to make sure and zero any parts of
 * the biovec that were not filled in by the decompression code.  pg_index
 * and pg_offset indicate the last page and the last offset of that page
 * that have been filled in.  This will zero everything remaining in the
 * biovec.
 */
void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
			    unsigned long pg_index,
			    unsigned long pg_offset)
{
	while (pg_index < vcnt) {
		struct page *page = bvec[pg_index].bv_page;
		unsigned long off = bvec[pg_index].bv_offset;
		unsigned long len = bvec[pg_index].bv_len;

		if (pg_offset < off)
			pg_offset = off;
		if (pg_offset < off + len) {
			unsigned long bytes = off + len - pg_offset;
			char *kaddr;

			kaddr = kmap_atomic(page);
			memset(kaddr + pg_offset, 0, bytes);
			kunmap_atomic(kaddr);
		}
		pg_index++;
		pg_offset = 0;
	}
}