/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
}

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
				   btrfs_ino(inode), disk_start, csum, *cb_sum,
				   cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

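/*
 * Illustrative sizing example for the inline checksum array (not part
 * of the driver logic; assumes 4K pages/sectors and the default 4-byte
 * CRC32C csum_size): a 128K compressed extent spans
 * DIV_ROUND_UP(131072, 4096) = 32 sectors, so compressed_bio_size()
 * returns sizeof(struct compressed_bio) + 32 * 4 bytes, and
 * check_compressed_csum() above walks those 32 sums starting at
 * &cb->sums, one per compressed page.
 */
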
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
						  nr_pages, ARRAY_SIZE(pages)),
					    pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

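/*
 * Illustrative walk-through of the batching in end_compressed_writeback()
 * (assumes 4K pages): for a 128K extent at file offset 0, index = 0 and
 * end_index = 31, so find_get_pages_contig() is asked for at most
 * ARRAY_SIZE(pages) = 16 pages per iteration and the writeback bits are
 * typically cleared in two batches.  A return of 0 means the page at
 * 'index' is no longer present, so we simply step past it.
 */
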
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

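/*
 * Note on the pending_bios protocol shared by both end_io handlers above
 * (summary only): the submit paths take one reference per in-flight bio
 * with atomic_inc() *before* handing the bio to btrfs_map_bio(), so
 * atomic_dec_and_test() here can only reach zero once every bio for the
 * extent has completed and the submitter has finished touching the cb.
 */
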
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

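	/*
	 * Loop overview (note): each compressed page is appended to the
	 * current bio until either the merge hook rejects the page or
	 * bio_add_page() cannot take a full page (it returns the number
	 * of bytes actually added).  The full bio is then checksummed
	 * and submitted, and a fresh bio is started with the page that
	 * didn't fit.
	 */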
	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so the
			 * end IO handler can't run, and free the cb, before
			 * we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret); /* -ENOMEM */

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len,
				   cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret); /* -ENOMEM */

	bio_put(bio);
	return 0;
}

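/*
 * Caller-side sketch for btrfs_submit_compressed_write() (illustrative
 * only; the variable names are hypothetical and the real caller is the
 * async delalloc path):
 *
 *	ret = btrfs_submit_compressed_write(inode, start, uncompressed_len,
 *					    disk_start, compressed_len,
 *					    pages, nr_pages);
 *
 * The file pages covering [start, start + uncompressed_len) must already
 * be marked for writeback, and 'pages' must hold references that the
 * write completion path is allowed to drop.
 */
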
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

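/*
 * Behavioral note for add_ra_bio_pages() (summary): the readahead stops
 * early once more than four pages in the range are already cached
 * (misses > 4), since re-reading the compressed extent would then mostly
 * duplicate data the page cache already holds.  It also stops at i_size
 * and at the first page whose extent mapping does not point back at this
 * exact compressed extent on disk.
 */
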
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(READ, page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so the
			 * end IO handler can't run, and free the cb, before
			 * we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     root->sectorsize);

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
				  BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

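/*
 * Note: the per-type workspace state above is indexed by compression
 * type minus one, e.g. BTRFS_COMPRESS_ZLIB (1) maps to btrfs_comp_ws[0]
 * and btrfs_compress_op[0].  The two arrays must stay in the same order,
 * and BTRFS_COMPRESS_TYPES bounds them both.
 */
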
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			printk(KERN_WARNING
			       "BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

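/*
 * Hypothetical sketch (not part of this file): wiring up an additional
 * compression method would mean adding its ops to btrfs_compress_op[]
 * and growing BTRFS_COMPRESS_TYPES to match, e.g.:
 *
 *	static const struct btrfs_compress_op *const btrfs_compress_op[] = {
 *		&btrfs_zlib_compress,
 *		&btrfs_lzo_compress,
 *		&btrfs_new_compress,	(hypothetical)
 *	};
 *
 * btrfs_init_compress() above would then preallocate a workspace for it
 * with no further changes.
 */
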
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not
 * return errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				printk(KERN_WARNING
				       "no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, len, pages,
							  nr_dest_pages,
							  out_pages, total_in,
							  total_out, max_out);
	free_workspace(type, workspace);
	return ret;
}

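/*
 * Caller-side sketch for btrfs_compress_pages() (illustrative; variable
 * names are hypothetical and the real caller is the delalloc path):
 *
 *	unsigned long out_pages, total_in, total_out;
 *	struct page *pages[128];
 *	int ret;
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
 *				   start, len, pages, 128, &out_pages,
 *				   &total_in, &total_out, max_compressed);
 *
 * pages[0..out_pages) may hold allocated pages even when ret is an
 * error, so the caller must always release them.
 */
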
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress_biovec(workspace,
							     pages_in,
							     disk_start,
							     bvec, vcnt,
							     srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the uncompressed data we're
 * interested in.
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

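/*
 * Worked example for btrfs_decompress_buf2page() below (illustrative
 * numbers, 4K pages): with disk_start = 0, suppose the decompressor has
 * just produced the second 4K chunk of output, so buf_start = 4096 and
 * total_out = 8192.  For an output page at file offset 4096,
 * start_byte = 4096; buf_start is not below start_byte, so buf_offset
 * stays 0, working_bytes = 4096, and the whole chunk is copied into
 * that page before the loop advances to the next bvec entry.  A page at
 * offset 8192 would instead return 1, since total_out <= start_byte
 * means its data hasn't been produced yet.
 */
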
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the working buffer relative to the
 * start of the uncompressed data.
 *
 * total_out is the offset of the end of the working buffer, again
 * relative to the start of the uncompressed data.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start_byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_SIZE - *pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * When uncompressing data, we need to make sure and zero any parts of
 * the biovec that were not filled in by the decompression code.  pg_index
 * and pg_offset indicate the last page and the last offset of that page
 * that have been filled in.  This will zero everything remaining in the
 * biovec.
 */
void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
			    unsigned long pg_index,
			    unsigned long pg_offset)
{
	while (pg_index < vcnt) {
		struct page *page = bvec[pg_index].bv_page;
		unsigned long off = bvec[pg_index].bv_offset;
		unsigned long len = bvec[pg_index].bv_len;

		if (pg_offset < off)
			pg_offset = off;
		if (pg_offset < off + len) {
			unsigned long bytes = off + len - pg_offset;
			char *kaddr;

			kaddr = kmap_atomic(page);
			memset(kaddr + pg_offset, 0, bytes);
			kunmap_atomic(kaddr);
		}
		pg_index++;
		pg_offset = 0;
	}
}