/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/pagevec.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

/*
 * a compressed_bio is allocated with room after it for one checksum per
 * sector of compressed data; cb->sums is the start of that inline array
 */
static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}

/*
 * allocate a bio pointed at first_byte on bdev.  If the allocation fails
 * and we are a memalloc task, retry with progressively fewer vecs before
 * giving up.
 */
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	struct bio *bio;
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_byte >> 9;
	}
	return bio;
}
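
/*
 * verify the checksums on the compressed data we just read.  Every page of
 * compressed data has its own csum in the cb->sums array; walk the pages,
 * recompute each csum and compare it against the stored value.  Returns 0
 * if everything matches (or the inode is NODATASUM), -EIO on the first
 * mismatch.
 */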
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (btrfs_test_flag(inode, NODATASUM))
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page, KM_USER0);
		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr, KM_USER0);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %lu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n", inode->i_ino,
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	tree = &BTRFS_I(inode)->io_tree;
	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
					   cb->start,
					   cb->orig_bio->bi_io_vec,
					   cb->orig_bio->bi_vcnt,
					   cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
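 *
 * Bios are filled with compressed pages until bio_add_page() or the
 * merge_bio_hook refuses another page; the full bio is then checksummed
 * and submitted, and a new bio is started at the disk offset of the page
 * that didn't fit.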
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int page_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
		page = compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
			BUG_ON(ret);

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk("bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}
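
/*
 * readahead helper for compressed reads.  Starting just past the last page
 * already in cb->orig_bio, walk forward toward compressed_end and, for file
 * pages that aren't in the page cache yet, allocate them, insert them into
 * the page cache and (under the extent lock) make sure they still map to
 * this compressed extent on disk before adding them to the bio.  Bail out
 * early if too many pages are already cached or the mapping no longer
 * matches, so a single decompress pass can fill as many pages as possible.
 */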
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long page_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	while (last_offset < compressed_end) {
		page_index = last_offset >> PAGE_CACHE_SHIFT;

		if (page_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
		if (!page)
			break;

		page->index = page_index;
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (add_to_page_cache(page, mapping,
				      page->index, GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		/* open coding of lru_cache_add, also not exported */
		page_cache_get(page);
		if (!pagevec_add(&pvec, page))
			__pagevec_lru_add_file(&pvec);

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		spin_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add_file(&pvec);
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long page_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
		   PAGE_CACHE_SIZE;
	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (page_index = 0; page_index < nr_pages; page_index++) {
		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
	}
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);
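
	/*
	 * build bios for the compressed pages.  Whenever merge_bio_hook or
	 * bio_add_page refuses another page, look up the csums for what we
	 * have, submit it and start a new bio at the current disk byte for
	 * the page that didn't fit.
	 */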
	for (page_index = 0; page_index < nr_pages; page_index++) {
		page = cb->compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!btrfs_test_flag(inode, NODATASUM)) {
				btrfs_lookup_bio_sums(root, inode, comp_bio,
						      sums);
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	if (!btrfs_test_flag(inode, NODATASUM))
		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;
}