/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				  sizeof(u32) * (r)->sectorsize)

int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

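/*
 * Find the csum item in the csum tree that covers @bytenr and return a
 * pointer to the checksum for that block inside the item.  Returns
 * ERR_PTR(-EFBIG) when the item ends exactly at @bytenr (it exists but is
 * not big enough), and ERR_PTR(-ENOENT) when no covering item is found.
 */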
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
			      root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

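/*
 * Look up the data checksum for every sector of a read bio.  The csums are
 * copied into @dst when it is provided; otherwise they are stored in the
 * btrfs_io_bio, using either the inline array or an allocated buffer
 * depending on how many checksums are needed.  For direct I/O (@dio != 0)
 * the file offset of the bio is passed in via @logical_offset.
 */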
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int bio_index = 0;
	int count;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	page_bytes_left = bvec->bv_len;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + root->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;

		while (count--) {
			disk_bytenr += root->sectorsize;
			offset += root->sectorsize;
			page_bytes_left -= root->sectorsize;
			if (!page_bytes_left) {
				bio_index++;
				/*
				 * make sure we're still inside the
				 * bio before we update page_bytes_left
				 */
				if (bio_index >= bio->bi_vcnt) {
					WARN_ON_ONCE(count);
					goto done;
				}
				bvec++;
				page_bytes_left = bvec->bv_len;
			}

		}
	}

done:
	btrfs_free_path(path);
	return 0;
}

int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}

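/*
 * Collect the checksums covering the byte range [@start, @end] from the
 * csum tree into btrfs_ordered_sum structures appended to @list.  When
 * @search_commit is set, the commit root is searched without taking locks.
 */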
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
	       IS_ALIGNED(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

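/*
 * Checksum every sector of a write bio and attach the resulting sums to the
 * ordered extent(s) covering the bio.  @file_start gives the file offset of
 * the bio when @contig is set; otherwise the offset is taken from the bio's
 * pages.
 */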
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	int index;
	int nr_sectors;
	int i;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered); /* Logic error */
	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		data = kmap_atomic(bvec->bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
						 bvec->bv_len + root->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						  + (i * root->sectorsize),
						  sums->sums[index],
						  root->sectorsize);
			btrfs_csum_final(sums->sums[index],
					 (char *)(sums->sums + index));
			index++;
			offset += root->sectorsize;
			this_sum_bytes += root->sectorsize;
			total_bytes += root->sectorsize;
		}

		kunmap_atomic(data);

		bio_index++;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 * A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(root->fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

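/*
 * Write the checksums described by @sums into the csum tree, extending an
 * existing csum item or inserting new ones as needed, and looping until
 * every checksum in @sums has been stored.
 */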
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

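/*
 * Fill in an extent_map from an on-disk file extent item (regular,
 * prealloc or inline), following the conventions used by
 * inode.c:btrfs_get_extent() for the fields it sets.
 */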
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}