// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

/*
 * Maximum number of checksums that fit in one csum item: leaf data size
 * minus room for two item headers, divided by the per-block csum size,
 * minus one for slack.
 */
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

/*
 * Largest byte range one btrfs_ordered_sum can describe: the sums array
 * (u32 per sector) together with the struct header must fit in a page.
 */
#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)

/*
 * Insert a new BTRFS_EXTENT_DATA_KEY item of type BTRFS_FILE_EXTENT_REG
 * describing one file extent at file position @pos for inode @objectid.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM, or an error
 * from btrfs_insert_empty_item).
 */
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Find the csum item in the csum tree that covers disk byte @bytenr and
 * return a pointer to the individual checksum slot for that block.
 *
 * On success @path points at the item containing the csum.  On failure
 * an ERR_PTR is returned:
 *   -ENOENT  no csum item covers @bytenr
 *   -EFBIG   an item ends exactly at @bytenr (the caller may be able to
 *            extend it; see btrfs_csum_file_blocks)
 *
 * @cow is passed through to btrfs_search_slot; pass a non-NULL @trans
 * together with cow=1 when the item will be modified.
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		/*
		 * No exact key match; step back to the previous item and
		 * check whether its csum range still covers @bytenr.
		 */
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			/* item ends right at bytenr: it could be extended */
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

/*
 * Search for the file extent item of @objectid at file offset @offset.
 *
 * @mod < 0 reserves room for a deletion (ins_len = -1); any non-zero
 * @mod makes the search COW the path.  Return value is that of
 * btrfs_search_slot.
 */
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

/*
 * Look up the data checksums for every sector of a read bio.
 *
 * If @dst is NULL the csums are stored in the btrfs_io_bio attached to
 * @bio (inline when they fit in BTRFS_BIO_INLINE_CSUM_SIZE, otherwise
 * in a freshly allocated array); otherwise they are copied into @dst.
 * @logical_offset is the file offset of the bio and is only used when
 * @dio is set (for buffered reads the offset comes from the pages).
 *
 * Csums are first looked up in the in-memory ordered sums, then in the
 * csum tree.  A missing csum is filled with zeros; for the data reloc
 * tree the range is additionally marked EXTENT_NODATASUM, for other
 * roots a rate-limited message is logged.
 *
 * Returns 0 or BLK_STS_RESOURCE on allocation failure.
 */
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
							GFP_NOFS);
			if (!btrfs_bio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;	/* still consuming a previous run */

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				/* no csum on disk: hand back a zero csum */
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						   "no csum found for inode %llu start %llu",
					       btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			/* cache the byte range the found item covers */
			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

/* Buffered-read wrapper: offsets are taken from the bio's pages. */
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

/* Direct-IO wrapper: @offset supplies the logical file offset. */
blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

/*
 * Collect all checksums for the disk byte range [start, end] into
 * btrfs_ordered_sum structures appended to @list.  The range must be
 * sector aligned (asserted below).  Each ordered sum covers at most
 * MAX_ORDERED_SUM_BYTES.
 *
 * @search_commit: read from the commit root without locking.
 *
 * Returns 0 on success or a negative errno; on error any sums built so
 * far are freed and nothing is added to @list.
 */
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		/*
		 * The previous item may cover part of our range; step back
		 * if @start falls inside its csums.
		 */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

/*
 * Compute data checksums for every sector of a write bio and attach
 * them to the matching ordered extent(s).
 *
 * @file_start: file offset of the bio when @contig is set; for
 *              non-contiguous bios the offset of each page is used.
 *
 * When the bio crosses an ordered extent boundary the current sums are
 * handed to btrfs_add_ordered_sum and a new btrfs_ordered_sum is
 * started for the next ordered extent.
 *
 * Returns 0 or BLK_STS_RESOURCE if the initial allocation fails.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;
	unsigned nofs_flag;

	/* GFP_KERNEL under memalloc_nofs_save: no fs reentry while writing */
	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
				offset < ordered->file_offset) {
				/*
				 * We crossed into the next ordered extent:
				 * finish the current sum and start a new one.
				 */
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec.bv_page);
			}

			sums->sums[index] = ~(u32)0;	/* crc32c seed */
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		/* search for the last csum item that could overlap the range */
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Insert the checksums described by @sums into the csum tree, looping
 * until the whole sums->len range has been written.
 *
 * For each chunk it either finds an existing csum item covering the
 * bytenr (overwriting in place), extends an adjacent item (the -EFBIG
 * case from btrfs_lookup_csum / the "grow it" search below), or
 * inserts a fresh item sized to reach the next existing item.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		/* existing item already covers bytenr: overwrite in place */
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		/* remember where the next item starts to size the insert */
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		/* bytenr is just past the item's end: extend it in place */
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		/* size the new item up to the next existing csum item */
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	/* copy as many csums as fit between item and item_end */
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

/*
 * Translate the file extent item that @path points at into an
 * extent_map.  Handles regular, preallocated and inline extents;
 * any other type only logs an error.
 *
 * @new_inline: for inline extents, suppress the compressed flag
 *              (matches the handling in inode.c:btrfs_get_extent).
 */
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, "
			  "root %llu", type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}