1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/bio.h> 7 #include <linux/slab.h> 8 #include <linux/pagemap.h> 9 #include <linux/highmem.h> 10 #include <linux/sched/mm.h> 11 #include <crypto/hash.h> 12 #include "messages.h" 13 #include "ctree.h" 14 #include "disk-io.h" 15 #include "transaction.h" 16 #include "bio.h" 17 #include "compression.h" 18 #include "fs.h" 19 #include "accessors.h" 20 #include "file-item.h" 21 22 #define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \ 23 sizeof(struct btrfs_item) * 2) / \ 24 size) - 1)) 25 26 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ 27 PAGE_SIZE)) 28 29 /* 30 * Set inode's size according to filesystem options. 31 * 32 * @inode: inode we want to update the disk_i_size for 33 * @new_i_size: i_size we want to set to, 0 if we use i_size 34 * 35 * With NO_HOLES set this simply sets the disk_is_size to whatever i_size_read() 36 * returns as it is perfectly fine with a file that has holes without hole file 37 * extent items. 38 * 39 * However without NO_HOLES we need to only return the area that is contiguous 40 * from the 0 offset of the file. Otherwise we could end up adjust i_size up 41 * to an extent that has a gap in between. 42 * 43 * Finally new_i_size should only be set in the case of truncate where we're not 44 * ready to use i_size_read() as the limiter yet. 45 */ 46 void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size) 47 { 48 u64 start, end, i_size; 49 int ret; 50 51 spin_lock(&inode->lock); 52 i_size = new_i_size ?: i_size_read(&inode->vfs_inode); 53 if (!inode->file_extent_tree) { 54 inode->disk_i_size = i_size; 55 goto out_unlock; 56 } 57 58 ret = find_contiguous_extent_bit(inode->file_extent_tree, 0, &start, 59 &end, EXTENT_DIRTY); 60 if (!ret && start == 0) 61 i_size = min(i_size, end + 1); 62 else 63 i_size = 0; 64 inode->disk_i_size = i_size; 65 out_unlock: 66 spin_unlock(&inode->lock); 67 } 68 69 /* 70 * Mark range within a file as having a new extent inserted. 71 * 72 * @inode: inode being modified 73 * @start: start file offset of the file extent we've inserted 74 * @len: logical length of the file extent item 75 * 76 * Call when we are inserting a new file extent where there was none before. 77 * Does not need to call this in the case where we're replacing an existing file 78 * extent, however if not sure it's fine to call this multiple times. 79 * 80 * The start and len must match the file extent item, so thus must be sectorsize 81 * aligned. 82 */ 83 int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, 84 u64 len) 85 { 86 if (!inode->file_extent_tree) 87 return 0; 88 89 if (len == 0) 90 return 0; 91 92 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize)); 93 94 return set_extent_bit(inode->file_extent_tree, start, start + len - 1, 95 EXTENT_DIRTY, NULL); 96 } 97 98 /* 99 * Mark an inode range as not having a backing extent. 100 * 101 * @inode: inode being modified 102 * @start: start file offset of the file extent we've inserted 103 * @len: logical length of the file extent item 104 * 105 * Called when we drop a file extent, for example when we truncate. Doesn't 106 * need to be called for cases where we're replacing a file extent, like when 107 * we've COWed a file extent. 108 * 109 * The start and len must match the file extent item, so thus must be sectorsize 110 * aligned. 111 */ 112 int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, 113 u64 len) 114 { 115 if (!inode->file_extent_tree) 116 return 0; 117 118 if (len == 0) 119 return 0; 120 121 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) || 122 len == (u64)-1); 123 124 return clear_extent_bit(inode->file_extent_tree, start, 125 start + len - 1, EXTENT_DIRTY, NULL); 126 } 127 128 static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes) 129 { 130 ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize)); 131 132 return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size; 133 } 134 135 static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size) 136 { 137 ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size)); 138 139 return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits; 140 } 141 142 static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info) 143 { 144 u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum), 145 fs_info->csum_size); 146 147 return csum_size_to_bytes(fs_info, max_csum_size); 148 } 149 150 /* 151 * Calculate the total size needed to allocate for an ordered sum structure 152 * spanning @bytes in the file. 153 */ 154 static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes) 155 { 156 return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes); 157 } 158 159 int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans, 160 struct btrfs_root *root, 161 u64 objectid, u64 pos, u64 num_bytes) 162 { 163 int ret = 0; 164 struct btrfs_file_extent_item *item; 165 struct btrfs_key file_key; 166 struct btrfs_path *path; 167 struct extent_buffer *leaf; 168 169 path = btrfs_alloc_path(); 170 if (!path) 171 return -ENOMEM; 172 file_key.objectid = objectid; 173 file_key.offset = pos; 174 file_key.type = BTRFS_EXTENT_DATA_KEY; 175 176 ret = btrfs_insert_empty_item(trans, root, path, &file_key, 177 sizeof(*item)); 178 if (ret < 0) 179 goto out; 180 leaf = path->nodes[0]; 181 item = btrfs_item_ptr(leaf, path->slots[0], 182 struct btrfs_file_extent_item); 183 btrfs_set_file_extent_disk_bytenr(leaf, item, 0); 184 btrfs_set_file_extent_disk_num_bytes(leaf, item, 0); 185 btrfs_set_file_extent_offset(leaf, item, 0); 186 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes); 187 btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes); 188 btrfs_set_file_extent_generation(leaf, item, trans->transid); 189 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 190 btrfs_set_file_extent_compression(leaf, item, 0); 191 btrfs_set_file_extent_encryption(leaf, item, 0); 192 btrfs_set_file_extent_other_encoding(leaf, item, 0); 193 194 btrfs_mark_buffer_dirty(trans, leaf); 195 out: 196 btrfs_free_path(path); 197 return ret; 198 } 199 200 static struct btrfs_csum_item * 201 btrfs_lookup_csum(struct btrfs_trans_handle *trans, 202 struct btrfs_root *root, 203 struct btrfs_path *path, 204 u64 bytenr, int cow) 205 { 206 struct btrfs_fs_info *fs_info = root->fs_info; 207 int ret; 208 struct btrfs_key file_key; 209 struct btrfs_key found_key; 210 struct btrfs_csum_item *item; 211 struct extent_buffer *leaf; 212 u64 csum_offset = 0; 213 const u32 csum_size = fs_info->csum_size; 214 int csums_in_item; 215 216 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 217 file_key.offset = bytenr; 218 file_key.type = BTRFS_EXTENT_CSUM_KEY; 219 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); 220 if (ret < 0) 221 goto fail; 222 leaf = path->nodes[0]; 223 if (ret > 0) { 224 ret = 1; 225 if (path->slots[0] == 0) 226 goto fail; 227 path->slots[0]--; 228 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 229 if (found_key.type != BTRFS_EXTENT_CSUM_KEY) 230 goto fail; 231 232 csum_offset = (bytenr - found_key.offset) >> 233 fs_info->sectorsize_bits; 234 csums_in_item = btrfs_item_size(leaf, path->slots[0]); 235 csums_in_item /= csum_size; 236 237 if (csum_offset == csums_in_item) { 238 ret = -EFBIG; 239 goto fail; 240 } else if (csum_offset > csums_in_item) { 241 goto fail; 242 } 243 } 244 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); 245 item = (struct btrfs_csum_item *)((unsigned char *)item + 246 csum_offset * csum_size); 247 return item; 248 fail: 249 if (ret > 0) 250 ret = -ENOENT; 251 return ERR_PTR(ret); 252 } 253 254 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, 255 struct btrfs_root *root, 256 struct btrfs_path *path, u64 objectid, 257 u64 offset, int mod) 258 { 259 struct btrfs_key file_key; 260 int ins_len = mod < 0 ? -1 : 0; 261 int cow = mod != 0; 262 263 file_key.objectid = objectid; 264 file_key.offset = offset; 265 file_key.type = BTRFS_EXTENT_DATA_KEY; 266 267 return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); 268 } 269 270 /* 271 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and 272 * store the result to @dst. 273 * 274 * Return >0 for the number of sectors we found. 275 * Return 0 for the range [disk_bytenr, disk_bytenr + sectorsize) has no csum 276 * for it. Caller may want to try next sector until one range is hit. 277 * Return <0 for fatal error. 278 */ 279 static int search_csum_tree(struct btrfs_fs_info *fs_info, 280 struct btrfs_path *path, u64 disk_bytenr, 281 u64 len, u8 *dst) 282 { 283 struct btrfs_root *csum_root; 284 struct btrfs_csum_item *item = NULL; 285 struct btrfs_key key; 286 const u32 sectorsize = fs_info->sectorsize; 287 const u32 csum_size = fs_info->csum_size; 288 u32 itemsize; 289 int ret; 290 u64 csum_start; 291 u64 csum_len; 292 293 ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) && 294 IS_ALIGNED(len, sectorsize)); 295 296 /* Check if the current csum item covers disk_bytenr */ 297 if (path->nodes[0]) { 298 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 299 struct btrfs_csum_item); 300 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 301 itemsize = btrfs_item_size(path->nodes[0], path->slots[0]); 302 303 csum_start = key.offset; 304 csum_len = (itemsize / csum_size) * sectorsize; 305 306 if (in_range(disk_bytenr, csum_start, csum_len)) 307 goto found; 308 } 309 310 /* Current item doesn't contain the desired range, search again */ 311 btrfs_release_path(path); 312 csum_root = btrfs_csum_root(fs_info, disk_bytenr); 313 item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0); 314 if (IS_ERR(item)) { 315 ret = PTR_ERR(item); 316 goto out; 317 } 318 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 319 itemsize = btrfs_item_size(path->nodes[0], path->slots[0]); 320 321 csum_start = key.offset; 322 csum_len = (itemsize / csum_size) * sectorsize; 323 ASSERT(in_range(disk_bytenr, csum_start, csum_len)); 324 325 found: 326 ret = (min(csum_start + csum_len, disk_bytenr + len) - 327 disk_bytenr) >> fs_info->sectorsize_bits; 328 read_extent_buffer(path->nodes[0], dst, (unsigned long)item, 329 ret * csum_size); 330 out: 331 if (ret == -ENOENT || ret == -EFBIG) 332 ret = 0; 333 return ret; 334 } 335 336 /* 337 * Lookup the checksum for the read bio in csum tree. 338 * 339 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. 340 */ 341 blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) 342 { 343 struct btrfs_inode *inode = bbio->inode; 344 struct btrfs_fs_info *fs_info = inode->root->fs_info; 345 struct bio *bio = &bbio->bio; 346 struct btrfs_path *path; 347 const u32 sectorsize = fs_info->sectorsize; 348 const u32 csum_size = fs_info->csum_size; 349 u32 orig_len = bio->bi_iter.bi_size; 350 u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; 351 const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits; 352 blk_status_t ret = BLK_STS_OK; 353 u32 bio_offset = 0; 354 355 if ((inode->flags & BTRFS_INODE_NODATASUM) || 356 test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) 357 return BLK_STS_OK; 358 359 /* 360 * This function is only called for read bio. 361 * 362 * This means two things: 363 * - All our csums should only be in csum tree 364 * No ordered extents csums, as ordered extents are only for write 365 * path. 366 * - No need to bother any other info from bvec 367 * Since we're looking up csums, the only important info is the 368 * disk_bytenr and the length, which can be extracted from bi_iter 369 * directly. 370 */ 371 ASSERT(bio_op(bio) == REQ_OP_READ); 372 path = btrfs_alloc_path(); 373 if (!path) 374 return BLK_STS_RESOURCE; 375 376 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 377 bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS); 378 if (!bbio->csum) { 379 btrfs_free_path(path); 380 return BLK_STS_RESOURCE; 381 } 382 } else { 383 bbio->csum = bbio->csum_inline; 384 } 385 386 /* 387 * If requested number of sectors is larger than one leaf can contain, 388 * kick the readahead for csum tree. 389 */ 390 if (nblocks > fs_info->csums_per_leaf) 391 path->reada = READA_FORWARD; 392 393 /* 394 * the free space stuff is only read when it hasn't been 395 * updated in the current transaction. So, we can safely 396 * read from the commit root and sidestep a nasty deadlock 397 * between reading the free space cache and updating the csum tree. 398 */ 399 if (btrfs_is_free_space_inode(inode)) { 400 path->search_commit_root = 1; 401 path->skip_locking = 1; 402 } 403 404 while (bio_offset < orig_len) { 405 int count; 406 u64 cur_disk_bytenr = orig_disk_bytenr + bio_offset; 407 u8 *csum_dst = bbio->csum + 408 (bio_offset >> fs_info->sectorsize_bits) * csum_size; 409 410 count = search_csum_tree(fs_info, path, cur_disk_bytenr, 411 orig_len - bio_offset, csum_dst); 412 if (count < 0) { 413 ret = errno_to_blk_status(count); 414 if (bbio->csum != bbio->csum_inline) 415 kfree(bbio->csum); 416 bbio->csum = NULL; 417 break; 418 } 419 420 /* 421 * We didn't find a csum for this range. We need to make sure 422 * we complain loudly about this, because we are not NODATASUM. 423 * 424 * However for the DATA_RELOC inode we could potentially be 425 * relocating data extents for a NODATASUM inode, so the inode 426 * itself won't be marked with NODATASUM, but the extent we're 427 * copying is in fact NODATASUM. If we don't find a csum we 428 * assume this is the case. 429 */ 430 if (count == 0) { 431 memset(csum_dst, 0, csum_size); 432 count = 1; 433 434 if (btrfs_root_id(inode->root) == BTRFS_DATA_RELOC_TREE_OBJECTID) { 435 u64 file_offset = bbio->file_offset + bio_offset; 436 437 set_extent_bit(&inode->io_tree, file_offset, 438 file_offset + sectorsize - 1, 439 EXTENT_NODATASUM, NULL); 440 } else { 441 btrfs_warn_rl(fs_info, 442 "csum hole found for disk bytenr range [%llu, %llu)", 443 cur_disk_bytenr, cur_disk_bytenr + sectorsize); 444 } 445 } 446 bio_offset += count * sectorsize; 447 } 448 449 btrfs_free_path(path); 450 return ret; 451 } 452 453 /* 454 * Search for checksums for a given logical range. 455 * 456 * @root: The root where to look for checksums. 457 * @start: Logical address of target checksum range. 458 * @end: End offset (inclusive) of the target checksum range. 459 * @list: List for adding each checksum that was found. 460 * Can be NULL in case the caller only wants to check if 461 * there any checksums for the range. 462 * @nowait: Indicate if the search must be non-blocking or not. 463 * 464 * Return < 0 on error, 0 if no checksums were found, or 1 if checksums were 465 * found. 466 */ 467 int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end, 468 struct list_head *list, bool nowait) 469 { 470 struct btrfs_fs_info *fs_info = root->fs_info; 471 struct btrfs_key key; 472 struct btrfs_path *path; 473 struct extent_buffer *leaf; 474 struct btrfs_ordered_sum *sums; 475 struct btrfs_csum_item *item; 476 int ret; 477 bool found_csums = false; 478 479 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 480 IS_ALIGNED(end + 1, fs_info->sectorsize)); 481 482 path = btrfs_alloc_path(); 483 if (!path) 484 return -ENOMEM; 485 486 path->nowait = nowait; 487 488 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 489 key.offset = start; 490 key.type = BTRFS_EXTENT_CSUM_KEY; 491 492 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 493 if (ret < 0) 494 goto out; 495 if (ret > 0 && path->slots[0] > 0) { 496 leaf = path->nodes[0]; 497 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 498 499 /* 500 * There are two cases we can hit here for the previous csum 501 * item: 502 * 503 * |<- search range ->| 504 * |<- csum item ->| 505 * 506 * Or 507 * |<- search range ->| 508 * |<- csum item ->| 509 * 510 * Check if the previous csum item covers the leading part of 511 * the search range. If so we have to start from previous csum 512 * item. 513 */ 514 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && 515 key.type == BTRFS_EXTENT_CSUM_KEY) { 516 if (bytes_to_csum_size(fs_info, start - key.offset) < 517 btrfs_item_size(leaf, path->slots[0] - 1)) 518 path->slots[0]--; 519 } 520 } 521 522 while (start <= end) { 523 u64 csum_end; 524 525 leaf = path->nodes[0]; 526 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 527 ret = btrfs_next_leaf(root, path); 528 if (ret < 0) 529 goto out; 530 if (ret > 0) 531 break; 532 leaf = path->nodes[0]; 533 } 534 535 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 536 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 537 key.type != BTRFS_EXTENT_CSUM_KEY || 538 key.offset > end) 539 break; 540 541 if (key.offset > start) 542 start = key.offset; 543 544 csum_end = key.offset + csum_size_to_bytes(fs_info, 545 btrfs_item_size(leaf, path->slots[0])); 546 if (csum_end <= start) { 547 path->slots[0]++; 548 continue; 549 } 550 551 found_csums = true; 552 if (!list) 553 goto out; 554 555 csum_end = min(csum_end, end + 1); 556 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 557 struct btrfs_csum_item); 558 while (start < csum_end) { 559 unsigned long offset; 560 size_t size; 561 562 size = min_t(size_t, csum_end - start, 563 max_ordered_sum_bytes(fs_info)); 564 sums = kzalloc(btrfs_ordered_sum_size(fs_info, size), 565 GFP_NOFS); 566 if (!sums) { 567 ret = -ENOMEM; 568 goto out; 569 } 570 571 sums->logical = start; 572 sums->len = size; 573 574 offset = bytes_to_csum_size(fs_info, start - key.offset); 575 576 read_extent_buffer(path->nodes[0], 577 sums->sums, 578 ((unsigned long)item) + offset, 579 bytes_to_csum_size(fs_info, size)); 580 581 start += size; 582 list_add_tail(&sums->list, list); 583 } 584 path->slots[0]++; 585 } 586 out: 587 btrfs_free_path(path); 588 if (ret < 0) { 589 if (list) { 590 struct btrfs_ordered_sum *tmp_sums; 591 592 list_for_each_entry_safe(sums, tmp_sums, list, list) 593 kfree(sums); 594 } 595 596 return ret; 597 } 598 599 return found_csums ? 1 : 0; 600 } 601 602 /* 603 * Do the same work as btrfs_lookup_csums_list(), the difference is in how 604 * we return the result. 605 * 606 * This version will set the corresponding bits in @csum_bitmap to represent 607 * that there is a csum found. 608 * Each bit represents a sector. Thus caller should ensure @csum_buf passed 609 * in is large enough to contain all csums. 610 */ 611 int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path, 612 u64 start, u64 end, u8 *csum_buf, 613 unsigned long *csum_bitmap) 614 { 615 struct btrfs_fs_info *fs_info = root->fs_info; 616 struct btrfs_key key; 617 struct extent_buffer *leaf; 618 struct btrfs_csum_item *item; 619 const u64 orig_start = start; 620 bool free_path = false; 621 int ret; 622 623 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 624 IS_ALIGNED(end + 1, fs_info->sectorsize)); 625 626 if (!path) { 627 path = btrfs_alloc_path(); 628 if (!path) 629 return -ENOMEM; 630 free_path = true; 631 } 632 633 /* Check if we can reuse the previous path. */ 634 if (path->nodes[0]) { 635 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 636 637 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && 638 key.type == BTRFS_EXTENT_CSUM_KEY && 639 key.offset <= start) 640 goto search_forward; 641 btrfs_release_path(path); 642 } 643 644 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 645 key.type = BTRFS_EXTENT_CSUM_KEY; 646 key.offset = start; 647 648 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 649 if (ret < 0) 650 goto fail; 651 if (ret > 0 && path->slots[0] > 0) { 652 leaf = path->nodes[0]; 653 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 654 655 /* 656 * There are two cases we can hit here for the previous csum 657 * item: 658 * 659 * |<- search range ->| 660 * |<- csum item ->| 661 * 662 * Or 663 * |<- search range ->| 664 * |<- csum item ->| 665 * 666 * Check if the previous csum item covers the leading part of 667 * the search range. If so we have to start from previous csum 668 * item. 669 */ 670 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && 671 key.type == BTRFS_EXTENT_CSUM_KEY) { 672 if (bytes_to_csum_size(fs_info, start - key.offset) < 673 btrfs_item_size(leaf, path->slots[0] - 1)) 674 path->slots[0]--; 675 } 676 } 677 678 search_forward: 679 while (start <= end) { 680 u64 csum_end; 681 682 leaf = path->nodes[0]; 683 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 684 ret = btrfs_next_leaf(root, path); 685 if (ret < 0) 686 goto fail; 687 if (ret > 0) 688 break; 689 leaf = path->nodes[0]; 690 } 691 692 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 693 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 694 key.type != BTRFS_EXTENT_CSUM_KEY || 695 key.offset > end) 696 break; 697 698 if (key.offset > start) 699 start = key.offset; 700 701 csum_end = key.offset + csum_size_to_bytes(fs_info, 702 btrfs_item_size(leaf, path->slots[0])); 703 if (csum_end <= start) { 704 path->slots[0]++; 705 continue; 706 } 707 708 csum_end = min(csum_end, end + 1); 709 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 710 struct btrfs_csum_item); 711 while (start < csum_end) { 712 unsigned long offset; 713 size_t size; 714 u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info, 715 start - orig_start); 716 717 size = min_t(size_t, csum_end - start, end + 1 - start); 718 719 offset = bytes_to_csum_size(fs_info, start - key.offset); 720 721 read_extent_buffer(path->nodes[0], csum_dest, 722 ((unsigned long)item) + offset, 723 bytes_to_csum_size(fs_info, size)); 724 725 bitmap_set(csum_bitmap, 726 (start - orig_start) >> fs_info->sectorsize_bits, 727 size >> fs_info->sectorsize_bits); 728 729 start += size; 730 } 731 path->slots[0]++; 732 } 733 ret = 0; 734 fail: 735 if (free_path) 736 btrfs_free_path(path); 737 return ret; 738 } 739 740 /* 741 * Calculate checksums of the data contained inside a bio. 742 */ 743 blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) 744 { 745 struct btrfs_ordered_extent *ordered = bbio->ordered; 746 struct btrfs_inode *inode = bbio->inode; 747 struct btrfs_fs_info *fs_info = inode->root->fs_info; 748 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 749 struct bio *bio = &bbio->bio; 750 struct btrfs_ordered_sum *sums; 751 char *data; 752 struct bvec_iter iter; 753 struct bio_vec bvec; 754 int index; 755 unsigned int blockcount; 756 int i; 757 unsigned nofs_flag; 758 759 nofs_flag = memalloc_nofs_save(); 760 sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), 761 GFP_KERNEL); 762 memalloc_nofs_restore(nofs_flag); 763 764 if (!sums) 765 return BLK_STS_RESOURCE; 766 767 sums->len = bio->bi_iter.bi_size; 768 INIT_LIST_HEAD(&sums->list); 769 770 sums->logical = bio->bi_iter.bi_sector << SECTOR_SHIFT; 771 index = 0; 772 773 shash->tfm = fs_info->csum_shash; 774 775 bio_for_each_segment(bvec, bio, iter) { 776 blockcount = BTRFS_BYTES_TO_BLKS(fs_info, 777 bvec.bv_len + fs_info->sectorsize 778 - 1); 779 780 for (i = 0; i < blockcount; i++) { 781 data = bvec_kmap_local(&bvec); 782 crypto_shash_digest(shash, 783 data + (i * fs_info->sectorsize), 784 fs_info->sectorsize, 785 sums->sums + index); 786 kunmap_local(data); 787 index += fs_info->csum_size; 788 } 789 790 } 791 792 bbio->sums = sums; 793 btrfs_add_ordered_sum(ordered, sums); 794 return 0; 795 } 796 797 /* 798 * Nodatasum I/O on zoned file systems still requires an btrfs_ordered_sum to 799 * record the updated logical address on Zone Append completion. 800 * Allocate just the structure with an empty sums array here for that case. 801 */ 802 blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio) 803 { 804 bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS); 805 if (!bbio->sums) 806 return BLK_STS_RESOURCE; 807 bbio->sums->len = bbio->bio.bi_iter.bi_size; 808 bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; 809 btrfs_add_ordered_sum(bbio->ordered, bbio->sums); 810 return 0; 811 } 812 813 /* 814 * Remove one checksum overlapping a range. 815 * 816 * This expects the key to describe the csum pointed to by the path, and it 817 * expects the csum to overlap the range [bytenr, len] 818 * 819 * The csum should not be entirely contained in the range and the range should 820 * not be entirely contained in the csum. 821 * 822 * This calls btrfs_truncate_item with the correct args based on the overlap, 823 * and fixes up the key as required. 824 */ 825 static noinline void truncate_one_csum(struct btrfs_trans_handle *trans, 826 struct btrfs_path *path, 827 struct btrfs_key *key, 828 u64 bytenr, u64 len) 829 { 830 struct btrfs_fs_info *fs_info = trans->fs_info; 831 struct extent_buffer *leaf; 832 const u32 csum_size = fs_info->csum_size; 833 u64 csum_end; 834 u64 end_byte = bytenr + len; 835 u32 blocksize_bits = fs_info->sectorsize_bits; 836 837 leaf = path->nodes[0]; 838 csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size; 839 csum_end <<= blocksize_bits; 840 csum_end += key->offset; 841 842 if (key->offset < bytenr && csum_end <= end_byte) { 843 /* 844 * [ bytenr - len ] 845 * [ ] 846 * [csum ] 847 * A simple truncate off the end of the item 848 */ 849 u32 new_size = (bytenr - key->offset) >> blocksize_bits; 850 new_size *= csum_size; 851 btrfs_truncate_item(trans, path, new_size, 1); 852 } else if (key->offset >= bytenr && csum_end > end_byte && 853 end_byte > key->offset) { 854 /* 855 * [ bytenr - len ] 856 * [ ] 857 * [csum ] 858 * we need to truncate from the beginning of the csum 859 */ 860 u32 new_size = (csum_end - end_byte) >> blocksize_bits; 861 new_size *= csum_size; 862 863 btrfs_truncate_item(trans, path, new_size, 0); 864 865 key->offset = end_byte; 866 btrfs_set_item_key_safe(trans, path, key); 867 } else { 868 BUG(); 869 } 870 } 871 872 /* 873 * Delete the csum items from the csum tree for a given range of bytes. 874 */ 875 int btrfs_del_csums(struct btrfs_trans_handle *trans, 876 struct btrfs_root *root, u64 bytenr, u64 len) 877 { 878 struct btrfs_fs_info *fs_info = trans->fs_info; 879 struct btrfs_path *path; 880 struct btrfs_key key; 881 u64 end_byte = bytenr + len; 882 u64 csum_end; 883 struct extent_buffer *leaf; 884 int ret = 0; 885 const u32 csum_size = fs_info->csum_size; 886 u32 blocksize_bits = fs_info->sectorsize_bits; 887 888 ASSERT(btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID || 889 btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID); 890 891 path = btrfs_alloc_path(); 892 if (!path) 893 return -ENOMEM; 894 895 while (1) { 896 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 897 key.offset = end_byte - 1; 898 key.type = BTRFS_EXTENT_CSUM_KEY; 899 900 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 901 if (ret > 0) { 902 ret = 0; 903 if (path->slots[0] == 0) 904 break; 905 path->slots[0]--; 906 } else if (ret < 0) { 907 break; 908 } 909 910 leaf = path->nodes[0]; 911 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 912 913 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 914 key.type != BTRFS_EXTENT_CSUM_KEY) { 915 break; 916 } 917 918 if (key.offset >= end_byte) 919 break; 920 921 csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size; 922 csum_end <<= blocksize_bits; 923 csum_end += key.offset; 924 925 /* this csum ends before we start, we're done */ 926 if (csum_end <= bytenr) 927 break; 928 929 /* delete the entire item, it is inside our range */ 930 if (key.offset >= bytenr && csum_end <= end_byte) { 931 int del_nr = 1; 932 933 /* 934 * Check how many csum items preceding this one in this 935 * leaf correspond to our range and then delete them all 936 * at once. 937 */ 938 if (key.offset > bytenr && path->slots[0] > 0) { 939 int slot = path->slots[0] - 1; 940 941 while (slot >= 0) { 942 struct btrfs_key pk; 943 944 btrfs_item_key_to_cpu(leaf, &pk, slot); 945 if (pk.offset < bytenr || 946 pk.type != BTRFS_EXTENT_CSUM_KEY || 947 pk.objectid != 948 BTRFS_EXTENT_CSUM_OBJECTID) 949 break; 950 path->slots[0] = slot; 951 del_nr++; 952 key.offset = pk.offset; 953 slot--; 954 } 955 } 956 ret = btrfs_del_items(trans, root, path, 957 path->slots[0], del_nr); 958 if (ret) 959 break; 960 if (key.offset == bytenr) 961 break; 962 } else if (key.offset < bytenr && csum_end > end_byte) { 963 unsigned long offset; 964 unsigned long shift_len; 965 unsigned long item_offset; 966 /* 967 * [ bytenr - len ] 968 * [csum ] 969 * 970 * Our bytes are in the middle of the csum, 971 * we need to split this item and insert a new one. 972 * 973 * But we can't drop the path because the 974 * csum could change, get removed, extended etc. 975 * 976 * The trick here is the max size of a csum item leaves 977 * enough room in the tree block for a single 978 * item header. So, we split the item in place, 979 * adding a new header pointing to the existing 980 * bytes. Then we loop around again and we have 981 * a nicely formed csum item that we can neatly 982 * truncate. 983 */ 984 offset = (bytenr - key.offset) >> blocksize_bits; 985 offset *= csum_size; 986 987 shift_len = (len >> blocksize_bits) * csum_size; 988 989 item_offset = btrfs_item_ptr_offset(leaf, 990 path->slots[0]); 991 992 memzero_extent_buffer(leaf, item_offset + offset, 993 shift_len); 994 key.offset = bytenr; 995 996 /* 997 * btrfs_split_item returns -EAGAIN when the 998 * item changed size or key 999 */ 1000 ret = btrfs_split_item(trans, root, path, &key, offset); 1001 if (ret && ret != -EAGAIN) { 1002 btrfs_abort_transaction(trans, ret); 1003 break; 1004 } 1005 ret = 0; 1006 1007 key.offset = end_byte - 1; 1008 } else { 1009 truncate_one_csum(trans, path, &key, bytenr, len); 1010 if (key.offset < bytenr) 1011 break; 1012 } 1013 btrfs_release_path(path); 1014 } 1015 btrfs_free_path(path); 1016 return ret; 1017 } 1018 1019 static int find_next_csum_offset(struct btrfs_root *root, 1020 struct btrfs_path *path, 1021 u64 *next_offset) 1022 { 1023 const u32 nritems = btrfs_header_nritems(path->nodes[0]); 1024 struct btrfs_key found_key; 1025 int slot = path->slots[0] + 1; 1026 int ret; 1027 1028 if (nritems == 0 || slot >= nritems) { 1029 ret = btrfs_next_leaf(root, path); 1030 if (ret < 0) { 1031 return ret; 1032 } else if (ret > 0) { 1033 *next_offset = (u64)-1; 1034 return 0; 1035 } 1036 slot = path->slots[0]; 1037 } 1038 1039 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); 1040 1041 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 1042 found_key.type != BTRFS_EXTENT_CSUM_KEY) 1043 *next_offset = (u64)-1; 1044 else 1045 *next_offset = found_key.offset; 1046 1047 return 0; 1048 } 1049 1050 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 1051 struct btrfs_root *root, 1052 struct btrfs_ordered_sum *sums) 1053 { 1054 struct btrfs_fs_info *fs_info = root->fs_info; 1055 struct btrfs_key file_key; 1056 struct btrfs_key found_key; 1057 struct btrfs_path *path; 1058 struct btrfs_csum_item *item; 1059 struct btrfs_csum_item *item_end; 1060 struct extent_buffer *leaf = NULL; 1061 u64 next_offset; 1062 u64 total_bytes = 0; 1063 u64 csum_offset; 1064 u64 bytenr; 1065 u32 ins_size; 1066 int index = 0; 1067 int found_next; 1068 int ret; 1069 const u32 csum_size = fs_info->csum_size; 1070 1071 path = btrfs_alloc_path(); 1072 if (!path) 1073 return -ENOMEM; 1074 again: 1075 next_offset = (u64)-1; 1076 found_next = 0; 1077 bytenr = sums->logical + total_bytes; 1078 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 1079 file_key.offset = bytenr; 1080 file_key.type = BTRFS_EXTENT_CSUM_KEY; 1081 1082 item = btrfs_lookup_csum(trans, root, path, bytenr, 1); 1083 if (!IS_ERR(item)) { 1084 ret = 0; 1085 leaf = path->nodes[0]; 1086 item_end = btrfs_item_ptr(leaf, path->slots[0], 1087 struct btrfs_csum_item); 1088 item_end = (struct btrfs_csum_item *)((char *)item_end + 1089 btrfs_item_size(leaf, path->slots[0])); 1090 goto found; 1091 } 1092 ret = PTR_ERR(item); 1093 if (ret != -EFBIG && ret != -ENOENT) 1094 goto out; 1095 1096 if (ret == -EFBIG) { 1097 u32 item_size; 1098 /* we found one, but it isn't big enough yet */ 1099 leaf = path->nodes[0]; 1100 item_size = btrfs_item_size(leaf, path->slots[0]); 1101 if ((item_size / csum_size) >= 1102 MAX_CSUM_ITEMS(fs_info, csum_size)) { 1103 /* already at max size, make a new one */ 1104 goto insert; 1105 } 1106 } else { 1107 /* We didn't find a csum item, insert one. */ 1108 ret = find_next_csum_offset(root, path, &next_offset); 1109 if (ret < 0) 1110 goto out; 1111 found_next = 1; 1112 goto insert; 1113 } 1114 1115 /* 1116 * At this point, we know the tree has a checksum item that ends at an 1117 * offset matching the start of the checksum range we want to insert. 1118 * We try to extend that item as much as possible and then add as many 1119 * checksums to it as they fit. 1120 * 1121 * First check if the leaf has enough free space for at least one 1122 * checksum. If it has go directly to the item extension code, otherwise 1123 * release the path and do a search for insertion before the extension. 1124 */ 1125 if (btrfs_leaf_free_space(leaf) >= csum_size) { 1126 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1127 csum_offset = (bytenr - found_key.offset) >> 1128 fs_info->sectorsize_bits; 1129 goto extend_csum; 1130 } 1131 1132 btrfs_release_path(path); 1133 path->search_for_extension = 1; 1134 ret = btrfs_search_slot(trans, root, &file_key, path, 1135 csum_size, 1); 1136 path->search_for_extension = 0; 1137 if (ret < 0) 1138 goto out; 1139 1140 if (ret > 0) { 1141 if (path->slots[0] == 0) 1142 goto insert; 1143 path->slots[0]--; 1144 } 1145 1146 leaf = path->nodes[0]; 1147 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1148 csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits; 1149 1150 if (found_key.type != BTRFS_EXTENT_CSUM_KEY || 1151 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 1152 csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) { 1153 goto insert; 1154 } 1155 1156 extend_csum: 1157 if (csum_offset == btrfs_item_size(leaf, path->slots[0]) / 1158 csum_size) { 1159 int extend_nr; 1160 u64 tmp; 1161 u32 diff; 1162 1163 tmp = sums->len - total_bytes; 1164 tmp >>= fs_info->sectorsize_bits; 1165 WARN_ON(tmp < 1); 1166 extend_nr = max_t(int, 1, tmp); 1167 1168 /* 1169 * A log tree can already have checksum items with a subset of 1170 * the checksums we are trying to log. This can happen after 1171 * doing a sequence of partial writes into prealloc extents and 1172 * fsyncs in between, with a full fsync logging a larger subrange 1173 * of an extent for which a previous fast fsync logged a smaller 1174 * subrange. And this happens in particular due to merging file 1175 * extent items when we complete an ordered extent for a range 1176 * covered by a prealloc extent - this is done at 1177 * btrfs_mark_extent_written(). 1178 * 1179 * So if we try to extend the previous checksum item, which has 1180 * a range that ends at the start of the range we want to insert, 1181 * make sure we don't extend beyond the start offset of the next 1182 * checksum item. If we are at the last item in the leaf, then 1183 * forget the optimization of extending and add a new checksum 1184 * item - it is not worth the complexity of releasing the path, 1185 * getting the first key for the next leaf, repeat the btree 1186 * search, etc, because log trees are temporary anyway and it 1187 * would only save a few bytes of leaf space. 1188 */ 1189 if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) { 1190 if (path->slots[0] + 1 >= 1191 btrfs_header_nritems(path->nodes[0])) { 1192 ret = find_next_csum_offset(root, path, &next_offset); 1193 if (ret < 0) 1194 goto out; 1195 found_next = 1; 1196 goto insert; 1197 } 1198 1199 ret = find_next_csum_offset(root, path, &next_offset); 1200 if (ret < 0) 1201 goto out; 1202 1203 tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits; 1204 if (tmp <= INT_MAX) 1205 extend_nr = min_t(int, extend_nr, tmp); 1206 } 1207 1208 diff = (csum_offset + extend_nr) * csum_size; 1209 diff = min(diff, 1210 MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); 1211 1212 diff = diff - btrfs_item_size(leaf, path->slots[0]); 1213 diff = min_t(u32, btrfs_leaf_free_space(leaf), diff); 1214 diff /= csum_size; 1215 diff *= csum_size; 1216 1217 btrfs_extend_item(trans, path, diff); 1218 ret = 0; 1219 goto csum; 1220 } 1221 1222 insert: 1223 btrfs_release_path(path); 1224 csum_offset = 0; 1225 if (found_next) { 1226 u64 tmp; 1227 1228 tmp = sums->len - total_bytes; 1229 tmp >>= fs_info->sectorsize_bits; 1230 tmp = min(tmp, (next_offset - file_key.offset) >> 1231 fs_info->sectorsize_bits); 1232 1233 tmp = max_t(u64, 1, tmp); 1234 tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size)); 1235 ins_size = csum_size * tmp; 1236 } else { 1237 ins_size = csum_size; 1238 } 1239 ret = btrfs_insert_empty_item(trans, root, path, &file_key, 1240 ins_size); 1241 if (ret < 0) 1242 goto out; 1243 leaf = path->nodes[0]; 1244 csum: 1245 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); 1246 item_end = (struct btrfs_csum_item *)((unsigned char *)item + 1247 btrfs_item_size(leaf, path->slots[0])); 1248 item = (struct btrfs_csum_item *)((unsigned char *)item + 1249 csum_offset * csum_size); 1250 found: 1251 ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits; 1252 ins_size *= csum_size; 1253 ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item, 1254 ins_size); 1255 write_extent_buffer(leaf, sums->sums + index, (unsigned long)item, 1256 ins_size); 1257 1258 index += ins_size; 1259 ins_size /= csum_size; 1260 total_bytes += ins_size * fs_info->sectorsize; 1261 1262 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 1263 if (total_bytes < sums->len) { 1264 btrfs_release_path(path); 1265 cond_resched(); 1266 goto again; 1267 } 1268 out: 1269 btrfs_free_path(path); 1270 return ret; 1271 } 1272 1273 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, 1274 const struct btrfs_path *path, 1275 const struct btrfs_file_extent_item *fi, 1276 struct extent_map *em) 1277 { 1278 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1279 struct btrfs_root *root = inode->root; 1280 struct extent_buffer *leaf = path->nodes[0]; 1281 const int slot = path->slots[0]; 1282 struct btrfs_key key; 1283 u64 extent_start; 1284 u8 type = btrfs_file_extent_type(leaf, fi); 1285 int compress_type = btrfs_file_extent_compression(leaf, fi); 1286 1287 btrfs_item_key_to_cpu(leaf, &key, slot); 1288 extent_start = key.offset; 1289 em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1290 em->generation = btrfs_file_extent_generation(leaf, fi); 1291 if (type == BTRFS_FILE_EXTENT_REG || 1292 type == BTRFS_FILE_EXTENT_PREALLOC) { 1293 const u64 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1294 1295 em->start = extent_start; 1296 em->len = btrfs_file_extent_end(path) - extent_start; 1297 if (disk_bytenr == 0) { 1298 em->disk_bytenr = EXTENT_MAP_HOLE; 1299 em->disk_num_bytes = 0; 1300 em->offset = 0; 1301 return; 1302 } 1303 em->disk_bytenr = disk_bytenr; 1304 em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); 1305 em->offset = btrfs_file_extent_offset(leaf, fi); 1306 if (compress_type != BTRFS_COMPRESS_NONE) { 1307 extent_map_set_compression(em, compress_type); 1308 } else { 1309 /* 1310 * Older kernels can create regular non-hole data 1311 * extents with ram_bytes smaller than disk_num_bytes. 1312 * Not a big deal, just always use disk_num_bytes 1313 * for ram_bytes. 1314 */ 1315 em->ram_bytes = em->disk_num_bytes; 1316 if (type == BTRFS_FILE_EXTENT_PREALLOC) 1317 em->flags |= EXTENT_FLAG_PREALLOC; 1318 } 1319 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 1320 /* Tree-checker has ensured this. */ 1321 ASSERT(extent_start == 0); 1322 1323 em->disk_bytenr = EXTENT_MAP_INLINE; 1324 em->start = 0; 1325 em->len = fs_info->sectorsize; 1326 em->offset = 0; 1327 extent_map_set_compression(em, compress_type); 1328 } else { 1329 btrfs_err(fs_info, 1330 "unknown file extent item type %d, inode %llu, offset %llu, " 1331 "root %llu", type, btrfs_ino(inode), extent_start, 1332 btrfs_root_id(root)); 1333 } 1334 } 1335 1336 /* 1337 * Returns the end offset (non inclusive) of the file extent item the given path 1338 * points to. If it points to an inline extent, the returned offset is rounded 1339 * up to the sector size. 1340 */ 1341 u64 btrfs_file_extent_end(const struct btrfs_path *path) 1342 { 1343 const struct extent_buffer *leaf = path->nodes[0]; 1344 const int slot = path->slots[0]; 1345 struct btrfs_file_extent_item *fi; 1346 struct btrfs_key key; 1347 u64 end; 1348 1349 btrfs_item_key_to_cpu(leaf, &key, slot); 1350 ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); 1351 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 1352 1353 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) 1354 end = leaf->fs_info->sectorsize; 1355 else 1356 end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 1357 1358 return end; 1359 } 1360