// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "bio.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))
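/*
 * A back-of-the-envelope illustration of the macros above (not normative,
 * assuming 16 KiB nodes, 4 KiB sectors and crc32c, i.e. csum_size == 4):
 * BTRFS_LEAF_DATA_SIZE() is the nodesize minus the leaf header, so
 * __MAX_CSUM_ITEMS() comes out to roughly four thousand checksums, meaning a
 * single csum item can describe on the order of 16 MiB of data. The min_t()
 * with PAGE_SIZE then caps the number of checksums one item may carry.
 */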
/*
 * Set inode's size according to filesystem options.
 *
 * @inode:      inode we want to update the disk_i_size for
 * @new_i_size: i_size we want to set to, 0 if we use i_size
 *
 * With NO_HOLES set this simply sets the disk_i_size to whatever i_size_read()
 * returns as it is perfectly fine with a file that has holes without hole file
 * extent items.
 *
 * However without NO_HOLES we need to only return the area that is contiguous
 * from the 0 offset of the file. Otherwise we could end up adjusting i_size up
 * to an extent that has a gap in between.
 *
 * Finally new_i_size should only be set in the case of truncate where we're not
 * ready to use i_size_read() as the limiter yet.
 */
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
	u64 start, end, i_size;
	bool found;

	spin_lock(&inode->lock);
	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
	if (!inode->file_extent_tree) {
		inode->disk_i_size = i_size;
		goto out_unlock;
	}

	found = btrfs_find_contiguous_extent_bit(inode->file_extent_tree, 0, &start,
						 &end, EXTENT_DIRTY);
	if (found && start == 0)
		i_size = min(i_size, end + 1);
	else
		i_size = 0;
	inode->disk_i_size = i_size;
out_unlock:
	spin_unlock(&inode->lock);
}

/*
 * Mark range within a file as having a new extent inserted.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've inserted
 * @len:   logical length of the file extent item
 *
 * Call when we are inserting a new file extent where there was none before.
 * There is no need to call this in the case where we're replacing an existing
 * file extent, however, if not sure, it's fine to call this multiple times.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
				      u64 len)
{
	if (!inode->file_extent_tree)
		return 0;

	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));

	return btrfs_set_extent_bit(inode->file_extent_tree, start, start + len - 1,
				    EXTENT_DIRTY, NULL);
}

/*
 * Mark an inode range as not having a backing extent.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've dropped
 * @len:   logical length of the file extent item
 *
 * Called when we drop a file extent, for example when we truncate. Doesn't
 * need to be called for cases where we're replacing a file extent, like when
 * we've COWed a file extent.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
					u64 len)
{
	if (!inode->file_extent_tree)
		return 0;

	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
	       len == (u64)-1);

	return btrfs_clear_extent_bit(inode->file_extent_tree, start,
				      start + len - 1, EXTENT_DIRTY, NULL);
}

static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
{
	ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize));

	return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size;
}

static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size)
{
	ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size));

	return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits;
}

static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
{
	u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
				       fs_info->csum_size);

	return csum_size_to_bytes(fs_info, max_csum_size);
}

/*
 * Calculate the total size needed to allocate for an ordered sum structure
 * spanning @bytes in the file.
 */
static int btrfs_ordered_sum_size(const struct btrfs_fs_info *fs_info, unsigned long bytes)
{
	return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
}
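/*
 * Worked example for the conversion helpers above (illustrative numbers,
 * assuming 4 KiB sectors and crc32c, i.e. csum_size == 4):
 *
 *   bytes_to_csum_size(fs_info, SZ_64K) == (65536 >> 12) * 4 == 64
 *   csum_size_to_bytes(fs_info, 64)     == (64 / 4) << 12    == 65536
 *
 * max_ordered_sum_bytes() is then the largest data range whose checksums,
 * together with the struct btrfs_ordered_sum header, still fit in one page.
 */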
int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos, u64 num_bytes)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	file_key.objectid = objectid;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	file_key.offset = pos;

	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		return ret;
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, 0);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, 0);
	btrfs_set_file_extent_offset(leaf, item, 0);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, 0);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	return ret;
}

static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	const u32 csum_size = fs_info->csum_size;
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	file_key.offset = bytenr;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sectorsize_bits;
		csums_in_item = btrfs_item_size(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	file_key.offset = offset;

	return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}

/*
 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and
 * store the result to @dst.
 *
 * Return >0 for the number of sectors we found.
 * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum.
 * The caller may want to try the next sector until one range is hit.
 * Return <0 for fatal error.
 */
static int search_csum_tree(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 disk_bytenr,
			    u64 len, u8 *dst)
{
	struct btrfs_root *csum_root;
	struct btrfs_csum_item *item = NULL;
	struct btrfs_key key;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 itemsize;
	int ret;
	u64 csum_start;
	u64 csum_len;

	ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
	       IS_ALIGNED(len, sectorsize));

	/* Check if the current csum item covers disk_bytenr */
	if (path->nodes[0]) {
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

		csum_start = key.offset;
		csum_len = (itemsize / csum_size) * sectorsize;

		if (in_range(disk_bytenr, csum_start, csum_len))
			goto found;
	}

	/* Current item doesn't contain the desired range, search again */
	btrfs_release_path(path);
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
	if (IS_ERR(item)) {
		ret = PTR_ERR(item);
		goto out;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

	csum_start = key.offset;
	csum_len = (itemsize / csum_size) * sectorsize;
	ASSERT(in_range(disk_bytenr, csum_start, csum_len));

found:
	ret = (min(csum_start + csum_len, disk_bytenr + len) -
	       disk_bytenr) >> fs_info->sectorsize_bits;
	read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
			   ret * csum_size);
out:
	if (ret == -ENOENT || ret == -EFBIG)
		ret = 0;
	return ret;
}
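/*
 * To make the coverage math in search_csum_tree() concrete (illustrative,
 * assuming 4 KiB sectors and crc32c): a csum item whose key offset is 1 MiB
 * and whose item size is 256 bytes holds 256 / 4 == 64 checksums, so it
 * covers the logical range [1 MiB, 1 MiB + 256 KiB). A search for
 * disk_bytenr == 1 MiB + 64 KiB with len == 1 MiB would then return 48,
 * the number of sectors between disk_bytenr and the end of the item's
 * coverage.
 */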
/*
 * Lookup the checksum for the read bio in csum tree.
 *
 * Return: 0 on success, -ENOMEM if allocating memory fails, or another
 * negative errno if looking up the checksums fails.
 */
int btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = &bbio->bio;
	BTRFS_PATH_AUTO_FREE(path);
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 orig_len = bio->bi_iter.bi_size;
	u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
	int ret = 0;
	u32 bio_offset = 0;

	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state))
		return 0;

	/*
	 * This function is only called for read bios.
	 *
	 * This means two things:
	 * - All our csums should only be in csum tree
	 *   No ordered extents csums, as ordered extents are only for write
	 *   path.
	 * - No need to bother with any other info from the bvec
	 *   Since we're looking up csums, the only important info is the
	 *   disk_bytenr and the length, which can be extracted from bi_iter
	 *   directly.
	 */
	ASSERT(bio_op(bio) == REQ_OP_READ);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
		bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
		if (!bbio->csum)
			return -ENOMEM;
	} else {
		bbio->csum = bbio->csum_inline;
	}

	/*
	 * If the requested number of sectors is larger than one leaf can
	 * contain, kick the readahead for csum tree.
	 */
	if (nblocks > fs_info->csums_per_leaf)
		path->reada = READA_FORWARD;

	/*
	 * The free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	/*
	 * If we are searching for a csum of an extent from a past
	 * transaction, we can search in the commit root and reduce
	 * lock contention on the csum tree extent buffers.
	 *
	 * This is important because that lock is an rwsem which gets
	 * pretty heavy write load under memory pressure and sustained
	 * csum overwrites, unlike the commit_root_sem. (Memory pressure
	 * makes us writeback the nodes multiple times per transaction,
	 * which makes us cow them each time, taking the write lock.)
	 *
	 * Due to how rwsem is implemented, there is a possible
	 * priority inversion where the readers holding the lock don't
	 * get scheduled (say they're in a cgroup stuck in heavy reclaim)
	 * which then blocks writers, including transaction commit. By
	 * using a semaphore with fewer writers (only a commit switching
	 * the roots), we make this issue less likely.
	 *
	 * Note that we don't rely on btrfs_search_slot to lock the
	 * commit root csum. We call search_slot multiple times, which would
	 * create a potential race where a commit comes in between searches
	 * while we are not holding the commit_root_sem, and we get csums
	 * from across transactions.
	 */
	if (bbio->csum_search_commit_root) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
		down_read(&fs_info->commit_root_sem);
	}

	while (bio_offset < orig_len) {
		int count;
		u64 cur_disk_bytenr = orig_disk_bytenr + bio_offset;
		u8 *csum_dst = bbio->csum +
			(bio_offset >> fs_info->sectorsize_bits) * csum_size;

		count = search_csum_tree(fs_info, path, cur_disk_bytenr,
					 orig_len - bio_offset, csum_dst);
		if (count < 0) {
			ret = count;
			if (bbio->csum != bbio->csum_inline)
				kfree(bbio->csum);
			bbio->csum = NULL;
			break;
		}

		/*
		 * We didn't find a csum for this range. We need to make sure
		 * we complain loudly about this, because we are not NODATASUM.
		 *
		 * However for the DATA_RELOC inode we could potentially be
		 * relocating data extents for a NODATASUM inode, so the inode
		 * itself won't be marked with NODATASUM, but the extent we're
		 * copying is in fact NODATASUM. If we don't find a csum we
		 * assume this is the case.
		 */
		if (count == 0) {
			memset(csum_dst, 0, csum_size);
			count = 1;

			if (btrfs_is_data_reloc_root(inode->root)) {
				u64 file_offset = bbio->file_offset + bio_offset;

				btrfs_set_extent_bit(&inode->io_tree, file_offset,
						     file_offset + sectorsize - 1,
						     EXTENT_NODATASUM, NULL);
			} else {
				btrfs_warn_rl(fs_info,
			"csum hole found for disk bytenr range [%llu, %llu)",
					cur_disk_bytenr, cur_disk_bytenr + sectorsize);
			}
		}
		bio_offset += count * sectorsize;
	}

	if (bbio->csum_search_commit_root)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
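/*
 * Note on the csum buffer layout used above (illustrative, assuming 4 KiB
 * sectors, crc32c and BTRFS_BIO_INLINE_CSUM_SIZE == 64): a 64 KiB read bio
 * has nblocks == 16 and needs 16 * 4 == 64 bytes of checksums, which still
 * fits in bbio->csum_inline, so no allocation is needed. The checksum for
 * the sector at bio_offset lives at csum + (bio_offset >> 12) * 4.
 */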
/*
 * Search for checksums for a given logical range.
 *
 * @root:   The root where to look for checksums.
 * @start:  Logical address of target checksum range.
 * @end:    End offset (inclusive) of the target checksum range.
 * @list:   List for adding each checksum that was found.
 *          Can be NULL in case the caller only wants to check if there are
 *          any checksums for the range.
 * @nowait: Indicate if the search must be non-blocking or not.
 *
 * Return < 0 on error, 0 if no checksums were found, or 1 if checksums were
 * found.
 */
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
			    struct list_head *list, bool nowait)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	int ret;
	bool found_csums = false;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->nowait = nowait;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from previous csum
		 * item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		found_csums = true;
		if (!list)
			goto out;

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;

			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto out;
			}

			sums->logical = start;
			sums->len = size;

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			start += size;
			list_add_tail(&sums->list, list);
		}
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	if (ret < 0) {
		if (list) {
			struct btrfs_ordered_sum *tmp_sums;

			list_for_each_entry_safe(sums, tmp_sums, list, list)
				kfree(sums);
		}

		return ret;
	}

	return found_csums ? 1 : 0;
}
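/*
 * Illustration of the list chunking above (rough numbers, assuming 4 KiB
 * sectors, crc32c and 4 KiB pages): max_ordered_sum_bytes() allows about
 * (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / 4 checksums per
 * btrfs_ordered_sum, i.e. just under 4 MiB of data per allocation. A 10 MiB
 * range fully covered by checksums would therefore come back on @list as
 * three btrfs_ordered_sum entries rather than one large allocation.
 */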
/*
 * Do the same work as btrfs_lookup_csums_list(), the difference is in how
 * we return the result.
 *
 * This version will set the corresponding bits in @csum_bitmap to represent
 * that there is a csum found.
 * Each bit represents a sector. Thus the caller should ensure the @csum_buf
 * passed in is large enough to contain all the csums.
 */
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
			      u64 start, u64 end, u8 *csum_buf,
			      unsigned long *csum_bitmap)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_csum_item *item;
	const u64 orig_start = start;
	bool free_path = false;
	int ret;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
		free_path = true;
	}

	/* Check if we can reuse the previous path. */
	if (path->nodes[0]) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY &&
		    key.offset <= start)
			goto search_forward;
		btrfs_release_path(path);
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from previous csum
		 * item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

search_forward:
	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;
			u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info,
						start - orig_start);

			size = min_t(size_t, csum_end - start, end + 1 - start);

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0], csum_dest,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			bitmap_set(csum_bitmap,
				(start - orig_start) >> fs_info->sectorsize_bits,
				size >> fs_info->sectorsize_bits);

			start += size;
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	if (free_path)
		btrfs_free_path(path);
	return ret;
}
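/*
 * Bitmap layout used by btrfs_lookup_csums_bitmap() (illustrative, assuming
 * 4 KiB sectors): bit i of @csum_bitmap corresponds to the sector at
 * orig_start + (i << sectorsize_bits), and its checksum, if present, lives
 * at csum_buf + i * csum_size. Searching [X, X + 32 KiB) with checksums only
 * for the first 16 KiB would set bits 0-3 and leave bits 4-7 clear.
 */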
/*
 * Calculate checksums of the data contained inside a bio.
 */
int btrfs_csum_one_bio(struct btrfs_bio *bbio)
{
	struct btrfs_ordered_extent *ordered = bbio->ordered;
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct bio *bio = &bbio->bio;
	struct btrfs_ordered_sum *sums;
	struct bvec_iter iter = bio->bi_iter;
	phys_addr_t paddr;
	const u32 blocksize = fs_info->sectorsize;
	int index;
	unsigned nofs_flag;

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	sums->logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	btrfs_bio_for_each_block(paddr, bio, &iter, blocksize) {
		btrfs_calculate_block_csum(fs_info, paddr, sums->sums + index);
		index += fs_info->csum_size;
	}

	bbio->sums = sums;
	btrfs_add_ordered_sum(ordered, sums);
	return 0;
}

/*
 * Nodatasum I/O on zoned file systems still requires a btrfs_ordered_sum to
 * record the updated logical address on Zone Append completion.
 * Allocate just the structure with an empty sums array here for that case.
 */
int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
{
	bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS);
	if (!bbio->sums)
		return -ENOMEM;
	bbio->sums->len = bbio->bio.bi_iter.bi_size;
	bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	btrfs_add_ordered_sum(bbio->ordered, bbio->sums);
	return 0;
}
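/*
 * Note on the allocation pattern in btrfs_csum_one_bio() above: the
 * memalloc_nofs_save()/memalloc_nofs_restore() pair is the standard
 * scoped-NOFS idiom. The allocation itself uses GFP_KERNEL, but within the
 * scope reclaim is prevented from recursing into the filesystem, which is
 * equivalent in effect to GFP_NOFS while presumably still letting kvzalloc()
 * fall back to vmalloc for large checksum arrays.
 */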
/*
 * Remove one checksum overlapping a range.
 *
 * This expects the key to describe the csum pointed to by the path, and it
 * expects the csum to overlap the range [bytenr, bytenr + len).
 *
 * The csum should not be entirely contained in the range and the range should
 * not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the overlap,
 * and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	const u32 csum_size = fs_info->csum_size;
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
	csum_end <<= blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(trans, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 *   we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(trans, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(trans, path, key);
	} else {
		BUG();
	}
}
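/*
 * A concrete example of the tail-truncation case above (illustrative,
 * assuming 4 KiB sectors and crc32c): a csum item at key->offset == 1 MiB
 * with 64 checksums covers [1 MiB, 1 MiB + 256 KiB). Deleting checksums for
 * bytenr == 1 MiB + 192 KiB with len == 128 KiB keeps only the first
 * (192 KiB >> 12) == 48 checksums, so the item is truncated in place to
 * 48 * 4 == 192 bytes.
 */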
/*
 * Delete the csum items from the csum tree for a given range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret = 0;
	const u32 csum_size = fs_info->csum_size;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	ASSERT(btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
	       btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.type = BTRFS_EXTENT_CSUM_KEY;
		key.offset = end_byte - 1;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (unlikely(ret && ret != -EAGAIN)) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			ret = 0;

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(trans, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	return ret;
}

static int find_next_csum_offset(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 *next_offset)
{
	const u32 nritems = btrfs_header_nritems(path->nodes[0]);
	struct btrfs_key found_key;
	int slot = path->slots[0] + 1;
	int ret;

	if (nritems == 0 || slot >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret > 0) {
			*next_offset = (u64)-1;
			return 0;
		}
		slot = path->slots[0];
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);

	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY)
		*next_offset = (u64)-1;
	else
		*next_offset = found_key.offset;

	return 0;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	const u32 csum_size = fs_info->csum_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->logical + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	file_key.offset = bytenr;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto out;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		/* We didn't find a csum item, insert one. */
		ret = find_next_csum_offset(root, path, &next_offset);
		if (ret < 0)
			goto out;
		found_next = 1;
		goto insert;
	}

	/*
	 * At this point, we know the tree has a checksum item that ends at an
	 * offset matching the start of the checksum range we want to insert.
	 * We try to extend that item as much as possible and then add as many
	 * checksums to it as they fit.
	 *
	 * First check if the leaf has enough free space for at least one
	 * checksum. If it has go directly to the item extension code, otherwise
	 * release the path and do a search for insertion before the extension.
	 */
	if (btrfs_leaf_free_space(leaf) >= csum_size) {
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sectorsize_bits;
		goto extend_csum;
	}

	btrfs_release_path(path);
	path->search_for_extension = 1;
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	path->search_for_extension = 0;
	if (ret < 0)
		goto out;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

extend_csum:
	if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		WARN_ON(tmp < 1);
		extend_nr = max_t(int, 1, tmp);

		/*
		 * A log tree can already have checksum items with a subset of
		 * the checksums we are trying to log. This can happen after
		 * doing a sequence of partial writes into prealloc extents and
		 * fsyncs in between, with a full fsync logging a larger subrange
		 * of an extent for which a previous fast fsync logged a smaller
		 * subrange. And this happens in particular due to merging file
		 * extent items when we complete an ordered extent for a range
		 * covered by a prealloc extent - this is done at
		 * btrfs_mark_extent_written().
		 *
		 * So if we try to extend the previous checksum item, which has
		 * a range that ends at the start of the range we want to insert,
		 * make sure we don't extend beyond the start offset of the next
		 * checksum item. If we are at the last item in the leaf, then
		 * forget the optimization of extending and add a new checksum
		 * item - it is not worth the complexity of releasing the path,
		 * getting the first key for the next leaf, repeat the btree
		 * search, etc, because log trees are temporary anyway and it
		 * would only save a few bytes of leaf space.
		 */
		if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
			if (path->slots[0] + 1 >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = find_next_csum_offset(root, path, &next_offset);
				if (ret < 0)
					goto out;
				found_next = 1;
				goto insert;
			}

			ret = find_next_csum_offset(root, path, &next_offset);
			if (ret < 0)
				goto out;

			tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
			if (tmp <= INT_MAX)
				extend_nr = min_t(int, extend_nr, tmp);
		}

		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size(leaf, path->slots[0]);
		diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(trans, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
			       fs_info->sectorsize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	if (ret < 0)
		goto out;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
					      btrfs_item_size(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	return ret;
}

void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     const struct btrfs_file_extent_item *fi,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;
	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	em->generation = btrfs_file_extent_generation(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		const u64 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);

		em->start = extent_start;
		em->len = btrfs_file_extent_end(path) - extent_start;
		if (disk_bytenr == 0) {
			em->disk_bytenr = EXTENT_MAP_HOLE;
			em->disk_num_bytes = 0;
			em->offset = 0;
			return;
		}
		em->disk_bytenr = disk_bytenr;
		em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		em->offset = btrfs_file_extent_offset(leaf, fi);
		if (compress_type != BTRFS_COMPRESS_NONE) {
			btrfs_extent_map_set_compression(em, compress_type);
		} else {
			/*
			 * Older kernels can create regular non-hole data
			 * extents with ram_bytes smaller than disk_num_bytes.
			 * Not a big deal, just always use disk_num_bytes
			 * for ram_bytes.
			 */
			em->ram_bytes = em->disk_num_bytes;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				em->flags |= EXTENT_FLAG_PREALLOC;
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		/* Tree-checker has ensured this. */
		ASSERT(extent_start == 0);

		em->disk_bytenr = EXTENT_MAP_INLINE;
		em->start = 0;
		em->len = fs_info->sectorsize;
		em->offset = 0;
		btrfs_extent_map_set_compression(em, compress_type);
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  btrfs_root_id(root));
	}
}

/*
 * Returns the end offset (non-inclusive) of the file extent item the given
 * path points to. If it points to an inline extent, the returned offset is
 * rounded up to the sector size.
 */
u64 btrfs_file_extent_end(const struct btrfs_path *path)
{
	const struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 end;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE)
		end = leaf->fs_info->sectorsize;
	else
		end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);

	return end;
}