1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/bio.h> 7 #include <linux/slab.h> 8 #include <linux/pagemap.h> 9 #include <linux/highmem.h> 10 #include <linux/sched/mm.h> 11 #include <crypto/hash.h> 12 #include "messages.h" 13 #include "misc.h" 14 #include "ctree.h" 15 #include "disk-io.h" 16 #include "transaction.h" 17 #include "bio.h" 18 #include "print-tree.h" 19 #include "compression.h" 20 #include "fs.h" 21 #include "accessors.h" 22 #include "file-item.h" 23 #include "super.h" 24 25 #define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \ 26 sizeof(struct btrfs_item) * 2) / \ 27 size) - 1)) 28 29 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ 30 PAGE_SIZE)) 31 32 /* 33 * Set inode's size according to filesystem options. 34 * 35 * @inode: inode we want to update the disk_i_size for 36 * @new_i_size: i_size we want to set to, 0 if we use i_size 37 * 38 * With NO_HOLES set this simply sets the disk_is_size to whatever i_size_read() 39 * returns as it is perfectly fine with a file that has holes without hole file 40 * extent items. 41 * 42 * However without NO_HOLES we need to only return the area that is contiguous 43 * from the 0 offset of the file. Otherwise we could end up adjust i_size up 44 * to an extent that has a gap in between. 45 * 46 * Finally new_i_size should only be set in the case of truncate where we're not 47 * ready to use i_size_read() as the limiter yet. 48 */ 49 void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size) 50 { 51 struct btrfs_fs_info *fs_info = inode->root->fs_info; 52 u64 start, end, i_size; 53 int ret; 54 55 i_size = new_i_size ?: i_size_read(&inode->vfs_inode); 56 if (btrfs_fs_incompat(fs_info, NO_HOLES)) { 57 inode->disk_i_size = i_size; 58 return; 59 } 60 61 spin_lock(&inode->lock); 62 ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start, 63 &end, EXTENT_DIRTY); 64 if (!ret && start == 0) 65 i_size = min(i_size, end + 1); 66 else 67 i_size = 0; 68 inode->disk_i_size = i_size; 69 spin_unlock(&inode->lock); 70 } 71 72 /* 73 * Mark range within a file as having a new extent inserted. 74 * 75 * @inode: inode being modified 76 * @start: start file offset of the file extent we've inserted 77 * @len: logical length of the file extent item 78 * 79 * Call when we are inserting a new file extent where there was none before. 80 * Does not need to call this in the case where we're replacing an existing file 81 * extent, however if not sure it's fine to call this multiple times. 82 * 83 * The start and len must match the file extent item, so thus must be sectorsize 84 * aligned. 85 */ 86 int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, 87 u64 len) 88 { 89 if (len == 0) 90 return 0; 91 92 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize)); 93 94 if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES)) 95 return 0; 96 return set_extent_bits(&inode->file_extent_tree, start, start + len - 1, 97 EXTENT_DIRTY); 98 } 99 100 /* 101 * Mark an inode range as not having a backing extent. 102 * 103 * @inode: inode being modified 104 * @start: start file offset of the file extent we've inserted 105 * @len: logical length of the file extent item 106 * 107 * Called when we drop a file extent, for example when we truncate. Doesn't 108 * need to be called for cases where we're replacing a file extent, like when 109 * we've COWed a file extent. 110 * 111 * The start and len must match the file extent item, so thus must be sectorsize 112 * aligned. 113 */ 114 int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, 115 u64 len) 116 { 117 if (len == 0) 118 return 0; 119 120 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) || 121 len == (u64)-1); 122 123 if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES)) 124 return 0; 125 return clear_extent_bit(&inode->file_extent_tree, start, 126 start + len - 1, EXTENT_DIRTY, NULL); 127 } 128 129 static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes) 130 { 131 ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize)); 132 133 return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size; 134 } 135 136 static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size) 137 { 138 ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size)); 139 140 return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits; 141 } 142 143 static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info) 144 { 145 u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum), 146 fs_info->csum_size); 147 148 return csum_size_to_bytes(fs_info, max_csum_size); 149 } 150 151 /* 152 * Calculate the total size needed to allocate for an ordered sum structure 153 * spanning @bytes in the file. 154 */ 155 static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes) 156 { 157 return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes); 158 } 159 160 int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans, 161 struct btrfs_root *root, 162 u64 objectid, u64 pos, u64 num_bytes) 163 { 164 int ret = 0; 165 struct btrfs_file_extent_item *item; 166 struct btrfs_key file_key; 167 struct btrfs_path *path; 168 struct extent_buffer *leaf; 169 170 path = btrfs_alloc_path(); 171 if (!path) 172 return -ENOMEM; 173 file_key.objectid = objectid; 174 file_key.offset = pos; 175 file_key.type = BTRFS_EXTENT_DATA_KEY; 176 177 ret = btrfs_insert_empty_item(trans, root, path, &file_key, 178 sizeof(*item)); 179 if (ret < 0) 180 goto out; 181 BUG_ON(ret); /* Can't happen */ 182 leaf = path->nodes[0]; 183 item = btrfs_item_ptr(leaf, path->slots[0], 184 struct btrfs_file_extent_item); 185 btrfs_set_file_extent_disk_bytenr(leaf, item, 0); 186 btrfs_set_file_extent_disk_num_bytes(leaf, item, 0); 187 btrfs_set_file_extent_offset(leaf, item, 0); 188 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes); 189 btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes); 190 btrfs_set_file_extent_generation(leaf, item, trans->transid); 191 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 192 btrfs_set_file_extent_compression(leaf, item, 0); 193 btrfs_set_file_extent_encryption(leaf, item, 0); 194 btrfs_set_file_extent_other_encoding(leaf, item, 0); 195 196 btrfs_mark_buffer_dirty(leaf); 197 out: 198 btrfs_free_path(path); 199 return ret; 200 } 201 202 static struct btrfs_csum_item * 203 btrfs_lookup_csum(struct btrfs_trans_handle *trans, 204 struct btrfs_root *root, 205 struct btrfs_path *path, 206 u64 bytenr, int cow) 207 { 208 struct btrfs_fs_info *fs_info = root->fs_info; 209 int ret; 210 struct btrfs_key file_key; 211 struct btrfs_key found_key; 212 struct btrfs_csum_item *item; 213 struct extent_buffer *leaf; 214 u64 csum_offset = 0; 215 const u32 csum_size = fs_info->csum_size; 216 int csums_in_item; 217 218 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 219 file_key.offset = bytenr; 220 file_key.type = BTRFS_EXTENT_CSUM_KEY; 221 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); 222 if (ret < 0) 223 goto fail; 224 leaf = path->nodes[0]; 225 if (ret > 0) { 226 ret = 1; 227 if (path->slots[0] == 0) 228 goto fail; 229 path->slots[0]--; 230 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 231 if (found_key.type != BTRFS_EXTENT_CSUM_KEY) 232 goto fail; 233 234 csum_offset = (bytenr - found_key.offset) >> 235 fs_info->sectorsize_bits; 236 csums_in_item = btrfs_item_size(leaf, path->slots[0]); 237 csums_in_item /= csum_size; 238 239 if (csum_offset == csums_in_item) { 240 ret = -EFBIG; 241 goto fail; 242 } else if (csum_offset > csums_in_item) { 243 goto fail; 244 } 245 } 246 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); 247 item = (struct btrfs_csum_item *)((unsigned char *)item + 248 csum_offset * csum_size); 249 return item; 250 fail: 251 if (ret > 0) 252 ret = -ENOENT; 253 return ERR_PTR(ret); 254 } 255 256 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, 257 struct btrfs_root *root, 258 struct btrfs_path *path, u64 objectid, 259 u64 offset, int mod) 260 { 261 struct btrfs_key file_key; 262 int ins_len = mod < 0 ? -1 : 0; 263 int cow = mod != 0; 264 265 file_key.objectid = objectid; 266 file_key.offset = offset; 267 file_key.type = BTRFS_EXTENT_DATA_KEY; 268 269 return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow); 270 } 271 272 /* 273 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and 274 * store the result to @dst. 275 * 276 * Return >0 for the number of sectors we found. 277 * Return 0 for the range [disk_bytenr, disk_bytenr + sectorsize) has no csum 278 * for it. Caller may want to try next sector until one range is hit. 279 * Return <0 for fatal error. 280 */ 281 static int search_csum_tree(struct btrfs_fs_info *fs_info, 282 struct btrfs_path *path, u64 disk_bytenr, 283 u64 len, u8 *dst) 284 { 285 struct btrfs_root *csum_root; 286 struct btrfs_csum_item *item = NULL; 287 struct btrfs_key key; 288 const u32 sectorsize = fs_info->sectorsize; 289 const u32 csum_size = fs_info->csum_size; 290 u32 itemsize; 291 int ret; 292 u64 csum_start; 293 u64 csum_len; 294 295 ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) && 296 IS_ALIGNED(len, sectorsize)); 297 298 /* Check if the current csum item covers disk_bytenr */ 299 if (path->nodes[0]) { 300 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 301 struct btrfs_csum_item); 302 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 303 itemsize = btrfs_item_size(path->nodes[0], path->slots[0]); 304 305 csum_start = key.offset; 306 csum_len = (itemsize / csum_size) * sectorsize; 307 308 if (in_range(disk_bytenr, csum_start, csum_len)) 309 goto found; 310 } 311 312 /* Current item doesn't contain the desired range, search again */ 313 btrfs_release_path(path); 314 csum_root = btrfs_csum_root(fs_info, disk_bytenr); 315 item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0); 316 if (IS_ERR(item)) { 317 ret = PTR_ERR(item); 318 goto out; 319 } 320 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 321 itemsize = btrfs_item_size(path->nodes[0], path->slots[0]); 322 323 csum_start = key.offset; 324 csum_len = (itemsize / csum_size) * sectorsize; 325 ASSERT(in_range(disk_bytenr, csum_start, csum_len)); 326 327 found: 328 ret = (min(csum_start + csum_len, disk_bytenr + len) - 329 disk_bytenr) >> fs_info->sectorsize_bits; 330 read_extent_buffer(path->nodes[0], dst, (unsigned long)item, 331 ret * csum_size); 332 out: 333 if (ret == -ENOENT || ret == -EFBIG) 334 ret = 0; 335 return ret; 336 } 337 338 /* 339 * Locate the file_offset of @cur_disk_bytenr of a @bio. 340 * 341 * Bio of btrfs represents read range of 342 * [bi_sector << 9, bi_sector << 9 + bi_size). 343 * Knowing this, we can iterate through each bvec to locate the page belong to 344 * @cur_disk_bytenr and get the file offset. 345 * 346 * @inode is used to determine if the bvec page really belongs to @inode. 347 * 348 * Return 0 if we can't find the file offset 349 * Return >0 if we find the file offset and restore it to @file_offset_ret 350 */ 351 static int search_file_offset_in_bio(struct bio *bio, struct inode *inode, 352 u64 disk_bytenr, u64 *file_offset_ret) 353 { 354 struct bvec_iter iter; 355 struct bio_vec bvec; 356 u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT; 357 int ret = 0; 358 359 bio_for_each_segment(bvec, bio, iter) { 360 struct page *page = bvec.bv_page; 361 362 if (cur > disk_bytenr) 363 break; 364 if (cur + bvec.bv_len <= disk_bytenr) { 365 cur += bvec.bv_len; 366 continue; 367 } 368 ASSERT(in_range(disk_bytenr, cur, bvec.bv_len)); 369 if (page->mapping && page->mapping->host && 370 page->mapping->host == inode) { 371 ret = 1; 372 *file_offset_ret = page_offset(page) + bvec.bv_offset + 373 disk_bytenr - cur; 374 break; 375 } 376 } 377 return ret; 378 } 379 380 /* 381 * Lookup the checksum for the read bio in csum tree. 382 * 383 * @inode: inode that the bio is for. 384 * @bio: bio to look up. 385 * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return 386 * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If 387 * NULL, the checksum buffer is allocated and returned in 388 * btrfs_bio(bio)->csum instead. 389 * 390 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise. 391 */ 392 blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst) 393 { 394 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 395 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 396 struct btrfs_bio *bbio = NULL; 397 struct btrfs_path *path; 398 const u32 sectorsize = fs_info->sectorsize; 399 const u32 csum_size = fs_info->csum_size; 400 u32 orig_len = bio->bi_iter.bi_size; 401 u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; 402 u64 cur_disk_bytenr; 403 u8 *csum; 404 const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits; 405 int count = 0; 406 blk_status_t ret = BLK_STS_OK; 407 408 if ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) || 409 test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) 410 return BLK_STS_OK; 411 412 /* 413 * This function is only called for read bio. 414 * 415 * This means two things: 416 * - All our csums should only be in csum tree 417 * No ordered extents csums, as ordered extents are only for write 418 * path. 419 * - No need to bother any other info from bvec 420 * Since we're looking up csums, the only important info is the 421 * disk_bytenr and the length, which can be extracted from bi_iter 422 * directly. 423 */ 424 ASSERT(bio_op(bio) == REQ_OP_READ); 425 path = btrfs_alloc_path(); 426 if (!path) 427 return BLK_STS_RESOURCE; 428 429 if (!dst) { 430 bbio = btrfs_bio(bio); 431 432 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 433 bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS); 434 if (!bbio->csum) { 435 btrfs_free_path(path); 436 return BLK_STS_RESOURCE; 437 } 438 } else { 439 bbio->csum = bbio->csum_inline; 440 } 441 csum = bbio->csum; 442 } else { 443 csum = dst; 444 } 445 446 /* 447 * If requested number of sectors is larger than one leaf can contain, 448 * kick the readahead for csum tree. 449 */ 450 if (nblocks > fs_info->csums_per_leaf) 451 path->reada = READA_FORWARD; 452 453 /* 454 * the free space stuff is only read when it hasn't been 455 * updated in the current transaction. So, we can safely 456 * read from the commit root and sidestep a nasty deadlock 457 * between reading the free space cache and updating the csum tree. 458 */ 459 if (btrfs_is_free_space_inode(BTRFS_I(inode))) { 460 path->search_commit_root = 1; 461 path->skip_locking = 1; 462 } 463 464 for (cur_disk_bytenr = orig_disk_bytenr; 465 cur_disk_bytenr < orig_disk_bytenr + orig_len; 466 cur_disk_bytenr += (count * sectorsize)) { 467 u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr; 468 unsigned int sector_offset; 469 u8 *csum_dst; 470 471 /* 472 * Although both cur_disk_bytenr and orig_disk_bytenr is u64, 473 * we're calculating the offset to the bio start. 474 * 475 * Bio size is limited to UINT_MAX, thus unsigned int is large 476 * enough to contain the raw result, not to mention the right 477 * shifted result. 478 */ 479 ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX); 480 sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >> 481 fs_info->sectorsize_bits; 482 csum_dst = csum + sector_offset * csum_size; 483 484 count = search_csum_tree(fs_info, path, cur_disk_bytenr, 485 search_len, csum_dst); 486 if (count < 0) { 487 ret = errno_to_blk_status(count); 488 if (bbio) 489 btrfs_bio_free_csum(bbio); 490 break; 491 } 492 493 /* 494 * We didn't find a csum for this range. We need to make sure 495 * we complain loudly about this, because we are not NODATASUM. 496 * 497 * However for the DATA_RELOC inode we could potentially be 498 * relocating data extents for a NODATASUM inode, so the inode 499 * itself won't be marked with NODATASUM, but the extent we're 500 * copying is in fact NODATASUM. If we don't find a csum we 501 * assume this is the case. 502 */ 503 if (count == 0) { 504 memset(csum_dst, 0, csum_size); 505 count = 1; 506 507 if (BTRFS_I(inode)->root->root_key.objectid == 508 BTRFS_DATA_RELOC_TREE_OBJECTID) { 509 u64 file_offset; 510 int ret; 511 512 ret = search_file_offset_in_bio(bio, inode, 513 cur_disk_bytenr, &file_offset); 514 if (ret) 515 set_extent_bits(io_tree, file_offset, 516 file_offset + sectorsize - 1, 517 EXTENT_NODATASUM); 518 } else { 519 btrfs_warn_rl(fs_info, 520 "csum hole found for disk bytenr range [%llu, %llu)", 521 cur_disk_bytenr, cur_disk_bytenr + sectorsize); 522 } 523 } 524 } 525 526 btrfs_free_path(path); 527 return ret; 528 } 529 530 int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end, 531 struct list_head *list, int search_commit, 532 bool nowait) 533 { 534 struct btrfs_fs_info *fs_info = root->fs_info; 535 struct btrfs_key key; 536 struct btrfs_path *path; 537 struct extent_buffer *leaf; 538 struct btrfs_ordered_sum *sums; 539 struct btrfs_csum_item *item; 540 LIST_HEAD(tmplist); 541 int ret; 542 543 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 544 IS_ALIGNED(end + 1, fs_info->sectorsize)); 545 546 path = btrfs_alloc_path(); 547 if (!path) 548 return -ENOMEM; 549 550 path->nowait = nowait; 551 if (search_commit) { 552 path->skip_locking = 1; 553 path->reada = READA_FORWARD; 554 path->search_commit_root = 1; 555 } 556 557 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 558 key.offset = start; 559 key.type = BTRFS_EXTENT_CSUM_KEY; 560 561 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 562 if (ret < 0) 563 goto fail; 564 if (ret > 0 && path->slots[0] > 0) { 565 leaf = path->nodes[0]; 566 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 567 568 /* 569 * There are two cases we can hit here for the previous csum 570 * item: 571 * 572 * |<- search range ->| 573 * |<- csum item ->| 574 * 575 * Or 576 * |<- search range ->| 577 * |<- csum item ->| 578 * 579 * Check if the previous csum item covers the leading part of 580 * the search range. If so we have to start from previous csum 581 * item. 582 */ 583 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && 584 key.type == BTRFS_EXTENT_CSUM_KEY) { 585 if (bytes_to_csum_size(fs_info, start - key.offset) < 586 btrfs_item_size(leaf, path->slots[0] - 1)) 587 path->slots[0]--; 588 } 589 } 590 591 while (start <= end) { 592 u64 csum_end; 593 594 leaf = path->nodes[0]; 595 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 596 ret = btrfs_next_leaf(root, path); 597 if (ret < 0) 598 goto fail; 599 if (ret > 0) 600 break; 601 leaf = path->nodes[0]; 602 } 603 604 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 605 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 606 key.type != BTRFS_EXTENT_CSUM_KEY || 607 key.offset > end) 608 break; 609 610 if (key.offset > start) 611 start = key.offset; 612 613 csum_end = key.offset + csum_size_to_bytes(fs_info, 614 btrfs_item_size(leaf, path->slots[0])); 615 if (csum_end <= start) { 616 path->slots[0]++; 617 continue; 618 } 619 620 csum_end = min(csum_end, end + 1); 621 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 622 struct btrfs_csum_item); 623 while (start < csum_end) { 624 unsigned long offset; 625 size_t size; 626 627 size = min_t(size_t, csum_end - start, 628 max_ordered_sum_bytes(fs_info)); 629 sums = kzalloc(btrfs_ordered_sum_size(fs_info, size), 630 GFP_NOFS); 631 if (!sums) { 632 ret = -ENOMEM; 633 goto fail; 634 } 635 636 sums->bytenr = start; 637 sums->len = (int)size; 638 639 offset = bytes_to_csum_size(fs_info, start - key.offset); 640 641 read_extent_buffer(path->nodes[0], 642 sums->sums, 643 ((unsigned long)item) + offset, 644 bytes_to_csum_size(fs_info, size)); 645 646 start += size; 647 list_add_tail(&sums->list, &tmplist); 648 } 649 path->slots[0]++; 650 } 651 ret = 0; 652 fail: 653 while (ret < 0 && !list_empty(&tmplist)) { 654 sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list); 655 list_del(&sums->list); 656 kfree(sums); 657 } 658 list_splice_tail(&tmplist, list); 659 660 btrfs_free_path(path); 661 return ret; 662 } 663 664 /* 665 * Do the same work as btrfs_lookup_csums_list(), the difference is in how 666 * we return the result. 667 * 668 * This version will set the corresponding bits in @csum_bitmap to represent 669 * that there is a csum found. 670 * Each bit represents a sector. Thus caller should ensure @csum_buf passed 671 * in is large enough to contain all csums. 672 */ 673 int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end, 674 u8 *csum_buf, unsigned long *csum_bitmap) 675 { 676 struct btrfs_fs_info *fs_info = root->fs_info; 677 struct btrfs_key key; 678 struct btrfs_path *path; 679 struct extent_buffer *leaf; 680 struct btrfs_csum_item *item; 681 const u64 orig_start = start; 682 int ret; 683 684 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 685 IS_ALIGNED(end + 1, fs_info->sectorsize)); 686 687 path = btrfs_alloc_path(); 688 if (!path) 689 return -ENOMEM; 690 691 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 692 key.type = BTRFS_EXTENT_CSUM_KEY; 693 key.offset = start; 694 695 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 696 if (ret < 0) 697 goto fail; 698 if (ret > 0 && path->slots[0] > 0) { 699 leaf = path->nodes[0]; 700 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 701 702 /* 703 * There are two cases we can hit here for the previous csum 704 * item: 705 * 706 * |<- search range ->| 707 * |<- csum item ->| 708 * 709 * Or 710 * |<- search range ->| 711 * |<- csum item ->| 712 * 713 * Check if the previous csum item covers the leading part of 714 * the search range. If so we have to start from previous csum 715 * item. 716 */ 717 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && 718 key.type == BTRFS_EXTENT_CSUM_KEY) { 719 if (bytes_to_csum_size(fs_info, start - key.offset) < 720 btrfs_item_size(leaf, path->slots[0] - 1)) 721 path->slots[0]--; 722 } 723 } 724 725 while (start <= end) { 726 u64 csum_end; 727 728 leaf = path->nodes[0]; 729 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 730 ret = btrfs_next_leaf(root, path); 731 if (ret < 0) 732 goto fail; 733 if (ret > 0) 734 break; 735 leaf = path->nodes[0]; 736 } 737 738 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 739 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 740 key.type != BTRFS_EXTENT_CSUM_KEY || 741 key.offset > end) 742 break; 743 744 if (key.offset > start) 745 start = key.offset; 746 747 csum_end = key.offset + csum_size_to_bytes(fs_info, 748 btrfs_item_size(leaf, path->slots[0])); 749 if (csum_end <= start) { 750 path->slots[0]++; 751 continue; 752 } 753 754 csum_end = min(csum_end, end + 1); 755 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 756 struct btrfs_csum_item); 757 while (start < csum_end) { 758 unsigned long offset; 759 size_t size; 760 u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info, 761 start - orig_start); 762 763 size = min_t(size_t, csum_end - start, end + 1 - start); 764 765 offset = bytes_to_csum_size(fs_info, start - key.offset); 766 767 read_extent_buffer(path->nodes[0], csum_dest, 768 ((unsigned long)item) + offset, 769 bytes_to_csum_size(fs_info, size)); 770 771 bitmap_set(csum_bitmap, 772 (start - orig_start) >> fs_info->sectorsize_bits, 773 size >> fs_info->sectorsize_bits); 774 775 start += size; 776 } 777 path->slots[0]++; 778 } 779 ret = 0; 780 fail: 781 btrfs_free_path(path); 782 return ret; 783 } 784 785 /* 786 * Calculate checksums of the data contained inside a bio. 787 * 788 * @inode: Owner of the data inside the bio 789 * @bio: Contains the data to be checksummed 790 * @offset: If (u64)-1, @bio may contain discontiguous bio vecs, so the 791 * file offsets are determined from the page offsets in the bio. 792 * Otherwise, this is the starting file offset of the bio vecs in 793 * @bio, which must be contiguous. 794 * @one_ordered: If true, @bio only refers to one ordered extent. 795 */ 796 blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio, 797 u64 offset, bool one_ordered) 798 { 799 struct btrfs_fs_info *fs_info = inode->root->fs_info; 800 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 801 struct btrfs_ordered_sum *sums; 802 struct btrfs_ordered_extent *ordered = NULL; 803 const bool use_page_offsets = (offset == (u64)-1); 804 char *data; 805 struct bvec_iter iter; 806 struct bio_vec bvec; 807 int index; 808 unsigned int blockcount; 809 unsigned long total_bytes = 0; 810 unsigned long this_sum_bytes = 0; 811 int i; 812 unsigned nofs_flag; 813 814 nofs_flag = memalloc_nofs_save(); 815 sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), 816 GFP_KERNEL); 817 memalloc_nofs_restore(nofs_flag); 818 819 if (!sums) 820 return BLK_STS_RESOURCE; 821 822 sums->len = bio->bi_iter.bi_size; 823 INIT_LIST_HEAD(&sums->list); 824 825 sums->bytenr = bio->bi_iter.bi_sector << 9; 826 index = 0; 827 828 shash->tfm = fs_info->csum_shash; 829 830 bio_for_each_segment(bvec, bio, iter) { 831 if (use_page_offsets) 832 offset = page_offset(bvec.bv_page) + bvec.bv_offset; 833 834 if (!ordered) { 835 ordered = btrfs_lookup_ordered_extent(inode, offset); 836 /* 837 * The bio range is not covered by any ordered extent, 838 * must be a code logic error. 839 */ 840 if (unlikely(!ordered)) { 841 WARN(1, KERN_WARNING 842 "no ordered extent for root %llu ino %llu offset %llu\n", 843 inode->root->root_key.objectid, 844 btrfs_ino(inode), offset); 845 kvfree(sums); 846 return BLK_STS_IOERR; 847 } 848 } 849 850 blockcount = BTRFS_BYTES_TO_BLKS(fs_info, 851 bvec.bv_len + fs_info->sectorsize 852 - 1); 853 854 for (i = 0; i < blockcount; i++) { 855 if (!one_ordered && 856 !in_range(offset, ordered->file_offset, 857 ordered->num_bytes)) { 858 unsigned long bytes_left; 859 860 sums->len = this_sum_bytes; 861 this_sum_bytes = 0; 862 btrfs_add_ordered_sum(ordered, sums); 863 btrfs_put_ordered_extent(ordered); 864 865 bytes_left = bio->bi_iter.bi_size - total_bytes; 866 867 nofs_flag = memalloc_nofs_save(); 868 sums = kvzalloc(btrfs_ordered_sum_size(fs_info, 869 bytes_left), GFP_KERNEL); 870 memalloc_nofs_restore(nofs_flag); 871 BUG_ON(!sums); /* -ENOMEM */ 872 sums->len = bytes_left; 873 ordered = btrfs_lookup_ordered_extent(inode, 874 offset); 875 ASSERT(ordered); /* Logic error */ 876 sums->bytenr = (bio->bi_iter.bi_sector << 9) 877 + total_bytes; 878 index = 0; 879 } 880 881 data = bvec_kmap_local(&bvec); 882 crypto_shash_digest(shash, 883 data + (i * fs_info->sectorsize), 884 fs_info->sectorsize, 885 sums->sums + index); 886 kunmap_local(data); 887 index += fs_info->csum_size; 888 offset += fs_info->sectorsize; 889 this_sum_bytes += fs_info->sectorsize; 890 total_bytes += fs_info->sectorsize; 891 } 892 893 } 894 this_sum_bytes = 0; 895 btrfs_add_ordered_sum(ordered, sums); 896 btrfs_put_ordered_extent(ordered); 897 return 0; 898 } 899 900 /* 901 * Remove one checksum overlapping a range. 902 * 903 * This expects the key to describe the csum pointed to by the path, and it 904 * expects the csum to overlap the range [bytenr, len] 905 * 906 * The csum should not be entirely contained in the range and the range should 907 * not be entirely contained in the csum. 908 * 909 * This calls btrfs_truncate_item with the correct args based on the overlap, 910 * and fixes up the key as required. 911 */ 912 static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info, 913 struct btrfs_path *path, 914 struct btrfs_key *key, 915 u64 bytenr, u64 len) 916 { 917 struct extent_buffer *leaf; 918 const u32 csum_size = fs_info->csum_size; 919 u64 csum_end; 920 u64 end_byte = bytenr + len; 921 u32 blocksize_bits = fs_info->sectorsize_bits; 922 923 leaf = path->nodes[0]; 924 csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size; 925 csum_end <<= blocksize_bits; 926 csum_end += key->offset; 927 928 if (key->offset < bytenr && csum_end <= end_byte) { 929 /* 930 * [ bytenr - len ] 931 * [ ] 932 * [csum ] 933 * A simple truncate off the end of the item 934 */ 935 u32 new_size = (bytenr - key->offset) >> blocksize_bits; 936 new_size *= csum_size; 937 btrfs_truncate_item(path, new_size, 1); 938 } else if (key->offset >= bytenr && csum_end > end_byte && 939 end_byte > key->offset) { 940 /* 941 * [ bytenr - len ] 942 * [ ] 943 * [csum ] 944 * we need to truncate from the beginning of the csum 945 */ 946 u32 new_size = (csum_end - end_byte) >> blocksize_bits; 947 new_size *= csum_size; 948 949 btrfs_truncate_item(path, new_size, 0); 950 951 key->offset = end_byte; 952 btrfs_set_item_key_safe(fs_info, path, key); 953 } else { 954 BUG(); 955 } 956 } 957 958 /* 959 * Delete the csum items from the csum tree for a given range of bytes. 960 */ 961 int btrfs_del_csums(struct btrfs_trans_handle *trans, 962 struct btrfs_root *root, u64 bytenr, u64 len) 963 { 964 struct btrfs_fs_info *fs_info = trans->fs_info; 965 struct btrfs_path *path; 966 struct btrfs_key key; 967 u64 end_byte = bytenr + len; 968 u64 csum_end; 969 struct extent_buffer *leaf; 970 int ret = 0; 971 const u32 csum_size = fs_info->csum_size; 972 u32 blocksize_bits = fs_info->sectorsize_bits; 973 974 ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID || 975 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); 976 977 path = btrfs_alloc_path(); 978 if (!path) 979 return -ENOMEM; 980 981 while (1) { 982 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 983 key.offset = end_byte - 1; 984 key.type = BTRFS_EXTENT_CSUM_KEY; 985 986 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 987 if (ret > 0) { 988 ret = 0; 989 if (path->slots[0] == 0) 990 break; 991 path->slots[0]--; 992 } else if (ret < 0) { 993 break; 994 } 995 996 leaf = path->nodes[0]; 997 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 998 999 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 1000 key.type != BTRFS_EXTENT_CSUM_KEY) { 1001 break; 1002 } 1003 1004 if (key.offset >= end_byte) 1005 break; 1006 1007 csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size; 1008 csum_end <<= blocksize_bits; 1009 csum_end += key.offset; 1010 1011 /* this csum ends before we start, we're done */ 1012 if (csum_end <= bytenr) 1013 break; 1014 1015 /* delete the entire item, it is inside our range */ 1016 if (key.offset >= bytenr && csum_end <= end_byte) { 1017 int del_nr = 1; 1018 1019 /* 1020 * Check how many csum items preceding this one in this 1021 * leaf correspond to our range and then delete them all 1022 * at once. 1023 */ 1024 if (key.offset > bytenr && path->slots[0] > 0) { 1025 int slot = path->slots[0] - 1; 1026 1027 while (slot >= 0) { 1028 struct btrfs_key pk; 1029 1030 btrfs_item_key_to_cpu(leaf, &pk, slot); 1031 if (pk.offset < bytenr || 1032 pk.type != BTRFS_EXTENT_CSUM_KEY || 1033 pk.objectid != 1034 BTRFS_EXTENT_CSUM_OBJECTID) 1035 break; 1036 path->slots[0] = slot; 1037 del_nr++; 1038 key.offset = pk.offset; 1039 slot--; 1040 } 1041 } 1042 ret = btrfs_del_items(trans, root, path, 1043 path->slots[0], del_nr); 1044 if (ret) 1045 break; 1046 if (key.offset == bytenr) 1047 break; 1048 } else if (key.offset < bytenr && csum_end > end_byte) { 1049 unsigned long offset; 1050 unsigned long shift_len; 1051 unsigned long item_offset; 1052 /* 1053 * [ bytenr - len ] 1054 * [csum ] 1055 * 1056 * Our bytes are in the middle of the csum, 1057 * we need to split this item and insert a new one. 1058 * 1059 * But we can't drop the path because the 1060 * csum could change, get removed, extended etc. 1061 * 1062 * The trick here is the max size of a csum item leaves 1063 * enough room in the tree block for a single 1064 * item header. So, we split the item in place, 1065 * adding a new header pointing to the existing 1066 * bytes. Then we loop around again and we have 1067 * a nicely formed csum item that we can neatly 1068 * truncate. 1069 */ 1070 offset = (bytenr - key.offset) >> blocksize_bits; 1071 offset *= csum_size; 1072 1073 shift_len = (len >> blocksize_bits) * csum_size; 1074 1075 item_offset = btrfs_item_ptr_offset(leaf, 1076 path->slots[0]); 1077 1078 memzero_extent_buffer(leaf, item_offset + offset, 1079 shift_len); 1080 key.offset = bytenr; 1081 1082 /* 1083 * btrfs_split_item returns -EAGAIN when the 1084 * item changed size or key 1085 */ 1086 ret = btrfs_split_item(trans, root, path, &key, offset); 1087 if (ret && ret != -EAGAIN) { 1088 btrfs_abort_transaction(trans, ret); 1089 break; 1090 } 1091 ret = 0; 1092 1093 key.offset = end_byte - 1; 1094 } else { 1095 truncate_one_csum(fs_info, path, &key, bytenr, len); 1096 if (key.offset < bytenr) 1097 break; 1098 } 1099 btrfs_release_path(path); 1100 } 1101 btrfs_free_path(path); 1102 return ret; 1103 } 1104 1105 static int find_next_csum_offset(struct btrfs_root *root, 1106 struct btrfs_path *path, 1107 u64 *next_offset) 1108 { 1109 const u32 nritems = btrfs_header_nritems(path->nodes[0]); 1110 struct btrfs_key found_key; 1111 int slot = path->slots[0] + 1; 1112 int ret; 1113 1114 if (nritems == 0 || slot >= nritems) { 1115 ret = btrfs_next_leaf(root, path); 1116 if (ret < 0) { 1117 return ret; 1118 } else if (ret > 0) { 1119 *next_offset = (u64)-1; 1120 return 0; 1121 } 1122 slot = path->slots[0]; 1123 } 1124 1125 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); 1126 1127 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 1128 found_key.type != BTRFS_EXTENT_CSUM_KEY) 1129 *next_offset = (u64)-1; 1130 else 1131 *next_offset = found_key.offset; 1132 1133 return 0; 1134 } 1135 1136 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 1137 struct btrfs_root *root, 1138 struct btrfs_ordered_sum *sums) 1139 { 1140 struct btrfs_fs_info *fs_info = root->fs_info; 1141 struct btrfs_key file_key; 1142 struct btrfs_key found_key; 1143 struct btrfs_path *path; 1144 struct btrfs_csum_item *item; 1145 struct btrfs_csum_item *item_end; 1146 struct extent_buffer *leaf = NULL; 1147 u64 next_offset; 1148 u64 total_bytes = 0; 1149 u64 csum_offset; 1150 u64 bytenr; 1151 u32 ins_size; 1152 int index = 0; 1153 int found_next; 1154 int ret; 1155 const u32 csum_size = fs_info->csum_size; 1156 1157 path = btrfs_alloc_path(); 1158 if (!path) 1159 return -ENOMEM; 1160 again: 1161 next_offset = (u64)-1; 1162 found_next = 0; 1163 bytenr = sums->bytenr + total_bytes; 1164 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; 1165 file_key.offset = bytenr; 1166 file_key.type = BTRFS_EXTENT_CSUM_KEY; 1167 1168 item = btrfs_lookup_csum(trans, root, path, bytenr, 1); 1169 if (!IS_ERR(item)) { 1170 ret = 0; 1171 leaf = path->nodes[0]; 1172 item_end = btrfs_item_ptr(leaf, path->slots[0], 1173 struct btrfs_csum_item); 1174 item_end = (struct btrfs_csum_item *)((char *)item_end + 1175 btrfs_item_size(leaf, path->slots[0])); 1176 goto found; 1177 } 1178 ret = PTR_ERR(item); 1179 if (ret != -EFBIG && ret != -ENOENT) 1180 goto out; 1181 1182 if (ret == -EFBIG) { 1183 u32 item_size; 1184 /* we found one, but it isn't big enough yet */ 1185 leaf = path->nodes[0]; 1186 item_size = btrfs_item_size(leaf, path->slots[0]); 1187 if ((item_size / csum_size) >= 1188 MAX_CSUM_ITEMS(fs_info, csum_size)) { 1189 /* already at max size, make a new one */ 1190 goto insert; 1191 } 1192 } else { 1193 /* We didn't find a csum item, insert one. */ 1194 ret = find_next_csum_offset(root, path, &next_offset); 1195 if (ret < 0) 1196 goto out; 1197 found_next = 1; 1198 goto insert; 1199 } 1200 1201 /* 1202 * At this point, we know the tree has a checksum item that ends at an 1203 * offset matching the start of the checksum range we want to insert. 1204 * We try to extend that item as much as possible and then add as many 1205 * checksums to it as they fit. 1206 * 1207 * First check if the leaf has enough free space for at least one 1208 * checksum. If it has go directly to the item extension code, otherwise 1209 * release the path and do a search for insertion before the extension. 1210 */ 1211 if (btrfs_leaf_free_space(leaf) >= csum_size) { 1212 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1213 csum_offset = (bytenr - found_key.offset) >> 1214 fs_info->sectorsize_bits; 1215 goto extend_csum; 1216 } 1217 1218 btrfs_release_path(path); 1219 path->search_for_extension = 1; 1220 ret = btrfs_search_slot(trans, root, &file_key, path, 1221 csum_size, 1); 1222 path->search_for_extension = 0; 1223 if (ret < 0) 1224 goto out; 1225 1226 if (ret > 0) { 1227 if (path->slots[0] == 0) 1228 goto insert; 1229 path->slots[0]--; 1230 } 1231 1232 leaf = path->nodes[0]; 1233 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1234 csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits; 1235 1236 if (found_key.type != BTRFS_EXTENT_CSUM_KEY || 1237 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || 1238 csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) { 1239 goto insert; 1240 } 1241 1242 extend_csum: 1243 if (csum_offset == btrfs_item_size(leaf, path->slots[0]) / 1244 csum_size) { 1245 int extend_nr; 1246 u64 tmp; 1247 u32 diff; 1248 1249 tmp = sums->len - total_bytes; 1250 tmp >>= fs_info->sectorsize_bits; 1251 WARN_ON(tmp < 1); 1252 extend_nr = max_t(int, 1, tmp); 1253 1254 /* 1255 * A log tree can already have checksum items with a subset of 1256 * the checksums we are trying to log. This can happen after 1257 * doing a sequence of partial writes into prealloc extents and 1258 * fsyncs in between, with a full fsync logging a larger subrange 1259 * of an extent for which a previous fast fsync logged a smaller 1260 * subrange. And this happens in particular due to merging file 1261 * extent items when we complete an ordered extent for a range 1262 * covered by a prealloc extent - this is done at 1263 * btrfs_mark_extent_written(). 1264 * 1265 * So if we try to extend the previous checksum item, which has 1266 * a range that ends at the start of the range we want to insert, 1267 * make sure we don't extend beyond the start offset of the next 1268 * checksum item. If we are at the last item in the leaf, then 1269 * forget the optimization of extending and add a new checksum 1270 * item - it is not worth the complexity of releasing the path, 1271 * getting the first key for the next leaf, repeat the btree 1272 * search, etc, because log trees are temporary anyway and it 1273 * would only save a few bytes of leaf space. 1274 */ 1275 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { 1276 if (path->slots[0] + 1 >= 1277 btrfs_header_nritems(path->nodes[0])) { 1278 ret = find_next_csum_offset(root, path, &next_offset); 1279 if (ret < 0) 1280 goto out; 1281 found_next = 1; 1282 goto insert; 1283 } 1284 1285 ret = find_next_csum_offset(root, path, &next_offset); 1286 if (ret < 0) 1287 goto out; 1288 1289 tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits; 1290 if (tmp <= INT_MAX) 1291 extend_nr = min_t(int, extend_nr, tmp); 1292 } 1293 1294 diff = (csum_offset + extend_nr) * csum_size; 1295 diff = min(diff, 1296 MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); 1297 1298 diff = diff - btrfs_item_size(leaf, path->slots[0]); 1299 diff = min_t(u32, btrfs_leaf_free_space(leaf), diff); 1300 diff /= csum_size; 1301 diff *= csum_size; 1302 1303 btrfs_extend_item(path, diff); 1304 ret = 0; 1305 goto csum; 1306 } 1307 1308 insert: 1309 btrfs_release_path(path); 1310 csum_offset = 0; 1311 if (found_next) { 1312 u64 tmp; 1313 1314 tmp = sums->len - total_bytes; 1315 tmp >>= fs_info->sectorsize_bits; 1316 tmp = min(tmp, (next_offset - file_key.offset) >> 1317 fs_info->sectorsize_bits); 1318 1319 tmp = max_t(u64, 1, tmp); 1320 tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size)); 1321 ins_size = csum_size * tmp; 1322 } else { 1323 ins_size = csum_size; 1324 } 1325 ret = btrfs_insert_empty_item(trans, root, path, &file_key, 1326 ins_size); 1327 if (ret < 0) 1328 goto out; 1329 if (WARN_ON(ret != 0)) 1330 goto out; 1331 leaf = path->nodes[0]; 1332 csum: 1333 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); 1334 item_end = (struct btrfs_csum_item *)((unsigned char *)item + 1335 btrfs_item_size(leaf, path->slots[0])); 1336 item = (struct btrfs_csum_item *)((unsigned char *)item + 1337 csum_offset * csum_size); 1338 found: 1339 ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits; 1340 ins_size *= csum_size; 1341 ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item, 1342 ins_size); 1343 write_extent_buffer(leaf, sums->sums + index, (unsigned long)item, 1344 ins_size); 1345 1346 index += ins_size; 1347 ins_size /= csum_size; 1348 total_bytes += ins_size * fs_info->sectorsize; 1349 1350 btrfs_mark_buffer_dirty(path->nodes[0]); 1351 if (total_bytes < sums->len) { 1352 btrfs_release_path(path); 1353 cond_resched(); 1354 goto again; 1355 } 1356 out: 1357 btrfs_free_path(path); 1358 return ret; 1359 } 1360 1361 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, 1362 const struct btrfs_path *path, 1363 struct btrfs_file_extent_item *fi, 1364 struct extent_map *em) 1365 { 1366 struct btrfs_fs_info *fs_info = inode->root->fs_info; 1367 struct btrfs_root *root = inode->root; 1368 struct extent_buffer *leaf = path->nodes[0]; 1369 const int slot = path->slots[0]; 1370 struct btrfs_key key; 1371 u64 extent_start, extent_end; 1372 u64 bytenr; 1373 u8 type = btrfs_file_extent_type(leaf, fi); 1374 int compress_type = btrfs_file_extent_compression(leaf, fi); 1375 1376 btrfs_item_key_to_cpu(leaf, &key, slot); 1377 extent_start = key.offset; 1378 extent_end = btrfs_file_extent_end(path); 1379 em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1380 em->generation = btrfs_file_extent_generation(leaf, fi); 1381 if (type == BTRFS_FILE_EXTENT_REG || 1382 type == BTRFS_FILE_EXTENT_PREALLOC) { 1383 em->start = extent_start; 1384 em->len = extent_end - extent_start; 1385 em->orig_start = extent_start - 1386 btrfs_file_extent_offset(leaf, fi); 1387 em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 1388 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1389 if (bytenr == 0) { 1390 em->block_start = EXTENT_MAP_HOLE; 1391 return; 1392 } 1393 if (compress_type != BTRFS_COMPRESS_NONE) { 1394 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 1395 em->compress_type = compress_type; 1396 em->block_start = bytenr; 1397 em->block_len = em->orig_block_len; 1398 } else { 1399 bytenr += btrfs_file_extent_offset(leaf, fi); 1400 em->block_start = bytenr; 1401 em->block_len = em->len; 1402 if (type == BTRFS_FILE_EXTENT_PREALLOC) 1403 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 1404 } 1405 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 1406 em->block_start = EXTENT_MAP_INLINE; 1407 em->start = extent_start; 1408 em->len = extent_end - extent_start; 1409 /* 1410 * Initialize orig_start and block_len with the same values 1411 * as in inode.c:btrfs_get_extent(). 1412 */ 1413 em->orig_start = EXTENT_MAP_HOLE; 1414 em->block_len = (u64)-1; 1415 em->compress_type = compress_type; 1416 if (compress_type != BTRFS_COMPRESS_NONE) 1417 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 1418 } else { 1419 btrfs_err(fs_info, 1420 "unknown file extent item type %d, inode %llu, offset %llu, " 1421 "root %llu", type, btrfs_ino(inode), extent_start, 1422 root->root_key.objectid); 1423 } 1424 } 1425 1426 /* 1427 * Returns the end offset (non inclusive) of the file extent item the given path 1428 * points to. If it points to an inline extent, the returned offset is rounded 1429 * up to the sector size. 1430 */ 1431 u64 btrfs_file_extent_end(const struct btrfs_path *path) 1432 { 1433 const struct extent_buffer *leaf = path->nodes[0]; 1434 const int slot = path->slots[0]; 1435 struct btrfs_file_extent_item *fi; 1436 struct btrfs_key key; 1437 u64 end; 1438 1439 btrfs_item_key_to_cpu(leaf, &key, slot); 1440 ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); 1441 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 1442 1443 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) { 1444 end = btrfs_file_extent_ram_bytes(leaf, fi); 1445 end = ALIGN(key.offset + end, leaf->fs_info->sectorsize); 1446 } else { 1447 end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 1448 } 1449 1450 return end; 1451 } 1452