// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "inode-item.h"
#include "disk-io.h"
#include "transaction.h"
#include "space-info.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"

/*
 * Find an inode ref with a matching name inside an INODE_REF item.
 *
 * An INODE_REF item packs multiple (struct btrfs_inode_ref + name) entries
 * back to back; walk all of them and compare each entry's name to @name.
 *
 * @leaf:  leaf containing the item
 * @slot:  slot of the INODE_REF item within @leaf
 * @name:  name to look for
 *
 * Returns a pointer to the matching btrfs_inode_ref inside the extent
 * buffer, or NULL if no entry with that name exists.
 */
struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
						   int slot,
						   const struct fscrypt_str *name)
{
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;

	item_size = btrfs_item_size(leaf, slot);
	ptr = btrfs_item_ptr_offset(leaf, slot);
	while (cur_offset < item_size) {
		ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
		len = btrfs_inode_ref_name_len(leaf, ref);
		name_ptr = (unsigned long)(ref + 1);
		/* Advance first so 'continue' below moves to the next entry. */
		cur_offset += len + sizeof(*ref);
		if (len != name->len)
			continue;
		if (memcmp_extent_buffer(leaf, name->name, name_ptr,
					 name->len) == 0)
			return ref;
	}
	return NULL;
}

/*
 * Find an extended inode ref inside an INODE_EXTREF item.
 *
 * Like btrfs_find_name_in_backref(), but for the extended ref format: an
 * entry only matches when both its parent objectid equals @ref_objectid
 * and its name equals @name.
 *
 * Returns a pointer to the matching btrfs_inode_extref inside the extent
 * buffer, or NULL if none matches.
 */
struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
		const struct extent_buffer *leaf, int slot, u64 ref_objectid,
		const struct fscrypt_str *name)
{
	struct btrfs_inode_extref *extref;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;

	item_size = btrfs_item_size(leaf, slot);
	ptr = btrfs_item_ptr_offset(leaf, slot);

	/*
	 * Search all extended backrefs in this item. We're only
	 * looking through any collisions so most of the time this is
	 * just going to compare against one buffer. If all is well,
	 * we'll return success and the inode ref object.
	 */
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
		name_ptr = (unsigned long)(&extref->name);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);

		if (ref_name_len == name->len &&
		    btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
		    (memcmp_extent_buffer(leaf, name->name, name_ptr,
					  name->len) == 0))
			return extref;

		cur_offset += ref_name_len + sizeof(*extref);
	}
	return NULL;
}

/*
 * Look up the extended inode ref for (@inode_objectid, @ref_objectid, @name).
 *
 * The item key's offset is the extref hash of (@ref_objectid, name), so
 * hash collisions land in the same item and are resolved by
 * btrfs_find_name_in_ext_backref().  On a match, @path points at the item
 * containing the ref.
 *
 * Returns NULL if no extref found, a pointer into the leaf on success, or
 * an ERR_PTR() if the tree search failed.
 */
struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
						     struct btrfs_path *path,
						     const struct fscrypt_str *name,
						     u64 inode_objectid, u64 ref_objectid)
{
	int ret;
	struct btrfs_key key;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0)
		return NULL;
	return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
					      ref_objectid, name);

}

/*
 * Delete the extended inode ref (@ref_objectid, @name) of inode
 * @inode_objectid from @root.
 *
 * If @index is non-NULL, the directory index number stored in the ref is
 * returned through it before removal.  When the ref is the only entry in
 * the item, the whole item is deleted; otherwise the entry is cut out of
 * the item and the item is shrunk in place.
 *
 * Returns 0 on success, -ENOENT if the ref does not exist (aborting the
 * transaction if the item exists but the name is missing, see below), or
 * another negative errno on failure.
 */
static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  const struct fscrypt_str *name,
				  u64 inode_objectid, u64 ref_objectid,
				  u64 *index)
{
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	int ret;
	int del_len = name->len + sizeof(*extref);
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		return -ENOENT;
	if (ret < 0)
		return ret;

	/*
	 * Sanity check - did we find the right item for this name?
	 * This should always succeed so error here will make the FS
	 * readonly.
	 */
	extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
						ref_objectid, name);
	if (unlikely(!extref)) {
		btrfs_abort_transaction(trans, -ENOENT);
		return -ENOENT;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (index)
		*index = btrfs_inode_extref_index(leaf, extref);

	if (del_len == item_size) {
		/* Common case only one ref in the item, remove the whole item. */
		return btrfs_del_item(trans, root, path);
	}

	ptr = (unsigned long)extref;
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);

	/* Slide the tail of the item over the entry being deleted. */
	memmove_extent_buffer(leaf, ptr, ptr + del_len,
			      item_size - (ptr + del_len - item_start));

	btrfs_truncate_item(trans, path, item_size - del_len, 1);

	return ret;
}

/*
 * Delete the inode ref (@ref_objectid, @name) of inode @inode_objectid.
 *
 * Tries the regular INODE_REF item first; if no item or no matching name
 * is found there, falls back to deleting an extended ref
 * (INODE_EXTREF) via btrfs_del_inode_extref().  If @index is non-NULL,
 * the directory index number of the deleted ref is returned through it.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, const struct fscrypt_str *name,
			u64 inode_objectid, u64 ref_objectid, u64 *index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;
	u32 sub_item_len;
	int ret;
	int search_ext_refs = 0;
	int del_len = name->len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = ref_objectid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		search_ext_refs = 1;
		goto out;
	} else if (ret < 0) {
		goto out;
	}

	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
	if (!ref) {
		ret = -ENOENT;
		search_ext_refs = 1;
		goto out;
	}
	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (index)
		*index = btrfs_inode_ref_index(leaf, ref);

	if (del_len == item_size) {
		/* Only entry in the item, remove the whole item. */
		ret = btrfs_del_item(trans, root, path);
		goto out;
	}
	ptr = (unsigned long)ref;
	sub_item_len = name->len + sizeof(*ref);
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
	/* Slide the tail of the item over the entry being deleted. */
	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
			      item_size - (ptr + sub_item_len - item_start));
	btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
out:
	btrfs_free_path(path);

	if (search_ext_refs) {
		/*
		 * No refs were found, or we could not find the
		 * name in our ref array. Find and remove the extended
		 * inode ref then.
		 */
		return btrfs_del_inode_extref(trans, root, name,
					      inode_objectid, ref_objectid, index);
	}

	return ret;
}

/*
 * Insert an extended inode ref into a tree.
 *
 * The caller must have checked against BTRFS_LINK_MAX already.
 *
 * If an INODE_EXTREF item already exists for this hash (key collision),
 * the item is extended and the new entry appended at its end, unless the
 * exact (parent, name) entry is already present, in which case -EEXIST is
 * returned.
 */
static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     const struct fscrypt_str *name,
				     u64 inode_objectid, u64 ref_objectid,
				     u64 index)
{
	struct btrfs_inode_extref *extref;
	int ret;
	int ins_len = name->len + sizeof(*extref);
	unsigned long ptr;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *leaf;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      ins_len);
	if (ret == -EEXIST) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
						   name))
			return ret;

		btrfs_extend_item(trans, path, ins_len);
		ret = 0;
	}
	if (ret < 0)
		return ret;

	/* New entry lives in the last ins_len bytes of the item. */
	leaf = path->nodes[0];
	ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
	ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
	extref = (struct btrfs_inode_extref *)ptr;

	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
	btrfs_set_inode_extref_index(path->nodes[0], extref, index);
	btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);

	ptr = (unsigned long)&extref->name;
	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);

	return 0;
}

/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, const struct fscrypt_str *name,
			   u64 inode_objectid, u64 ref_objectid, u64 index)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	int ret;
	int ins_len = name->len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = ref_objectid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Keep the path pointing at the leaf on insertion failure so the
	 * -EEXIST and -EOVERFLOW handling below can examine the item.
	 */
	path->skip_release_on_error = true;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      ins_len);
	if (ret == -EEXIST) {
		u32 old_size;
		ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name);
		if (ref)
			goto out;

		/* Item exists but name is new: append the entry at the end. */
		old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
		btrfs_extend_item(trans, path, ins_len);
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
		ret = 0;
	} else if (ret < 0) {
		/*
		 * Item would no longer fit in a leaf: report -EEXIST if the
		 * name is already there, otherwise -EMLINK so the caller (or
		 * the fallback below) switches to extended refs.
		 */
		if (ret == -EOVERFLOW) {
			if (btrfs_find_name_in_backref(path->nodes[0],
						       path->slots[0],
						       name))
				ret = -EEXIST;
			else
				ret = -EMLINK;
		}
		goto out;
	} else {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
	}
	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
out:
	btrfs_free_path(path);

	if (ret == -EMLINK) {
		struct btrfs_super_block *disk_super = fs_info->super_copy;
		/* We ran out of space in the ref array. Need to
		 * add an extended ref. */
		if (btrfs_super_incompat_flags(disk_super)
		    & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
			ret = btrfs_insert_inode_extref(trans, root, name,
							inode_objectid,
							ref_objectid, index);
	}

	return ret;
}

/*
 * Insert an empty INODE_ITEM for @objectid into @root.  On success @path
 * points at the new item so the caller can fill it in.
 */
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	return btrfs_insert_empty_item(trans, root, path, &key,
				       sizeof(struct btrfs_inode_item));
}

/*
 * Look up the item at @location in @root.
 *
 * @mod < 0 reserves space in the leaf for a pending deletion; any
 * non-zero @mod makes the search CoW the path.  When searching for a
 * ROOT_ITEM key with offset (u64)-1 and the exact key is not found,
 * fall back to the previous slot if its objectid and type match.
 *
 * Returns 0 when an item was positioned, > 0 when not found, < 0 on error.
 */
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
		       *root, struct btrfs_path *path,
		       struct btrfs_key *location, int mod)
{
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;

	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
	if (ret > 0 && location->type == BTRFS_ROOT_ITEM_KEY &&
	    location->offset == (u64)-1 && path->slots[0] != 0) {
		slot = path->slots[0] - 1;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid == location->objectid &&
		    found_key.type == location->type) {
			path->slots[0]--;
			return 0;
		}
	}
	return ret;
}

/*
 * Emit the truncate tracepoint matching @extent_type, or nothing when
 * @inode is NULL (callers may truncate without an inode, e.g. via a
 * control struct with no inode attached).
 */
static inline void btrfs_trace_truncate(const struct btrfs_inode *inode,
					const struct extent_buffer *leaf,
					const struct btrfs_file_extent_item *fi,
					u64 offset, int extent_type, int slot)
{
	if (!inode)
		return;
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		trace_btrfs_truncate_show_fi_inline(inode, leaf, fi, slot,
						    offset);
	else
		trace_btrfs_truncate_show_fi_regular(inode, leaf, fi, offset);
}

/*
 * Remove inode items from a given root.
 *
 * @trans:		A transaction handle.
 * @root:		The root from which to remove items.
 * @inode:		The inode whose items we want to remove.
 * @control:		The btrfs_truncate_control to control how and what we
 *			are truncating.
 *
 * Remove all keys associated with the inode from the given root that have a key
 * with a type greater than or equals to @min_type. When @min_type has a value of
 * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
 * greater than or equals to @new_size. If a file extent item that starts before
 * @new_size and ends after it is found, its length is adjusted.
 *
 * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is
 * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_truncate_control *control)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 new_size = control->new_size;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u32 found_type = (u8)-1;
	int del_item;
	int pending_del_nr = 0;	/* number of contiguous slots batched for deletion */
	int pending_del_slot = 0;	/* lowest slot of the pending batch */
	int extent_type = -1;
	int ret;
	u64 bytes_deleted = 0;
	bool be_nice = false;

	/* clear_extent_range requires an inode to clear ranges on. */
	ASSERT(control->inode || !control->clear_extent_range);
	ASSERT(new_size == 0 || control->min_type == BTRFS_EXTENT_DATA_KEY);

	control->last_size = new_size;
	control->sub_bytes = 0;

	/*
	 * For shareable roots we want to back off from time to time, this turns
	 * out to be subvolume roots, reloc roots, and data reloc roots.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		be_nice = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	/*
	 * Start from the highest possible key for this inode (type and
	 * offset maxed out) and walk the items backwards.
	 */
	key.objectid = control->ino;
	key.type = (u8)-1;
	key.offset = (u64)-1;

search_again:
	/*
	 * With a 16K leaf size and 128MiB extents, you can actually queue up a
	 * huge file in a single leaf.  Most of the time that bytes_deleted is
	 * > 0, it will be huge by the time we get here
	 */
	if (be_nice && bytes_deleted > SZ_32M &&
	    btrfs_should_end_transaction(trans)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = 0;
		/* There are no items in the tree for us to truncate, we're done */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		u64 clear_start = 0, clear_len = 0, extent_start = 0;
		bool refill_delayed_refs_rsv = false;

		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = found_key.type;

		/* Walked past this inode's items, nothing left to do. */
		if (found_key.objectid != control->ino)
			break;

		if (found_type < control->min_type)
			break;

		/*
		 * Compute the last file offset (inclusive, after item_end--
		 * below) covered by this item so it can be compared against
		 * new_size.
		 */
		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE)
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
				item_end += btrfs_file_extent_ram_bytes(leaf, fi);

			btrfs_trace_truncate(control->inode, leaf, fi,
					     found_key.offset, extent_type,
					     path->slots[0]);
			item_end--;
		}
		if (found_type > control->min_type) {
			del_item = 1;
		} else {
			/* Item entirely before new_size: truncation is done. */
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}

		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		control->extents_found++;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;

			clear_start = found_key.offset;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				/* Extent straddles new_size: shrink it in place. */
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
						found_key.offset,
						fs_info->sectorsize);
				clear_start = ALIGN(new_size, fs_info->sectorsize);

				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes - extent_num_bytes);
				/* extent_start == 0 means a hole, no bytes to account. */
				if (extent_start != 0)
					control->sub_bytes += num_dec;
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf, fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0)
					control->sub_bytes += num_dec;
			}
			clear_len = num_dec;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * We can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
			    btrfs_file_extent_compression(leaf, fi) == 0) {
				u32 size = (u32)(new_size - found_key.offset);

				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
				size = btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(trans, path, size, 1);
			} else if (!del_item) {
				/*
				 * We have to bail so the last_size is set to
				 * just before this extent.
				 */
				ret = BTRFS_NEED_TRUNCATE_BLOCK;
				break;
			} else {
				/*
				 * Inline extents are special, we just treat
				 * them as a full sector worth in the file
				 * extent tree just for simplicity sake.
				 */
				clear_len = fs_info->sectorsize;
			}

			control->sub_bytes += item_end + 1 - new_size;
		}
delete:
		/*
		 * We only want to clear the file extent range if we're
		 * modifying the actual inode's mapping, which is just the
		 * normal truncate path.
		 */
		if (control->clear_extent_range) {
			ret = btrfs_inode_clear_file_extent_range(control->inode,
						  clear_start, clear_len);
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
		}

		if (del_item) {
			/*
			 * Deletions are batched: since we walk backwards, a
			 * new slot may only extend the pending batch downward
			 * by exactly one.
			 */
			ASSERT(!pending_del_nr ||
			       ((path->slots[0] + 1) == pending_del_slot));

			control->last_size = found_key.offset;
			if (!pending_del_nr) {
				/* No pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (path->slots[0] + 1 == pending_del_slot) {
				/* Hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			}
		} else {
			control->last_size = new_size;
			break;
		}

		if (del_item && extent_start != 0 && !control->skip_ref_updates) {
			/* Queue dropping our data ref on the backing extent. */
			struct btrfs_ref ref = {
				.action = BTRFS_DROP_DELAYED_REF,
				.bytenr = extent_start,
				.num_bytes = extent_num_bytes,
				.owning_root = btrfs_root_id(root),
				.ref_root = btrfs_header_owner(leaf),
			};

			bytes_deleted += extent_num_bytes;

			btrfs_init_data_ref(&ref, control->ino, extent_offset,
					    btrfs_root_id(root), false);
			ret = btrfs_free_extent(trans, &ref);
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
				refill_delayed_refs_rsv = true;
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		/*
		 * Flush the pending batch and restart the search whenever we
		 * can't simply step to the previous slot in this leaf.
		 */
		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot ||
		    refill_delayed_refs_rsv) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (unlikely(ret)) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);

			/*
			 * We can generate a lot of delayed refs, so we need to
			 * throttle every once and a while and make sure we're
			 * adding enough space to keep up with the work we are
			 * generating.  Since we hold a transaction here we
			 * can't flush, and we don't want to FLUSH_LIMIT because
			 * we could have generated too many delayed refs to
			 * actually allocate, so just bail if we're short and
			 * let the normal reservation dance happen higher up.
			 */
			if (refill_delayed_refs_rsv) {
				ret = btrfs_delayed_refs_rsv_refill(fs_info,
					BTRFS_RESERVE_NO_FLUSH);
				if (ret) {
					ret = -EAGAIN;
					break;
				}
			}
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	/* Flush whatever is still batched, even when bailing with an error. */
	if (ret >= 0 && pending_del_nr) {
		int ret2;

		ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr);
		if (unlikely(ret2)) {
			btrfs_abort_transaction(trans, ret2);
			ret = ret2;
		}
	}

	ASSERT(control->last_size >= new_size);
	if (!ret && control->last_size > new_size)
		control->last_size = new_size;

	return ret;
}