// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "inode-item.h"
#include "disk-io.h"
#include "transaction.h"
#include "space-info.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"

struct btrfs_inode_ref *btrfs_find_name_in_backref(const struct extent_buffer *leaf,
						   int slot,
						   const struct fscrypt_str *name)
{
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;

	item_size = btrfs_item_size(leaf, slot);
	ptr = btrfs_item_ptr_offset(leaf, slot);
	while (cur_offset < item_size) {
		ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
		len = btrfs_inode_ref_name_len(leaf, ref);
		name_ptr = (unsigned long)(ref + 1);
		cur_offset += len + sizeof(*ref);
		if (len != name->len)
			continue;
		if (memcmp_extent_buffer(leaf, name->name, name_ptr,
					 name->len) == 0)
			return ref;
	}
	return NULL;
}

struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
		const struct extent_buffer *leaf, int slot, u64 ref_objectid,
		const struct fscrypt_str *name)
{
	struct btrfs_inode_extref *extref;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;

	item_size = btrfs_item_size(leaf, slot);
	ptr = btrfs_item_ptr_offset(leaf, slot);

	/*
	 * Search all extended backrefs in this item. We're only
	 * looking through any collisions so most of the time this is
	 * just going to compare against one buffer. If all is well,
	 * we'll return success and the inode ref object.
	 */
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
		name_ptr = (unsigned long)(&extref->name);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);

		if (ref_name_len == name->len &&
		    btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
		    (memcmp_extent_buffer(leaf, name->name, name_ptr,
					  name->len) == 0))
			return extref;

		cur_offset += ref_name_len + sizeof(*extref);
	}
	return NULL;
}

/* Returns NULL if no extref found. */
struct btrfs_inode_extref *btrfs_lookup_inode_extref(struct btrfs_root *root,
						     struct btrfs_path *path,
						     const struct fscrypt_str *name,
						     u64 inode_objectid, u64 ref_objectid)
{
	int ret;
	struct btrfs_key key;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0)
		return NULL;
	return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
					      ref_objectid, name);
}

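/*
 * Rough picture of the items that the search helpers above walk (a
 * conceptual sketch, not an exact byte-for-byte layout):
 *
 *   INODE_REF item (key offset is the parent directory objectid):
 *     [ btrfs_inode_ref | name ][ btrfs_inode_ref | name ] ...
 *
 *   INODE_EXTREF item (key offset is btrfs_extref_hash(parent, name)):
 *     [ btrfs_inode_extref | name ][ btrfs_inode_extref | name ] ...
 *
 * Every entry is sizeof(struct ...) + name_len bytes long, which is why the
 * loops above advance cur_offset by exactly that amount.
 */
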
static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  const struct fscrypt_str *name,
				  u64 inode_objectid, u64 ref_objectid,
				  u64 *index)
{
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	int ret;
	int del_len = name->len + sizeof(*extref);
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		return -ENOENT;
	if (ret < 0)
		return ret;

	/*
	 * Sanity check - did we find the right item for this name?
	 * This should always succeed so error here will make the FS
	 * readonly.
	 */
	extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
						ref_objectid, name);
	if (!extref) {
		btrfs_abort_transaction(trans, -ENOENT);
		return -ENOENT;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	if (index)
		*index = btrfs_inode_extref_index(leaf, extref);

	if (del_len == item_size) {
		/* Common case only one ref in the item, remove the whole item. */
		return btrfs_del_item(trans, root, path);
	}

	ptr = (unsigned long)extref;
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);

	memmove_extent_buffer(leaf, ptr, ptr + del_len,
			      item_size - (ptr + del_len - item_start));

	btrfs_truncate_item(trans, path, item_size - del_len, 1);

	return ret;
}

int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, const struct fscrypt_str *name,
			u64 inode_objectid, u64 ref_objectid, u64 *index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;
	u32 sub_item_len;
	int ret;
	int search_ext_refs = 0;
	int del_len = name->len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = ref_objectid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		search_ext_refs = 1;
		goto out;
	} else if (ret < 0) {
		goto out;
	}

	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
	if (!ref) {
		ret = -ENOENT;
		search_ext_refs = 1;
		goto out;
	}
	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);

	if (index)
		*index = btrfs_inode_ref_index(leaf, ref);

	if (del_len == item_size) {
		ret = btrfs_del_item(trans, root, path);
		goto out;
	}
	ptr = (unsigned long)ref;
	sub_item_len = name->len + sizeof(*ref);
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
			      item_size - (ptr + sub_item_len - item_start));
	btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
out:
	btrfs_free_path(path);

	if (search_ext_refs) {
		/*
		 * No refs were found, or we could not find the
		 * name in our ref array. Find and remove the extended
		 * inode ref then.
		 */
		return btrfs_del_inode_extref(trans, root, name,
					      inode_objectid, ref_objectid, index);
	}

	return ret;
}

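/*
 * When a shared item holds more than one name, both deletion helpers above
 * shrink it in place: the tail of the item is moved over the entry being
 * removed and the item is then truncated by that entry's size. Roughly:
 *
 *   before: [ entry A ][ entry B ][ entry C ]    item_size
 *   after:  [ entry A ][ entry C ]               item_size - del_len
 *
 * Only when the last remaining entry is removed is the whole item deleted.
 */
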
/*
 * Insert an extended inode ref into a tree.
 *
 * The caller must have checked against BTRFS_LINK_MAX already.
 */
static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     const struct fscrypt_str *name,
				     u64 inode_objectid, u64 ref_objectid,
				     u64 index)
{
	struct btrfs_inode_extref *extref;
	int ret;
	int ins_len = name->len + sizeof(*extref);
	unsigned long ptr;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_key key;
	struct extent_buffer *leaf;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key, ins_len);
	if (ret == -EEXIST) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid, name))
			return ret;

		btrfs_extend_item(trans, path, ins_len);
		ret = 0;
	}
	if (ret < 0)
		return ret;

	leaf = path->nodes[0];
	ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
	ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
	extref = (struct btrfs_inode_extref *)ptr;

	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
	btrfs_set_inode_extref_index(path->nodes[0], extref, index);
	btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);

	ptr = (unsigned long)&extref->name;
	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);

	return 0;
}

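/*
 * btrfs_insert_inode_ref() below always tries a regular INODE_REF first.
 * If the shared ref item can not grow any further inside the leaf,
 * btrfs_insert_empty_item() fails with -EOVERFLOW, which is turned into
 * -EMLINK unless the name already exists. When the EXTENDED_IREF incompat
 * feature is enabled, the -EMLINK case is then retried as an extended ref
 * via the helper above.
 */
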
/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, const struct fscrypt_str *name,
			   u64 inode_objectid, u64 ref_objectid, u64 index)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	int ret;
	int ins_len = name->len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = ref_objectid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &key, ins_len);
	if (ret == -EEXIST) {
		u32 old_size;

		ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						 name);
		if (ref)
			goto out;

		old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
		btrfs_extend_item(trans, path, ins_len);
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
		ret = 0;
	} else if (ret < 0) {
		if (ret == -EOVERFLOW) {
			if (btrfs_find_name_in_backref(path->nodes[0],
						       path->slots[0], name))
				ret = -EEXIST;
			else
				ret = -EMLINK;
		}
		goto out;
	} else {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
	}
	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
out:
	btrfs_free_path(path);

	if (ret == -EMLINK) {
		struct btrfs_super_block *disk_super = fs_info->super_copy;

		/*
		 * We ran out of space in the ref array. Need to add an
		 * extended ref.
		 */
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
			ret = btrfs_insert_inode_extref(trans, root, name,
							inode_objectid,
							ref_objectid, index);
	}

	return ret;
}

int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_inode_item));
	return ret;
}

int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		       struct btrfs_path *path, struct btrfs_key *location, int mod)
{
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;

	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
	if (ret > 0 && location->type == BTRFS_ROOT_ITEM_KEY &&
	    location->offset == (u64)-1 && path->slots[0] != 0) {
		slot = path->slots[0] - 1;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid == location->objectid &&
		    found_key.type == location->type) {
			path->slots[0]--;
			return 0;
		}
	}
	return ret;
}

static inline void btrfs_trace_truncate(const struct btrfs_inode *inode,
					const struct extent_buffer *leaf,
					const struct btrfs_file_extent_item *fi,
					u64 offset, int extent_type, int slot)
{
	if (!inode)
		return;
	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		trace_btrfs_truncate_show_fi_inline(inode, leaf, fi, slot,
						    offset);
	else
		trace_btrfs_truncate_show_fi_regular(inode, leaf, fi, offset);
}

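/*
 * Illustrative setup of the truncate control used below (a sketch only,
 * real callers live elsewhere, e.g. in the inode code, and may set more
 * fields such as skip_ref_updates):
 *
 *	struct btrfs_truncate_control control = {
 *		.inode = inode,
 *		.ino = btrfs_ino(inode),
 *		.new_size = new_size,
 *		.min_type = BTRFS_EXTENT_DATA_KEY,
 *		.clear_extent_range = true,
 *	};
 *
 *	ret = btrfs_truncate_inode_items(trans, root, &control);
 */
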
/*
 * Remove inode items from a given root.
 *
 * @trans:	A transaction handle.
 * @root:	The root from which to remove items.
 * @inode:	The inode whose items we want to remove.
 * @control:	The btrfs_truncate_control to control how and what we
 *		are truncating.
 *
 * Remove all keys associated with the inode from the given root that have a key
 * with a type greater than or equal to @min_type. When @min_type has a value of
 * BTRFS_EXTENT_DATA_KEY, only remove file extent items that have an offset value
 * greater than or equal to @new_size. If a file extent item that starts before
 * @new_size and ends after it is found, its length is adjusted.
 *
 * Returns: 0 on success, < 0 on error and NEED_TRUNCATE_BLOCK when @min_type is
 * BTRFS_EXTENT_DATA_KEY and the caller must truncate the last block.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_truncate_control *control)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 new_size = control->new_size;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u32 found_type = (u8)-1;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	u64 bytes_deleted = 0;
	bool be_nice = false;

	ASSERT(control->inode || !control->clear_extent_range);
	ASSERT(new_size == 0 || control->min_type == BTRFS_EXTENT_DATA_KEY);

	control->last_size = new_size;
	control->sub_bytes = 0;

	/*
	 * For shareable roots we want to back off from time to time; this
	 * turns out to be subvolume roots, reloc roots, and data reloc roots.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		be_nice = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = control->ino;
	key.type = (u8)-1;
	key.offset = (u64)-1;

search_again:
	/*
	 * With a 16K leaf size and 128MiB extents, you can actually queue up a
	 * huge file in a single leaf. Most of the time that bytes_deleted is
	 * > 0, it will be huge by the time we get here.
	 */
	if (be_nice && bytes_deleted > SZ_32M &&
	    btrfs_should_end_transaction(trans)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = 0;
		/* There are no items in the tree for us to truncate, we're done. */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

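	/*
	 * Walk the items backwards from the highest possible key for this
	 * inode. Contiguous leaf slots that must go away are batched in
	 * pending_del_slot/pending_del_nr and removed with one call to
	 * btrfs_del_items(); the batch is flushed whenever the slots stop
	 * being adjacent, the search restarts, or the delayed refs
	 * reservation needs to be refilled.
	 */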
	while (1) {
		u64 clear_start = 0, clear_len = 0, extent_start = 0;
		bool refill_delayed_refs_rsv = false;

		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = found_key.type;

		if (found_key.objectid != control->ino)
			break;

		if (found_type < control->min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE)
				item_end +=
					btrfs_file_extent_num_bytes(leaf, fi);
			else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
				item_end += btrfs_file_extent_ram_bytes(leaf, fi);

			btrfs_trace_truncate(control->inode, leaf, fi,
					     found_key.offset, extent_type,
					     path->slots[0]);
			item_end--;
		}
		if (found_type > control->min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}

		/* FIXME: shrink the extent if the ref count is only 1. */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		control->extents_found++;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;

			clear_start = found_key.offset;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
							 found_key.offset,
							 fs_info->sectorsize);
				clear_start = ALIGN(new_size, fs_info->sectorsize);

				btrfs_set_file_extent_num_bytes(leaf, fi,
								extent_num_bytes);
				num_dec = (orig_num_bytes - extent_num_bytes);
				if (extent_start != 0)
					control->sub_bytes += num_dec;
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf, fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME: blocksize != 4096. */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0)
					control->sub_bytes += num_dec;
			}
			clear_len = num_dec;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * We can't truncate inline items that have had
			 * special encodings.
			 */
			if (!del_item &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
			    btrfs_file_extent_compression(leaf, fi) == 0) {
				u32 size = (u32)(new_size - found_key.offset);

				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
				size = btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(trans, path, size, 1);
			} else if (!del_item) {
				/*
				 * We have to bail so the last_size is set to
				 * just before this extent.
				 */
				ret = BTRFS_NEED_TRUNCATE_BLOCK;
				break;
			} else {
				/*
				 * Inline extents are special, we just treat
				 * them as a full sector worth in the file
				 * extent tree just for simplicity sake.
				 */
				clear_len = fs_info->sectorsize;
			}

			control->sub_bytes += item_end + 1 - new_size;
		}
delete:
		/*
		 * We only want to clear the file extent range if we're
		 * modifying the actual inode's mapping, which is just the
		 * normal truncate path.
		 */
		if (control->clear_extent_range) {
			ret = btrfs_inode_clear_file_extent_range(control->inode,
								  clear_start,
								  clear_len);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
		}

		if (del_item) {
			ASSERT(!pending_del_nr ||
			       ((path->slots[0] + 1) == pending_del_slot));

			control->last_size = found_key.offset;
			if (!pending_del_nr) {
				/* No pending yet, add ourselves. */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (path->slots[0] + 1 == pending_del_slot) {
				/* Hop on the pending chunk. */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			}
		} else {
			control->last_size = new_size;
			break;
		}

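		/*
		 * Queue a drop of the data extent's backref. The space is
		 * actually reclaimed later, when the delayed refs are run,
		 * which is also why the delayed refs reservation may need a
		 * refill below.
		 */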
		if (del_item && extent_start != 0 && !control->skip_ref_updates) {
			struct btrfs_ref ref = {
				.action = BTRFS_DROP_DELAYED_REF,
				.bytenr = extent_start,
				.num_bytes = extent_num_bytes,
				.owning_root = btrfs_root_id(root),
				.ref_root = btrfs_header_owner(leaf),
			};

			bytes_deleted += extent_num_bytes;

			btrfs_init_data_ref(&ref, control->ino, extent_offset,
					    btrfs_root_id(root), false);
			ret = btrfs_free_extent(trans, &ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
				refill_delayed_refs_rsv = true;
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot ||
		    refill_delayed_refs_rsv) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						      pending_del_slot,
						      pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);

			/*
			 * We can generate a lot of delayed refs, so we need to
			 * throttle every once in a while and make sure we're
			 * adding enough space to keep up with the work we are
			 * generating. Since we hold a transaction here we
			 * can't flush, and we don't want to FLUSH_LIMIT because
			 * we could have generated too many delayed refs to
			 * actually allocate, so just bail if we're short and
			 * let the normal reservation dance happen higher up.
			 */
			if (refill_delayed_refs_rsv) {
				ret = btrfs_delayed_refs_rsv_refill(fs_info,
							BTRFS_RESERVE_NO_FLUSH);
				if (ret) {
					ret = -EAGAIN;
					break;
				}
			}
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (ret >= 0 && pending_del_nr) {
		int ret2;

		ret2 = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr);
		if (ret2) {
			btrfs_abort_transaction(trans, ret2);
			ret = ret2;
		}
	}

	ASSERT(control->last_size >= new_size);
	if (!ret && control->last_size > new_size)
		control->last_size = new_size;

	btrfs_free_path(path);
	return ret;
}