// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/err.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "transaction.h"
#include "disk-io.h"
#include "qgroup.h"
#include "space-info.h"
#include "accessors.h"
#include "root-tree.h"
#include "orphan.h"

/*
 * Read a root item from the tree. In case we detect a root item smaller than
 * sizeof(root_item), we know it's an old version of the root structure and
 * initialize all new fields to zero. The same happens if we detect mismatching
 * generation numbers, as then we know the root was once mounted with an older
 * kernel that was not aware of the root item structure change.
 */
static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
                                 struct btrfs_root_item *item)
{
        u32 len;
        int need_reset = 0;

        len = btrfs_item_size(eb, slot);
        read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
                           min_t(u32, len, sizeof(*item)));
        if (len < sizeof(*item))
                need_reset = 1;
        if (!need_reset && btrfs_root_generation(item) !=
            btrfs_root_generation_v2(item)) {
                if (btrfs_root_generation_v2(item) != 0) {
                        btrfs_warn(eb->fs_info,
"mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
                }
                need_reset = 1;
        }
        if (need_reset) {
                /* Clear all members from generation_v2 onwards. */
                memset_startat(item, 0, generation_v2);
                generate_random_guid(item->uuid);
        }
}

/*
 * Lookup the root by the key.
 *
 * root: the root of the root tree
 * search_key: the key to search
 * path: the path we search
 * root_item: the root item of the tree we look for
 * root_key: the root key of the tree we look for
 *
 * If ->offset of 'search_key' is -1ULL, it means the exact offset of the key
 * is not known, so look up the root item with the highest offset for the
 * given objectid.
 *
 * Returns 0 if something was found, > 0 if nothing was found and < 0 on error.
 */
int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
                    struct btrfs_path *path, struct btrfs_root_item *root_item,
                    struct btrfs_key *root_key)
{
        struct btrfs_key found_key;
        struct extent_buffer *l;
        int ret;
        int slot;

        ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
        if (ret < 0)
                return ret;

        if (search_key->offset != -1ULL) {      /* The search key is exact. */
                if (ret > 0)
                        goto out;
        } else {
                /*
                 * Key with offset -1 found, there would have to exist a root
                 * with such id, but this is out of the valid range.
                 */
                if (unlikely(ret == 0)) {
                        ret = -EUCLEAN;
                        goto out;
                }
                if (path->slots[0] == 0)
                        goto out;
                path->slots[0]--;
                ret = 0;
        }

        l = path->nodes[0];
        slot = path->slots[0];

        btrfs_item_key_to_cpu(l, &found_key, slot);
        if (found_key.objectid != search_key->objectid ||
            found_key.type != BTRFS_ROOT_ITEM_KEY) {
                ret = 1;
                goto out;
        }

        if (root_item)
                btrfs_read_root_item(l, slot, root_item);
        if (root_key)
                memcpy(root_key, &found_key, sizeof(found_key));
out:
        btrfs_release_path(path);
        return ret;
}
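
/*
 * Example (illustrative only): when the offset of a root item key is not
 * known, a caller can pass -1ULL and get the root item with the highest
 * offset for the given objectid, roughly:
 *
 *	struct btrfs_key key = { .objectid = objectid,
 *				 .type = BTRFS_ROOT_ITEM_KEY,
 *				 .offset = (u64)-1 };
 *
 *	ret = btrfs_find_root(fs_info->tree_root, &key, path, &root_item, NULL);
 *
 * Here 'objectid', 'path' and 'root_item' stand for caller provided variables.
 */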

/* Set the root item's bytenr, level and generation from the given tree node. */
void btrfs_set_root_node(struct btrfs_root_item *item,
                         struct extent_buffer *node)
{
        btrfs_set_root_bytenr(item, node->start);
        btrfs_set_root_level(item, btrfs_header_level(node));
        btrfs_set_root_generation(item, btrfs_header_generation(node));
}

/*
 * Copy the data in 'item' into the btree.
 */
int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct btrfs_key *key, struct btrfs_root_item *item)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        BTRFS_PATH_AUTO_FREE(path);
        struct extent_buffer *l;
        int ret;
        int slot;
        unsigned long ptr;
        u32 old_len;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, key, path, 0, 1);
        if (ret < 0)
                return ret;

        if (unlikely(ret > 0)) {
                btrfs_crit(fs_info,
                           "unable to find root key (%llu %u %llu) in tree %llu",
                           key->objectid, key->type, key->offset, btrfs_root_id(root));
                ret = -EUCLEAN;
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr_offset(l, slot);
        old_len = btrfs_item_size(l, slot);

        /*
         * If this is the first time we update the root item which originated
         * from an older kernel, we need to enlarge the item size to make room
         * for the added fields.
         */
        if (old_len < sizeof(*item)) {
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, key, path, -1, 1);
                if (unlikely(ret < 0)) {
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }

                ret = btrfs_del_item(trans, root, path);
                if (unlikely(ret < 0)) {
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
                btrfs_release_path(path);
                ret = btrfs_insert_empty_item(trans, root, path, key,
                                              sizeof(*item));
                if (unlikely(ret < 0)) {
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
                l = path->nodes[0];
                slot = path->slots[0];
                ptr = btrfs_item_ptr_offset(l, slot);
        }

        /*
         * Update generation_v2 so at the next mount we know the new root
         * fields are valid.
         */
        btrfs_set_root_generation_v2(item, btrfs_root_generation(item));

        write_extent_buffer(l, item, ptr, sizeof(*item));
        return ret;
}
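
/*
 * Illustrative usage sketch (not taken from a specific caller): a typical
 * user points the cached root item at the current (possibly COWed) root node
 * and then writes the item back into the root tree, roughly:
 *
 *	btrfs_set_root_node(&root->root_item, root->node);
 *	ret = btrfs_update_root(trans, root->fs_info->tree_root,
 *				&root->root_key, &root->root_item);
 */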

int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *key, struct btrfs_root_item *item)
{
        /*
         * Make sure generation v1 and v2 match. See update_root for details.
         */
        btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
        return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
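
/*
 * Find all orphan roots recorded in the tree root and hand them over for
 * cleanup.
 *
 * A dead root is recorded as an orphan item with the key
 * (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, <objectid of the dead root>).
 * For each such item, delete the orphan item if the root no longer exists,
 * otherwise mark the root as a dead tree so that it gets fully dropped later.
 */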
int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;
        BTRFS_PATH_AUTO_FREE(path);
        struct btrfs_key key;
        struct btrfs_root *root;
        int err = 0;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_ORPHAN_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = 0;

        while (1) {
                u64 root_objectid;

                ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
                if (ret < 0) {
                        err = ret;
                        break;
                }

                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(tree_root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret != 0)
                                break;
                        leaf = path->nodes[0];
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                btrfs_release_path(path);

                if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
                    key.type != BTRFS_ORPHAN_ITEM_KEY)
                        break;

                root_objectid = key.offset;
                key.offset++;

                root = btrfs_get_fs_root(fs_info, root_objectid, false);
                err = PTR_ERR_OR_ZERO(root);
                if (err && err != -ENOENT) {
                        break;
                } else if (err == -ENOENT) {
                        struct btrfs_trans_handle *trans;

                        btrfs_release_path(path);

                        trans = btrfs_join_transaction(tree_root);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                btrfs_handle_fs_error(fs_info, err,
                                        "Failed to start trans to delete orphan item");
                                break;
                        }
                        err = btrfs_del_orphan_item(trans, tree_root,
                                                    root_objectid);
                        btrfs_end_transaction(trans);
                        if (err) {
                                btrfs_handle_fs_error(fs_info, err,
                                        "Failed to delete root orphan item");
                                break;
                        }
                        continue;
                }

                WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state));
                if (btrfs_root_refs(&root->root_item) == 0) {
                        struct btrfs_key drop_key;

                        btrfs_disk_key_to_cpu(&drop_key, &root->root_item.drop_progress);
                        /*
                         * If we have a non-zero drop_progress then we know we
                         * made it partly through deleting this snapshot, and
                         * thus we need to make sure we block any balance from
                         * happening until this snapshot is completely dropped.
                         */
                        if (drop_key.objectid != 0 || drop_key.type != 0 ||
                            drop_key.offset != 0) {
                                set_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
                                set_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
                        }

                        set_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
                        btrfs_add_dead_root(root);
                }
                btrfs_put_root(root);
        }

        return err;
}

/* Drop the root item for 'key' from the tree root. */
int btrfs_del_root(struct btrfs_trans_handle *trans,
                   const struct btrfs_key *key)
{
        struct btrfs_root *root = trans->fs_info->tree_root;
        BTRFS_PATH_AUTO_FREE(path);
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_search_slot(trans, root, key, path, -1, 1);
        if (ret < 0)
                return ret;
        if (unlikely(ret > 0))
                /* The root must exist but we did not find it by the key. */
                return -EUCLEAN;

        return btrfs_del_item(trans, root, path);
}

/*
 * Delete a root ref: remove both the BTRFS_ROOT_BACKREF_KEY and the
 * BTRFS_ROOT_REF_KEY items for the given directory entry and return the
 * stored sequence number in 'sequence'.
 */
int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
                       u64 ref_id, u64 dirid, u64 *sequence,
                       const struct fscrypt_str *name)
{
        struct btrfs_root *tree_root = trans->fs_info->tree_root;
        BTRFS_PATH_AUTO_FREE(path);
        struct btrfs_root_ref *ref;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = root_id;
        key.type = BTRFS_ROOT_BACKREF_KEY;
        key.offset = ref_id;
again:
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0) {
                return ret;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
                ptr = (unsigned long)(ref + 1);
                if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
                    (btrfs_root_ref_name_len(leaf, ref) != name->len) ||
                    memcmp_extent_buffer(leaf, name->name, ptr, name->len))
                        return -ENOENT;

                *sequence = btrfs_root_ref_sequence(leaf, ref);

                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        return ret;
        } else {
                return -ENOENT;
        }

        if (key.type == BTRFS_ROOT_BACKREF_KEY) {
                btrfs_release_path(path);
                key.objectid = ref_id;
                key.type = BTRFS_ROOT_REF_KEY;
                key.offset = root_id;
                goto again;
        }

        return ret;
}

/*
 * Add a btrfs_root_ref item. Both the forward reference (BTRFS_ROOT_REF_KEY)
 * and the back reference (BTRFS_ROOT_BACKREF_KEY) items are inserted.
 *
 * The dirid, sequence and name refer to the directory entry
 * that is referencing the root.
 *
 * For a forward ref, the root_id is the id of the tree referencing
 * the root and ref_id is the id of the subvol or snapshot.
 *
 * For a back ref the root_id is the id of the subvol or snapshot and
 * ref_id is the id of the tree referencing it.
 *
 * Will return 0, -ENOMEM, or anything from the CoW path
 */
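
/*
 * Example (illustrative ids only): linking a subvolume with tree id 257 below
 * a directory in the fs tree (tree id 5) results in two items in the root
 * tree:
 *
 *	(257 BTRFS_ROOT_BACKREF_KEY 5)		back reference
 *	(5 BTRFS_ROOT_REF_KEY 257)		forward reference
 *
 * Both items carry the same dirid, sequence and name of the directory entry.
 */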
int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
                       u64 ref_id, u64 dirid, u64 sequence,
                       const struct fscrypt_str *name)
{
        struct btrfs_root *tree_root = trans->fs_info->tree_root;
        struct btrfs_key key;
        int ret;
        BTRFS_PATH_AUTO_FREE(path);
        struct btrfs_root_ref *ref;
        struct extent_buffer *leaf;
        unsigned long ptr;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = root_id;
        key.type = BTRFS_ROOT_BACKREF_KEY;
        key.offset = ref_id;
again:
        ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
                                      sizeof(*ref) + name->len);
        if (unlikely(ret)) {
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
        btrfs_set_root_ref_dirid(leaf, ref, dirid);
        btrfs_set_root_ref_sequence(leaf, ref, sequence);
        btrfs_set_root_ref_name_len(leaf, ref, name->len);
        ptr = (unsigned long)(ref + 1);
        write_extent_buffer(leaf, name->name, ptr, name->len);

        if (key.type == BTRFS_ROOT_BACKREF_KEY) {
                btrfs_release_path(path);
                key.objectid = ref_id;
                key.type = BTRFS_ROOT_REF_KEY;
                key.offset = root_id;
                goto again;
        }

        return 0;
}

/*
 * Old btrfs forgets to init root_item->flags and root_item->byte_limit
 * for subvolumes. To work around this problem, we steal a bit from
 * root_item->inode_item->flags, and use it to indicate if those fields
 * have been properly initialized.
 */
void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
{
        u64 inode_flags = btrfs_stack_inode_flags(&root_item->inode);

        if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
                inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
                btrfs_set_stack_inode_flags(&root_item->inode, inode_flags);
                btrfs_set_root_flags(root_item, 0);
                btrfs_set_root_limit(root_item, 0);
        }
}

/* Update the root item's ctransid and ctime to the current transaction and time. */
void btrfs_update_root_times(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        struct btrfs_root_item *item = &root->root_item;
        struct timespec64 ct;

        ktime_get_real_ts64(&ct);
        spin_lock(&root->root_item_lock);
        btrfs_set_root_ctransid(item, trans->transid);
        btrfs_set_stack_timespec_sec(&item->ctime, ct.tv_sec);
        btrfs_set_stack_timespec_nsec(&item->ctime, ct.tv_nsec);
        spin_unlock(&root->root_item_lock);
}

/*
 * Reserve space for subvolume operation.
 *
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve space for
 * use_global_rsv: allow fallback to the global block reservation
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations differ from the common
 * file/directory operations: they change two fs/file trees and the
 * root tree, and the number of items that the qgroup reserves differs
 * from the free space reservation. So we can not use the space
 * reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                                     struct btrfs_block_rsv *rsv, int items,
                                     bool use_global_rsv)
{
        u64 qgroup_num_bytes = 0;
        u64 num_bytes;
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

        if (btrfs_qgroup_enabled(fs_info)) {
                /* One for parent inode, two for dir entries. */
                qgroup_num_bytes = 3 * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes,
                                                         true, false);
                if (ret)
                        return ret;
        }

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, items);
        rsv->space_info = btrfs_find_space_info(fs_info,
                                                BTRFS_BLOCK_GROUP_METADATA);
        ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes,
                                  BTRFS_RESERVE_FLUSH_ALL);

        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);

        if (ret && qgroup_num_bytes)
                btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);

        if (!ret) {
                spin_lock(&rsv->lock);
                rsv->qgroup_rsv_reserved += qgroup_num_bytes;
                spin_unlock(&rsv->lock);
        }
        return ret;
}