// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
#include "dirent.h"
#include "fs.h"
#include "keylist.h"
#include "str_hash.h"
#include "subvolume.h"

#include <linux/dcache.h>

static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
	unsigned bkey_u64s = bkey_val_u64s(d.k);
	unsigned bkey_bytes = bkey_u64s * sizeof(u64);
	u64 last_u64 = ((u64*)d.v)[bkey_u64s - 1];
#if CPU_BIG_ENDIAN
	unsigned trailing_nuls = last_u64 ? __builtin_ctzll(last_u64) / 8 : 64 / 8;
#else
	unsigned trailing_nuls = last_u64 ? __builtin_clzll(last_u64) / 8 : 64 / 8;
#endif

	return bkey_bytes -
		offsetof(struct bch_dirent, d_name) -
		trailing_nuls;
}

struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
{
	return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
}

static u64 bch2_dirent_hash(const struct bch_hash_info *info,
			    const struct qstr *name)
{
	struct bch_str_hash_ctx ctx;

	bch2_str_hash_init(&ctx, info);
	bch2_str_hash_update(&ctx, info, name->name, name->len);

	/* [0,2) reserved for dots */
	return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
}

static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
{
	return bch2_dirent_hash(info, key);
}

static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr name = bch2_dirent_get_name(d);

	return bch2_dirent_hash(info, &name);
}

static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	const struct qstr l_name = bch2_dirent_get_name(l);
	const struct qstr *r_name = _r;

	return !qstr_eq(l_name, *r_name);
}

static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
	const struct qstr l_name = bch2_dirent_get_name(l);
	const struct qstr r_name = bch2_dirent_get_name(r);

	return !qstr_eq(l_name, r_name);
}

static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

	if (d.v->d_type == DT_SUBVOL)
		return le32_to_cpu(d.v->d_parent_subvol) == inum.subvol;
	return true;
}

const struct bch_hash_desc bch2_dirent_hash_desc = {
	.btree_id	= BTREE_ID_dirents,
	.key_type	= KEY_TYPE_dirent,
	.hash_key	= dirent_hash_key,
	.hash_bkey	= dirent_hash_bkey,
	.cmp_key	= dirent_cmp_key,
	.cmp_bkey	= dirent_cmp_bkey,
	.is_visible	= dirent_is_visible,
};
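
/*
 * Key validation for dirents: reject empty names, values larger than needed
 * for the stored name, new keys (BCH_VALIDATE_commit) whose names exceed
 * BCH_NAME_MAX, stray bytes after an embedded NUL, the names "." and "..",
 * names containing '/', and non-subvolume dirents that point back at the
 * directory containing them.
 */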
int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
			enum bch_validate_flags flags,
			struct printbuf *err)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr d_name = bch2_dirent_get_name(d);
	int ret = 0;

	bkey_fsck_err_on(!d_name.len, c, err,
			 dirent_empty_name,
			 "empty name");

	bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len), c, err,
			 dirent_val_too_big,
			 "value too big (%zu > %u)",
			 bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));

	/*
	 * Check new keys don't exceed the max length
	 * (older keys may be larger.)
	 */
	bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, c, err,
			 dirent_name_too_long,
			 "dirent name too big (%u > %u)",
			 d_name.len, BCH_NAME_MAX);

	bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len), c, err,
			 dirent_name_embedded_nul,
			 "dirent has stray data after name's NUL");

	bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
			 (d_name.len == 2 && !memcmp(d_name.name, "..", 2)), c, err,
			 dirent_name_dot_or_dotdot,
			 "invalid name");

	bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len), c, err,
			 dirent_name_has_slash,
			 "name with /");

	bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
			 le64_to_cpu(d.v->d_inum) == d.k->p.inode, c, err,
			 dirent_to_itself,
			 "dirent points to own directory");
fsck_err:
	return ret;
}

void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr d_name = bch2_dirent_get_name(d);

	prt_printf(out, "%.*s -> ", d_name.len, d_name.name);

	if (d.v->d_type != DT_SUBVOL)
		prt_printf(out, "%llu", le64_to_cpu(d.v->d_inum));
	else
		prt_printf(out, "%u -> %u",
			   le32_to_cpu(d.v->d_parent_subvol),
			   le32_to_cpu(d.v->d_child_subvol));

	prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}
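
/*
 * Allocate a new dirent key in transaction memory and fill it in: the name is
 * copied into the value and the remainder is zeroed, so that
 * bch2_dirent_name_bytes() can later recover the exact name length from the
 * trailing NUL padding.  Subvolume dirents store parent and child subvolume
 * IDs in place of an inode number.
 */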
static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
				subvol_inum dir, u8 type,
				const struct qstr *name, u64 dst)
{
	struct bkey_i_dirent *dirent;
	unsigned u64s = BKEY_U64s + dirent_val_u64s(name->len);

	if (name->len > BCH_NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	BUG_ON(u64s > U8_MAX);

	dirent = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
	if (IS_ERR(dirent))
		return dirent;

	bkey_dirent_init(&dirent->k_i);
	dirent->k.u64s = u64s;

	if (type != DT_SUBVOL) {
		dirent->v.d_inum = cpu_to_le64(dst);
	} else {
		dirent->v.d_parent_subvol = cpu_to_le32(dir.subvol);
		dirent->v.d_child_subvol = cpu_to_le32(dst);
	}

	dirent->v.d_type = type;

	memcpy(dirent->v.d_name, name->name, name->len);
	memset(dirent->v.d_name + name->len, 0,
	       bkey_val_bytes(&dirent->k) -
	       offsetof(struct bch_dirent, d_name) -
	       name->len);

	EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);

	return dirent;
}

int bch2_dirent_create_snapshot(struct btree_trans *trans,
			u32 dir_subvol, u64 dir, u32 snapshot,
			const struct bch_hash_info *hash_info,
			u8 type, const struct qstr *name, u64 dst_inum,
			u64 *dir_offset,
			enum btree_iter_update_trigger_flags flags)
{
	subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = dirent_create_key(trans, dir_inum, type, name, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	dirent->k.p.inode	= dir;
	dirent->k.p.snapshot	= snapshot;

	ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
					dir_inum, snapshot, &dirent->k_i,
					flags|BTREE_UPDATE_internal_snapshot_node);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       u8 type, const struct qstr *name, u64 dst_inum,
		       u64 *dir_offset,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = dirent_create_key(trans, dir, type, name, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
			    dir, &dirent->k_i, flags);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

static void dirent_copy_target(struct bkey_i_dirent *dst,
			       struct bkey_s_c_dirent src)
{
	dst->v.d_inum = src.v->d_inum;
	dst->v.d_type = src.v->d_type;
}
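
/*
 * Resolve the target of a dirent: returns > 0 if the dirent is a subvolume
 * dirent that isn't visible from @dir's subvolume (callers treat that as "not
 * found"), 0 on success with *target filled in, or a negative error.  For
 * subvolume dirents the target inode is the subvolume's root inode, looked up
 * with bch2_subvolume_get().
 */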
int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
			    struct bkey_s_c_dirent d, subvol_inum *target)
{
	struct bch_subvolume s;
	int ret = 0;

	if (d.v->d_type == DT_SUBVOL &&
	    le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
		return 1;

	if (likely(d.v->d_type != DT_SUBVOL)) {
		target->subvol	= dir.subvol;
		target->inum	= le64_to_cpu(d.v->d_inum);
	} else {
		target->subvol	= le32_to_cpu(d.v->d_child_subvol);

		ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s);

		target->inum	= le64_to_cpu(s.inode);
	}

	return ret;
}

int bch2_dirent_rename(struct btree_trans *trans,
		subvol_inum src_dir, struct bch_hash_info *src_hash,
		subvol_inum dst_dir, struct bch_hash_info *dst_hash,
		const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
		const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
		enum bch_rename_mode mode)
{
	struct btree_iter src_iter = { NULL };
	struct btree_iter dst_iter = { NULL };
	struct bkey_s_c old_src, old_dst = bkey_s_c_null;
	struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
	struct bpos dst_pos =
		POS(dst_dir.inum, bch2_dirent_hash(dst_hash, dst_name));
	unsigned src_update_flags = 0;
	bool delete_src, delete_dst;
	int ret = 0;

	memset(src_inum, 0, sizeof(*src_inum));
	memset(dst_inum, 0, sizeof(*dst_inum));

	/* Lookup src: */
	old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
				   src_hash, src_dir, src_name,
				   BTREE_ITER_intent);
	ret = bkey_err(old_src);
	if (ret)
		goto out;

	ret = bch2_dirent_read_target(trans, src_dir,
				      bkey_s_c_to_dirent(old_src), src_inum);
	if (ret)
		goto out;

	/* Lookup dst: */
	if (mode == BCH_RENAME) {
		/*
		 * Note that we're _not_ checking if the target already exists -
		 * we're relying on the VFS to do that check for us for
		 * correctness:
		 */
		ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
				     dst_hash, dst_dir, dst_name);
		if (ret)
			goto out;
	} else {
		old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
					   dst_hash, dst_dir, dst_name,
					   BTREE_ITER_intent);
		ret = bkey_err(old_dst);
		if (ret)
			goto out;

		ret = bch2_dirent_read_target(trans, dst_dir,
					      bkey_s_c_to_dirent(old_dst), dst_inum);
		if (ret)
			goto out;
	}

	if (mode != BCH_RENAME_EXCHANGE)
		*src_offset = dst_iter.pos.offset;

	/* Create new dst key: */
	new_dst = dirent_create_key(trans, dst_dir, 0, dst_name, 0);
	ret = PTR_ERR_OR_ZERO(new_dst);
	if (ret)
		goto out;

	dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
	new_dst->k.p = dst_iter.pos;

	/* Create new src key: */
	if (mode == BCH_RENAME_EXCHANGE) {
		new_src = dirent_create_key(trans, src_dir, 0, src_name, 0);
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
		new_src->k.p = src_iter.pos;
	} else {
		new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		bkey_init(&new_src->k);
		new_src->k.p = src_iter.pos;
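
		/*
		 * The dirent hash table is open addressed: lookups probe
		 * forward from a name's hashed slot until they find the key or
		 * hit an empty (non-whiteout) slot.  dst_pos is where dst_name
		 * hashes to and dst_iter.pos is the free slot probing found
		 * for it, so we can't simply leave a hole at the slot we're
		 * vacating if it lies in between:
		 */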
		if (bkey_le(dst_pos, src_iter.pos) &&
		    bkey_lt(src_iter.pos, dst_iter.pos)) {
			/*
			 * We have a hash collision for the new dst key,
			 * and new_src - the key we're deleting - is between
			 * new_dst's hashed slot and the slot we're going to be
			 * inserting it into - oops.  This will break the hash
			 * table if we don't deal with it:
			 */
			if (mode == BCH_RENAME) {
				/*
				 * If we're not overwriting, we can just insert
				 * new_dst at the src position:
				 */
				new_src = new_dst;
				new_src->k.p = src_iter.pos;
				goto out_set_src;
			} else {
				/*
				 * If we're overwriting, we can't insert new_dst
				 * at a different slot because it has to
				 * overwrite old_dst - just make sure to use a
				 * whiteout when deleting src:
				 */
				new_src->k.type = KEY_TYPE_hash_whiteout;
			}
		} else {
			/* Check if we need a whiteout to delete src: */
			ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
						       src_hash, &src_iter);
			if (ret < 0)
				goto out;

			if (ret)
				new_src->k.type = KEY_TYPE_hash_whiteout;
		}
	}

	if (new_dst->v.d_type == DT_SUBVOL)
		new_dst->v.d_parent_subvol = cpu_to_le32(dst_dir.subvol);

	if ((mode == BCH_RENAME_EXCHANGE) &&
	    new_src->v.d_type == DT_SUBVOL)
		new_src->v.d_parent_subvol = cpu_to_le32(src_dir.subvol);

	ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
	if (ret)
		goto out;
out_set_src:
	/*
	 * If we're deleting a subvolume we need to really delete the dirent,
	 * not just emit a whiteout in the current snapshot - there can only be
	 * a single dirent that points to a given subvolume.
	 *
	 * IOW, we don't maintain multiple versions in different snapshots of
	 * dirents that point to subvolumes - dirents that point to subvolumes
	 * are only visible in one particular subvolume so it's not necessary,
	 * and it would be particularly confusing for fsck to have to deal with.
	 */
	delete_src = bkey_s_c_to_dirent(old_src).v->d_type == DT_SUBVOL &&
		new_src->k.p.snapshot != old_src.k->p.snapshot;

	delete_dst = old_dst.k &&
		bkey_s_c_to_dirent(old_dst).v->d_type == DT_SUBVOL &&
		new_dst->k.p.snapshot != old_dst.k->p.snapshot;

	if (!delete_src || !bkey_deleted(&new_src->k)) {
		ret = bch2_trans_update(trans, &src_iter, &new_src->k_i, src_update_flags);
		if (ret)
			goto out;
	}

	if (delete_src) {
		bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(&src_iter) ?:
			bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (delete_dst) {
		bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(&dst_iter) ?:
			bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (mode == BCH_RENAME_EXCHANGE)
		*src_offset = new_src->k.p.offset;
	*dst_offset = new_dst->k.p.offset;
out:
	bch2_trans_iter_exit(trans, &src_iter);
	bch2_trans_iter_exit(trans, &dst_iter);
	return ret;
}

int bch2_dirent_lookup_trans(struct btree_trans *trans,
			     struct btree_iter *iter,
			     subvol_inum dir,
			     const struct bch_hash_info *hash_info,
			     const struct qstr *name, subvol_inum *inum,
			     unsigned flags)
{
	struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
					     hash_info, dir, name, flags);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
	if (ret > 0)
		ret = -ENOENT;
err:
	if (ret)
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       const struct qstr *name, subvol_inum *inum)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };

	int ret = lockrestart_do(trans,
		bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
					  SPOS(dir, 0, snapshot),
					  POS(dir, U64_MAX), 0, k, ret)
		if (k.k->type == KEY_TYPE_dirent) {
			struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
			if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
				continue;
			ret = -BCH_ERR_ENOTEMPTY_dir_not_empty;
			break;
		}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
{
	u32 snapshot;

	return bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot) ?:
		bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
}

static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
{
	struct qstr name = bch2_dirent_get_name(d);
	bool ret = dir_emit(ctx, name.name,
			    name.len,
			    target.inum,
			    vfs_d_type(d.v->d_type));
	if (ret)
		ctx->pos = d.k->p.offset + 1;
	return ret;
}
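
/*
 * readdir: walk the dirents btree at the subvolume's current snapshot,
 * starting from ctx->pos.  Each candidate key is copied out of the btree with
 * bch2_bkey_buf_reassemble() and btree locks are dropped (drop_locks_do())
 * around dir_emit(), since dir_emit() copies to userspace and may fault and
 * block; on transaction restart we resume from ctx->pos, which is only
 * advanced after a successful dir_emit().
 */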
int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum target;
	u32 snapshot;
	struct bkey_buf sk;
	int ret;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
					  SPOS(inum.inum, ctx->pos, snapshot),
					  POS(inum.inum, U64_MAX), 0, k, ret) {
		if (k.k->type != KEY_TYPE_dirent)
			continue;

		/* dir_emit() can fault and block: */
		bch2_bkey_buf_reassemble(&sk, c, k);
		struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);

		ret = bch2_dirent_read_target(trans, inum, dirent, &target);
		if (ret < 0)
			break;
		if (ret)
			continue;

		/*
		 * read_target looks up subvolumes, we can overflow paths if the
		 * directory has many subvolumes in it
		 *
		 * XXX: btree_trans_too_many_iters() is something we'd like to
		 * get rid of, and there's no good reason to be using it here
		 * except that we don't yet have a for_each_btree_key() helper
		 * that does subvolume_get_snapshot().
		 */
		ret = drop_locks_do(trans,
			bch2_dir_emit(ctx, dirent, target)) ?:
			btree_trans_too_many_iters(trans);
		if (ret) {
			ret = ret < 0 ? ret : 0;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}