// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
#include "dirent.h"
#include "fs.h"
#include "keylist.h"
#include "str_hash.h"
#include "subvolume.h"

#include <linux/dcache.h>

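/*
 * Casefold @str into a buffer allocated from @trans, returning the result in
 * @out_cf.  Returns -EOPNOTSUPP if casefolding isn't enabled for this
 * filesystem, or an error from utf8_casefold().
 */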
int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
		  const struct qstr *str, struct qstr *out_cf)
{
	*out_cf = (struct qstr) QSTR_INIT(NULL, 0);

	if (!bch2_fs_casefold_enabled(trans->c))
		return -EOPNOTSUPP;

	unsigned char *buf = bch2_trans_kmalloc(trans, BCH_NAME_MAX + 1);
	int ret = PTR_ERR_OR_ZERO(buf);
	if (ret)
		return ret;

	ret = utf8_casefold(info->cf_encoding, str, buf, BCH_NAME_MAX + 1);
	if (ret <= 0)
		return ret;

	*out_cf = (struct qstr) QSTR_INIT(buf, ret);
	return 0;
}

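/*
 * Size of the name block in bytes: the bkey value is padded with NULs to a
 * multiple of 8 bytes, so count the trailing NUL bytes in the last u64 and
 * subtract them (and the fixed header) from the value size.
 */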
static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
	if (bkey_val_bytes(d.k) < offsetof(struct bch_dirent, d_name))
		return 0;

	unsigned bkey_u64s = bkey_val_u64s(d.k);
	unsigned bkey_bytes = bkey_u64s * sizeof(u64);
	u64 last_u64 = ((u64*)d.v)[bkey_u64s - 1];
#if CPU_BIG_ENDIAN
	unsigned trailing_nuls = last_u64 ? __builtin_ctzll(last_u64) / 8 : 64 / 8;
#else
	unsigned trailing_nuls = last_u64 ? __builtin_clzll(last_u64) / 8 : 64 / 8;
#endif

	return bkey_bytes -
		(d.v->d_casefold
		? offsetof(struct bch_dirent, d_cf_name_block.d_names)
		: offsetof(struct bch_dirent, d_name)) -
		trailing_nuls;
}

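/*
 * For casefolded dirents the original name and the casefolded name are stored
 * back to back in d_cf_name_block: bch2_dirent_get_name() returns the original
 * name, bch2_dirent_get_casefold_name() the casefolded form, which is what
 * hashing and lookups use (bch2_dirent_get_lookup_name()).
 */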
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
{
	if (d.v->d_casefold) {
		unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
		return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[0], name_len);
	} else {
		return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
	}
}

static struct qstr bch2_dirent_get_casefold_name(struct bkey_s_c_dirent d)
{
	if (d.v->d_casefold) {
		unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
		unsigned cf_name_len = le16_to_cpu(d.v->d_cf_name_block.d_cf_name_len);
		return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[name_len], cf_name_len);
	} else {
		return (struct qstr) QSTR_INIT(NULL, 0);
	}
}

static inline struct qstr bch2_dirent_get_lookup_name(struct bkey_s_c_dirent d)
{
	return d.v->d_casefold
		? bch2_dirent_get_casefold_name(d)
		: bch2_dirent_get_name(d);
}

static u64 bch2_dirent_hash(const struct bch_hash_info *info,
			    const struct qstr *name)
{
	struct bch_str_hash_ctx ctx;

	bch2_str_hash_init(&ctx, info);
	bch2_str_hash_update(&ctx, info, name->name, name->len);

	/* [0,2) reserved for dots */
	return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
}

static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
{
	return bch2_dirent_hash(info, key);
}

static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr name = bch2_dirent_get_lookup_name(d);

	return bch2_dirent_hash(info, &name);
}

static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	const struct qstr l_name = bch2_dirent_get_lookup_name(l);
	const struct qstr *r_name = _r;

	return !qstr_eq(l_name, *r_name);
}

static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
{
	struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
	struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
	const struct qstr l_name = bch2_dirent_get_lookup_name(l);
	const struct qstr r_name = bch2_dirent_get_lookup_name(r);

	return !qstr_eq(l_name, r_name);
}

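/*
 * A dirent that points to a subvolume is only visible from within its parent
 * subvolume (see also the comment in bch2_dirent_rename()).
 */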
static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

	if (d.v->d_type == DT_SUBVOL)
		return le32_to_cpu(d.v->d_parent_subvol) == inum.subvol;
	return true;
}

const struct bch_hash_desc bch2_dirent_hash_desc = {
	.btree_id	= BTREE_ID_dirents,
	.key_type	= KEY_TYPE_dirent,
	.hash_key	= dirent_hash_key,
	.hash_bkey	= dirent_hash_bkey,
	.cmp_key	= dirent_cmp_key,
	.cmp_bkey	= dirent_cmp_bkey,
	.is_visible	= dirent_is_visible,
};

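/*
 * Key validation for KEY_TYPE_dirent: checks that the names fit within the
 * bkey value, that the name has no embedded NULs or '/' and isn't "." or "..",
 * and that the dirent doesn't point back at its own directory.
 */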
int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
			 struct bkey_validate_context from)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	unsigned name_block_len = bch2_dirent_name_bytes(d);
	struct qstr d_name = bch2_dirent_get_name(d);
	struct qstr d_cf_name = bch2_dirent_get_casefold_name(d);
	int ret = 0;

	bkey_fsck_err_on(!d_name.len,
			 c, dirent_empty_name,
			 "empty name");

	bkey_fsck_err_on(d_name.len + d_cf_name.len > name_block_len,
			 c, dirent_val_too_big,
			 "dirent names exceed bkey size (%d + %d > %d)",
			 d_name.len, d_cf_name.len, name_block_len);

	/*
	 * Check new keys don't exceed the max length
	 * (older keys may be larger.)
	 */
	bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX,
			 c, dirent_name_too_long,
			 "dirent name too big (%u > %u)",
			 d_name.len, BCH_NAME_MAX);

	bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len),
			 c, dirent_name_embedded_nul,
			 "dirent has stray data after name's NUL");

	bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
			 (d_name.len == 2 && !memcmp(d_name.name, "..", 2)),
			 c, dirent_name_dot_or_dotdot,
			 "invalid name");

	bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len),
			 c, dirent_name_has_slash,
			 "name with /");

	bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
			 le64_to_cpu(d.v->d_inum) == d.k->p.inode,
			 c, dirent_to_itself,
			 "dirent points to own directory");

	if (d.v->d_casefold) {
		bkey_fsck_err_on(from.from == BKEY_VALIDATE_commit &&
				 d_cf_name.len > BCH_NAME_MAX,
				 c, dirent_cf_name_too_big,
				 "dirent w/ cf name too big (%u > %u)",
				 d_cf_name.len, BCH_NAME_MAX);

		bkey_fsck_err_on(d_cf_name.len != strnlen(d_cf_name.name, d_cf_name.len),
				 c, dirent_stray_data_after_cf_name,
				 "dirent has stray data after cf name's NUL");
	}
fsck_err:
	return ret;
}

void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
	struct qstr d_name = bch2_dirent_get_name(d);

	prt_printf(out, "%.*s", d_name.len, d_name.name);

	if (d.v->d_casefold) {
		struct qstr d_name = bch2_dirent_get_lookup_name(d);
		prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name);
	}

	prt_str(out, " ->");

	if (d.v->d_type != DT_SUBVOL)
		prt_printf(out, " %llu", le64_to_cpu(d.v->d_inum));
	else
		prt_printf(out, " %u -> %u",
			   le32_to_cpu(d.v->d_parent_subvol),
			   le32_to_cpu(d.v->d_child_subvol));

	prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}

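/*
 * Fill in the name (and, for casefolded directories, the casefolded name) of
 * an already allocated dirent, then trim the bkey value to the space actually
 * used.  If @cf_name is NULL the casefolded form is computed here with
 * utf8_casefold().
 */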
int bch2_dirent_init_name(struct bch_fs *c,
			  struct bkey_i_dirent *dirent,
			  const struct bch_hash_info *hash_info,
			  const struct qstr *name,
			  const struct qstr *cf_name)
{
	EBUG_ON(hash_info->cf_encoding == NULL && cf_name);
	int cf_len = 0;

	if (name->len > BCH_NAME_MAX)
		return -ENAMETOOLONG;

	dirent->v.d_casefold = hash_info->cf_encoding != NULL;

	if (!dirent->v.d_casefold) {
		memcpy(&dirent->v.d_name[0], name->name, name->len);
		memset(&dirent->v.d_name[name->len], 0,
		       bkey_val_bytes(&dirent->k) -
		       offsetof(struct bch_dirent, d_name) -
		       name->len);
	} else {
		if (!bch2_fs_casefold_enabled(c))
			return -EOPNOTSUPP;

		memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);

		char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len];

		if (cf_name) {
			cf_len = cf_name->len;

			memcpy(cf_out, cf_name->name, cf_name->len);
		} else {
			cf_len = utf8_casefold(hash_info->cf_encoding, name,
					       cf_out,
					       bkey_val_end(bkey_i_to_s(&dirent->k_i)) - (void *) cf_out);
			if (cf_len <= 0)
				return cf_len;
		}

		memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_len], 0,
		       bkey_val_bytes(&dirent->k) -
		       offsetof(struct bch_dirent, d_cf_name_block.d_names) -
		       (name->len + cf_len));

		dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
		dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len);

		EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len);
	}

	unsigned u64s = dirent_val_u64s(name->len, cf_len);
	BUG_ON(u64s > bkey_val_u64s(&dirent->k));
	set_bkey_val_u64s(&dirent->k, u64s);
	return 0;
}

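/*
 * Allocate and initialize a dirent key in transaction memory.  The key's
 * position isn't finalized here; callers fill it in, or it's picked by the
 * str_hash insert path.
 */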
struct bkey_i_dirent *bch2_dirent_create_key(struct btree_trans *trans,
				const struct bch_hash_info *hash_info,
				subvol_inum dir,
				u8 type,
				const struct qstr *name,
				const struct qstr *cf_name,
				u64 dst)
{
	struct bkey_i_dirent *dirent = bch2_trans_kmalloc(trans, BKEY_U64s_MAX * sizeof(u64));
	if (IS_ERR(dirent))
		return dirent;

	bkey_dirent_init(&dirent->k_i);
	dirent->k.u64s = BKEY_U64s_MAX;

	if (type != DT_SUBVOL) {
		dirent->v.d_inum = cpu_to_le64(dst);
	} else {
		dirent->v.d_parent_subvol = cpu_to_le32(dir.subvol);
		dirent->v.d_child_subvol = cpu_to_le32(dst);
	}

	dirent->v.d_type = type;
	dirent->v.d_unused = 0;

	int ret = bch2_dirent_init_name(trans->c, dirent, hash_info, name, cf_name);
	if (ret)
		return ERR_PTR(ret);

	EBUG_ON(bch2_dirent_get_name(dirent_i_to_s_c(dirent)).len != name->len);
	return dirent;
}

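/*
 * Like bch2_dirent_create(), but takes an explicit snapshot ID instead of
 * resolving it from a subvolume.
 */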
int bch2_dirent_create_snapshot(struct btree_trans *trans,
			u32 dir_subvol, u64 dir, u32 snapshot,
			const struct bch_hash_info *hash_info,
			u8 type, const struct qstr *name, u64 dst_inum,
			u64 *dir_offset,
			enum btree_iter_update_trigger_flags flags)
{
	subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = bch2_dirent_create_key(trans, hash_info, dir_inum, type, name, NULL, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	dirent->k.p.inode	= dir;
	dirent->k.p.snapshot	= snapshot;

	ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
					dir_inum, snapshot, &dirent->k_i, flags);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       u8 type, const struct qstr *name, u64 dst_inum,
		       u64 *dir_offset,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bkey_i_dirent *dirent;
	int ret;

	dirent = bch2_dirent_create_key(trans, hash_info, dir, type, name, NULL, dst_inum);
	ret = PTR_ERR_OR_ZERO(dirent);
	if (ret)
		return ret;

	ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
			    dir, &dirent->k_i, flags);
	*dir_offset = dirent->k.p.offset;

	return ret;
}

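/*
 * Resolve the target of a dirent to a subvol_inum.  For dirents that point to
 * subvolumes this requires looking up the subvolume to find its root inode;
 * returns > 0 if the dirent isn't visible from @dir's subvolume.
 */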
int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
			    struct bkey_s_c_dirent d, subvol_inum *target)
{
	struct bch_subvolume s;
	int ret = 0;

	if (d.v->d_type == DT_SUBVOL &&
	    le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
		return 1;

	if (likely(d.v->d_type != DT_SUBVOL)) {
		target->subvol	= dir.subvol;
		target->inum	= le64_to_cpu(d.v->d_inum);
	} else {
		target->subvol	= le32_to_cpu(d.v->d_child_subvol);

		ret = bch2_subvolume_get(trans, target->subvol, true, &s);

		target->inum	= le64_to_cpu(s.inode);
	}

	return ret;
}

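/*
 * Rename in the dirents btree: handles plain renames, overwrites and
 * exchanges (BCH_RENAME_EXCHANGE), including the hash collision case where
 * the key being deleted lies between the new key's hashed slot and the slot
 * it actually lands in.
 */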
int bch2_dirent_rename(struct btree_trans *trans,
		subvol_inum src_dir, struct bch_hash_info *src_hash,
		subvol_inum dst_dir, struct bch_hash_info *dst_hash,
		const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
		const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
		enum bch_rename_mode mode)
{
	struct qstr src_name_lookup, dst_name_lookup;
	struct btree_iter src_iter = {};
	struct btree_iter dst_iter = {};
	struct bkey_s_c old_src, old_dst = bkey_s_c_null;
	struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
	struct bpos dst_pos =
		POS(dst_dir.inum, bch2_dirent_hash(dst_hash, dst_name));
	unsigned src_update_flags = 0;
	bool delete_src, delete_dst;
	int ret = 0;

	memset(src_inum, 0, sizeof(*src_inum));
	memset(dst_inum, 0, sizeof(*dst_inum));

	/* Lookup src: */
	ret = bch2_maybe_casefold(trans, src_hash, src_name, &src_name_lookup);
	if (ret)
		goto out;
	old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
				   src_hash, src_dir, &src_name_lookup,
				   BTREE_ITER_intent);
	ret = bkey_err(old_src);
	if (ret)
		goto out;

	ret = bch2_dirent_read_target(trans, src_dir,
			bkey_s_c_to_dirent(old_src), src_inum);
	if (ret)
		goto out;

	/* Lookup dst: */
	ret = bch2_maybe_casefold(trans, dst_hash, dst_name, &dst_name_lookup);
	if (ret)
		goto out;
	if (mode == BCH_RENAME) {
		/*
		 * Note that we're _not_ checking if the target already exists -
		 * we're relying on the VFS to do that check for us for
		 * correctness:
		 */
		ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
				     dst_hash, dst_dir, &dst_name_lookup);
		if (ret)
			goto out;
	} else {
		old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
					    dst_hash, dst_dir, &dst_name_lookup,
					    BTREE_ITER_intent);
		ret = bkey_err(old_dst);
		if (ret)
			goto out;

		ret = bch2_dirent_read_target(trans, dst_dir,
				bkey_s_c_to_dirent(old_dst), dst_inum);
		if (ret)
			goto out;
	}

	if (mode != BCH_RENAME_EXCHANGE)
		*src_offset = dst_iter.pos.offset;

	/* Create new dst key: */
	new_dst = bch2_dirent_create_key(trans, dst_hash, dst_dir, 0, dst_name,
					 dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0);
	ret = PTR_ERR_OR_ZERO(new_dst);
	if (ret)
		goto out;

	dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
	new_dst->k.p = dst_iter.pos;

	/* Create new src key: */
	if (mode == BCH_RENAME_EXCHANGE) {
		new_src = bch2_dirent_create_key(trans, src_hash, src_dir, 0, src_name,
						 src_hash->cf_encoding ? &src_name_lookup : NULL, 0);
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
		new_src->k.p = src_iter.pos;
	} else {
		new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
		ret = PTR_ERR_OR_ZERO(new_src);
		if (ret)
			goto out;

		bkey_init(&new_src->k);
		new_src->k.p = src_iter.pos;

		if (bkey_le(dst_pos, src_iter.pos) &&
		    bkey_lt(src_iter.pos, dst_iter.pos)) {
			/*
			 * We have a hash collision for the new dst key,
			 * and new_src - the key we're deleting - is between
			 * new_dst's hashed slot and the slot we're going to be
			 * inserting it into - oops.  This will break the hash
			 * table if we don't deal with it:
			 */
			if (mode == BCH_RENAME) {
				/*
				 * If we're not overwriting, we can just insert
				 * new_dst at the src position:
				 */
				new_src = new_dst;
				new_src->k.p = src_iter.pos;
				goto out_set_src;
			} else {
				/* If we're overwriting, we can't insert new_dst
				 * at a different slot because it has to
				 * overwrite old_dst - just make sure to use a
				 * whiteout when deleting src:
				 */
				new_src->k.type = KEY_TYPE_hash_whiteout;
			}
		} else {
			/* Check if we need a whiteout to delete src: */
			ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
						       src_hash, &src_iter);
			if (ret < 0)
				goto out;

			if (ret)
				new_src->k.type = KEY_TYPE_hash_whiteout;
		}
	}

	if (new_dst->v.d_type == DT_SUBVOL)
		new_dst->v.d_parent_subvol = cpu_to_le32(dst_dir.subvol);

	if ((mode == BCH_RENAME_EXCHANGE) &&
	    new_src->v.d_type == DT_SUBVOL)
		new_src->v.d_parent_subvol = cpu_to_le32(src_dir.subvol);

	ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
	if (ret)
		goto out;
out_set_src:
	/*
	 * If we're deleting a subvolume we need to really delete the dirent,
	 * not just emit a whiteout in the current snapshot - there can only be
	 * a single dirent that points to a given subvolume.
	 *
	 * IOW, we don't maintain multiple versions in different snapshots of
	 * dirents that point to subvolumes - dirents that point to subvolumes
	 * are only visible in one particular subvolume so it's not necessary,
	 * and it would be particularly confusing for fsck to have to deal with.
	 */
	delete_src = bkey_s_c_to_dirent(old_src).v->d_type == DT_SUBVOL &&
		new_src->k.p.snapshot != old_src.k->p.snapshot;

	delete_dst = old_dst.k &&
		bkey_s_c_to_dirent(old_dst).v->d_type == DT_SUBVOL &&
		new_dst->k.p.snapshot != old_dst.k->p.snapshot;

	if (!delete_src || !bkey_deleted(&new_src->k)) {
		ret = bch2_trans_update(trans, &src_iter, &new_src->k_i, src_update_flags);
		if (ret)
			goto out;
	}

	if (delete_src) {
		bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(trans, &src_iter) ?:
			bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (delete_dst) {
		bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
		ret =   bch2_btree_iter_traverse(trans, &dst_iter) ?:
			bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
		if (ret)
			goto out;
	}

	if (mode == BCH_RENAME_EXCHANGE)
		*src_offset = new_src->k.p.offset;
	*dst_offset = new_dst->k.p.offset;
out:
	bch2_trans_iter_exit(trans, &src_iter);
	bch2_trans_iter_exit(trans, &dst_iter);
	return ret;
}

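/*
 * Look up a name in a directory and resolve it to a subvol_inum.  On success
 * @iter is left pointing at the dirent and must be exited by the caller.
 */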
int bch2_dirent_lookup_trans(struct btree_trans *trans,
			     struct btree_iter *iter,
			     subvol_inum dir,
			     const struct bch_hash_info *hash_info,
			     const struct qstr *name, subvol_inum *inum,
			     unsigned flags)
{
	struct qstr lookup_name;
	int ret = bch2_maybe_casefold(trans, hash_info, name, &lookup_name);
	if (ret)
		return ret;

	struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
					     hash_info, dir, &lookup_name, flags);
	ret = bkey_err(k);
	if (ret)
		goto err;

	ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
	if (ret > 0)
		ret = -ENOENT;
err:
	if (ret)
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
		       const struct bch_hash_info *hash_info,
		       const struct qstr *name, subvol_inum *inum)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = {};

	int ret = lockrestart_do(trans,
		bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

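/*
 * Check that a directory is empty: returns 0 if empty, an
 * ENOTEMPTY_dir_not_empty error if it still contains dirents visible in the
 * given snapshot, ignoring subvolume dirents that belong to other subvolumes.
 */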
int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_dirents,
			   SPOS(dir, 0, snapshot),
			   POS(dir, U64_MAX), 0, k, ret)
		if (k.k->type == KEY_TYPE_dirent) {
			struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
			if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
				continue;
			ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
			break;
		}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
{
	u32 snapshot;

	return bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot) ?:
		bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
}

static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
{
	struct qstr name = bch2_dirent_get_name(d);
	/*
	 * Although not required by the kernel code, updating ctx->pos is needed
	 * for the bcachefs FUSE driver. Without this update, the FUSE
	 * implementation will be stuck in an infinite loop when reading
	 * directories (via the bcachefs_fuse_readdir callback).
	 * In kernel space, ctx->pos is updated by the VFS code.
	 */
	ctx->pos = d.k->p.offset;
	bool ret = dir_emit(ctx, name.name,
		      name.len,
		      target.inum,
		      vfs_d_type(d.v->d_type));
	if (ret)
		ctx->pos = d.k->p.offset + 1;
	return !ret;
}

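/*
 * readdir: walks the dirents btree for the given directory, re-checking each
 * key's hash as it goes, and emits entries via dir_emit().  Each key is
 * copied out of the btree (bch2_bkey_buf_reassemble()) and btree locks are
 * dropped before calling dir_emit(), since it may fault and block.
 */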
int bch2_readdir(struct bch_fs *c, subvol_inum inum,
		 struct bch_hash_info *hash_info,
		 struct dir_context *ctx)
{
	struct bkey_buf sk;
	bch2_bkey_buf_init(&sk);

	int ret = bch2_trans_run(c,
		for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents,
				   POS(inum.inum, ctx->pos),
				   POS(inum.inum, U64_MAX),
				   inum.subvol, 0, k, ({
			if (k.k->type != KEY_TYPE_dirent)
				continue;

			/* dir_emit() can fault and block: */
			bch2_bkey_buf_reassemble(&sk, c, k);
			struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);

			subvol_inum target;

			bool need_second_pass = false;
			int ret2 = bch2_str_hash_check_key(trans, NULL, &bch2_dirent_hash_desc,
							   hash_info, &iter, k, &need_second_pass) ?:
				bch2_dirent_read_target(trans, inum, dirent, &target);
			if (ret2 > 0)
				continue;

			ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target));
		})));

	bch2_bkey_buf_exit(&sk, c);

	return ret < 0 ? ret : 0;
}

/* fsck */

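/*
 * Find any inode with the given inode number, regardless of snapshot: used
 * below when removing a dirent by position, where only the directory's hash
 * info is needed.
 */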
static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
			      struct bch_inode_unpacked *inode)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
				     BTREE_ITER_all_snapshots, k, ret) {
		if (k.k->p.offset != inode_nr)
			break;
		if (!bkey_is_inode(k.k))
			continue;
		ret = bch2_inode_unpack(k, inode);
		goto found;
	}
	ret = bch_err_throw(trans->c, ENOENT_inode);
found:
	bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

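/*
 * Delete a dirent by position, for fsck: looks up the directory inode to get
 * its hash info, then deletes through the hash layer so whiteouts are
 * handled correctly.
 */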
int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bch_inode_unpacked dir_inode;
	struct bch_hash_info dir_hash_info;
	int ret;

	ret = lookup_first_inode(trans, pos.inode, &dir_inode);
	if (ret)
		goto err;

	dir_hash_info = bch2_hash_info_init(c, &dir_inode);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);

	ret =   bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
				    &dir_hash_info, &iter,
				    BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &iter);
err:
	bch_err_fn(c, ret);
	return ret;
}
763