xref: /linux/fs/bcachefs/fs.c (revision 8f72c31f45a575d156cfe964099b4cfcc02e03eb)
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3 
4 #include "bcachefs.h"
5 #include "acl.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "chardev.h"
10 #include "dirent.h"
11 #include "errcode.h"
12 #include "extents.h"
13 #include "fs.h"
14 #include "fs-common.h"
15 #include "fs-io.h"
16 #include "fs-ioctl.h"
17 #include "fs-io-buffered.h"
18 #include "fs-io-direct.h"
19 #include "fs-io-pagecache.h"
20 #include "fsck.h"
21 #include "inode.h"
22 #include "io_read.h"
23 #include "journal.h"
24 #include "keylist.h"
25 #include "quota.h"
26 #include "snapshot.h"
27 #include "super.h"
28 #include "xattr.h"
29 #include "trace.h"
30 
31 #include <linux/aio.h>
32 #include <linux/backing-dev.h>
33 #include <linux/exportfs.h>
34 #include <linux/fiemap.h>
35 #include <linux/fs_context.h>
36 #include <linux/module.h>
37 #include <linux/pagemap.h>
38 #include <linux/posix_acl.h>
39 #include <linux/random.h>
40 #include <linux/seq_file.h>
41 #include <linux/statfs.h>
42 #include <linux/string.h>
43 #include <linux/xattr.h>
44 
45 static struct kmem_cache *bch2_inode_cache;
46 
47 static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
48 				struct bch_inode_info *,
49 				struct bch_inode_unpacked *,
50 				struct bch_subvolume *);
51 
/*
 * Propagate a just-written unpacked btree inode back into the VFS inode:
 * nlink/uid/gid/mode are copied unconditionally, timestamps only for the
 * ATTR_* bits set in @fields; the unpacked inode is then cached in
 * inode->ei_inode.
 *
 * Must be called with the btree position for this inode locked (asserted
 * below): that lock, not ei_update_lock, is what protects ei_inode (see
 * the comment in bch2_write_inode()).
 */
void bch2_inode_update_after_write(struct btree_trans *trans,
				   struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   unsigned fields)
{
	struct bch_fs *c = trans->c;

	BUG_ON(bi->bi_inum != inode->v.i_ino);

	/* Caller must hold the btree node lock covering this inode's key: */
	bch2_assert_pos_locked(trans, BTREE_ID_inodes, POS(0, bi->bi_inum));

	set_nlink(&inode->v, bch2_inode_nlink_get(bi));
	i_uid_write(&inode->v, bi->bi_uid);
	i_gid_write(&inode->v, bi->bi_gid);
	inode->v.i_mode	= bi->bi_mode;

	if (fields & ATTR_ATIME)
		inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime));
	if (fields & ATTR_MTIME)
		inode_set_mtime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_mtime));
	if (fields & ATTR_CTIME)
		inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));

	/* Cache the unpacked inode for later in-memory use: */
	inode->ei_inode		= *bi;

	bch2_inode_flags_to_vfs(inode);
}
79 
/*
 * Read-modify-write an inode in the btree, then sync the result back to
 * the VFS inode. The whole transaction is retried on transaction restart,
 * so @set may be invoked more than once and must be idempotent.
 *
 * @set:    optional callback applied to the freshly peeked unpacked inode
 *          inside the transaction; NULL means "write back unchanged".
 * @p:      opaque argument passed to @set.
 * @fields: ATTR_* bits forwarded to bch2_inode_update_after_write().
 *
 * Returns 0 or a negative error. A missing inode (ENOENT) is escalated to
 * a fatal filesystem error.
 */
int __must_check bch2_write_inode(struct bch_fs *c,
				  struct bch_inode_info *inode,
				  inode_set_fn set,
				  void *p, unsigned fields)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret   = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
				BTREE_ITER_intent) ?:
		(set ? set(trans, inode, &inode_u, p) : 0) ?:
		bch2_inode_write(trans, &iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);

	/*
	 * the btree node lock protects inode->ei_inode, not ei_update_lock;
	 * this is important for inode updates via bchfs_write_index_update
	 */
	if (!ret)
		bch2_inode_update_after_write(trans, inode, &inode_u, fields);

	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
			     "%s: inode %u:%llu not found when updating",
			     bch2_err_str(ret),
			     inode_inum(inode).subvol,
			     inode_inum(inode).inum);

	bch2_trans_put(trans);
	/* positive returns (e.g. from @set) are not errors: */
	return ret < 0 ? ret : 0;
}
119 
bch2_fs_quota_transfer(struct bch_fs * c,struct bch_inode_info * inode,struct bch_qid new_qid,unsigned qtypes,enum quota_acct_mode mode)120 int bch2_fs_quota_transfer(struct bch_fs *c,
121 			   struct bch_inode_info *inode,
122 			   struct bch_qid new_qid,
123 			   unsigned qtypes,
124 			   enum quota_acct_mode mode)
125 {
126 	unsigned i;
127 	int ret;
128 
129 	qtypes &= enabled_qtypes(c);
130 
131 	for (i = 0; i < QTYP_NR; i++)
132 		if (new_qid.q[i] == inode->ei_qid.q[i])
133 			qtypes &= ~(1U << i);
134 
135 	if (!qtypes)
136 		return 0;
137 
138 	mutex_lock(&inode->ei_quota_lock);
139 
140 	ret = bch2_quota_transfer(c, qtypes, new_qid,
141 				  inode->ei_qid,
142 				  inode->v.i_blocks +
143 				  inode->ei_quota_reserved,
144 				  mode);
145 	if (!ret)
146 		for (i = 0; i < QTYP_NR; i++)
147 			if (qtypes & (1 << i))
148 				inode->ei_qid.q[i] = new_qid.q[i];
149 
150 	mutex_unlock(&inode->ei_quota_lock);
151 
152 	return ret;
153 }
154 
bch2_iget5_test(struct inode * vinode,void * p)155 static int bch2_iget5_test(struct inode *vinode, void *p)
156 {
157 	struct bch_inode_info *inode = to_bch_ei(vinode);
158 	subvol_inum *inum = p;
159 
160 	return inode->ei_subvol == inum->subvol &&
161 		inode->ei_inode.bi_inum == inum->inum;
162 }
163 
bch2_iget5_set(struct inode * vinode,void * p)164 static int bch2_iget5_set(struct inode *vinode, void *p)
165 {
166 	struct bch_inode_info *inode = to_bch_ei(vinode);
167 	subvol_inum *inum = p;
168 
169 	inode->v.i_ino		= inum->inum;
170 	inode->ei_subvol	= inum->subvol;
171 	inode->ei_inode.bi_inum	= inum->inum;
172 	return 0;
173 }
174 
/* Inode hash table key: mixes the subvolume and both halves of the inum. */
static unsigned bch2_inode_hash(subvol_inum inum)
{
	u32 inum_hi = inum.inum >> 32;
	u32 inum_lo = inum.inum;

	return jhash_3words(inum.subvol, inum_hi, inum_lo, JHASH_INITVAL);
}
179 
/*
 * Non-blocking lookup in the inode hash table; returns NULL if the inode
 * isn't cached (or is being freed/instantiated).
 */
struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
{
	struct inode *vinode = ilookup5_nowait(c->vfs_sb,
					       bch2_inode_hash(inum),
					       bch2_iget5_test,
					       &inum);
	return to_bch_ei(vinode);
}
187 
/*
 * Insert a fully constructed inode into the inode hash table.
 *
 * If another thread raced us and already inserted an inode for this
 * subvol:inum, ours is discarded and the existing one is returned --
 * callers must always use the return value, not the inode passed in.
 */
static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_inode_info *inode)
{
	subvol_inum inum = inode_inum(inode);
	struct bch_inode_info *old = to_bch_ei(inode_insert5(&inode->v,
				      bch2_inode_hash(inum),
				      bch2_iget5_test,
				      bch2_iget5_set,
				      &inum));
	BUG_ON(!old);

	if (unlikely(old != inode)) {
		/*
		 * bcachefs doesn't use I_NEW; we have no use for it since we
		 * only insert fully created inodes in the inode hash table. But
		 * discard_new_inode() expects it to be set...
		 */
		inode->v.i_state |= I_NEW;
		/*
		 * We don't want bch2_evict_inode() to delete the inode on disk,
		 * we just raced and had another inode in cache. Normally new
		 * inodes don't have nlink == 0 - except tmpfiles do...
		 */
		set_nlink(&inode->v, 1);
		discard_new_inode(&inode->v);
		inode = old;
	} else {
		/* Add to the fs-wide list of VFS inodes: */
		mutex_lock(&c->vfs_inodes_lock);
		list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
		mutex_unlock(&c->vfs_inodes_lock);
		/*
		 * Again, I_NEW makes no sense for bcachefs. This is only needed
		 * for clearing I_NEW, but since the inode was already fully
		 * created and initialized we didn't actually want
		 * inode_insert5() to set it for us.
		 */
		unlock_new_inode(&inode->v);
	}

	return inode;
}
228 
/*
 * Evaluate @_do with the given PF_MEMALLOC_* flags set on the current
 * task, restoring the task's allocation flags afterwards.
 *
 * NOTE(review): flags are saved with memalloc_flags_save() but restored
 * via memalloc_noreclaim_restore() -- these look like equivalent restore
 * paths, but confirm against include/linux/sched/mm.h.
 */
#define memalloc_flags_do(_flags, _do)						\
({										\
	unsigned _saved_flags = memalloc_flags_save(_flags);			\
	typeof(_do) _ret = _do;							\
	memalloc_noreclaim_restore(_saved_flags);				\
	_ret;									\
})
236 
/*
 * bcachefs never allocates inodes through the VFS .alloc_inode path --
 * inodes are created via __bch2_new_inode() instead, so allocation can be
 * done while btree locks are (or can be) dropped. Reaching this is a bug.
 */
static struct inode *bch2_alloc_inode(struct super_block *sb)
{
	BUG();
}
241 
__bch2_new_inode(struct bch_fs * c)242 static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
243 {
244 	struct bch_inode_info *inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
245 	if (!inode)
246 		return NULL;
247 
248 	inode_init_once(&inode->v);
249 	mutex_init(&inode->ei_update_lock);
250 	two_state_lock_init(&inode->ei_pagecache_lock);
251 	INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
252 	inode->ei_flags = 0;
253 	mutex_init(&inode->ei_quota_lock);
254 	memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
255 
256 	if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
257 		kmem_cache_free(bch2_inode_cache, inode);
258 		return NULL;
259 	}
260 
261 	return inode;
262 }
263 
264 /*
265  * Allocate a new inode, dropping/retaking btree locks if necessary:
266  */
bch2_new_inode(struct btree_trans * trans)267 static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
268 {
269 	struct bch_inode_info *inode =
270 		memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN,
271 				  __bch2_new_inode(trans->c));
272 
273 	if (unlikely(!inode)) {
274 		int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c)) ? 0 : -ENOMEM);
275 		if (ret && inode) {
276 			__destroy_inode(&inode->v);
277 			kmem_cache_free(bch2_inode_cache, inode);
278 		}
279 		if (ret)
280 			return ERR_PTR(ret);
281 	}
282 
283 	return inode;
284 }
285 
/*
 * Get the VFS inode for @inum, loading it from the btree if it isn't
 * already cached. Returns a referenced inode or an ERR_PTR.
 */
struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
	/* Fast path: already in the inode hash table */
	struct bch_inode_info *inode =
		to_bch_ei(ilookup5_nowait(c->vfs_sb,
					  bch2_inode_hash(inum),
					  bch2_iget5_test,
					  &inum));
	if (inode)
		return &inode->v;

	struct btree_trans *trans = bch2_trans_get(c);

	/* Slow path: read subvolume + inode from the btree, build a VFS inode */
	struct bch_inode_unpacked inode_u;
	struct bch_subvolume subvol;
	int ret = lockrestart_do(trans,
		bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
		bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?:
		PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
	if (!ret) {
		bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
		/* may return a different inode if we raced with another thread: */
		inode = bch2_inode_insert(c, inode);
	}
	bch2_trans_put(trans);

	return ret ? ERR_PTR(ret) : &inode->v;
}
312 
/*
 * Common create path for mknod/mkdir/symlink/tmpfile, and for subvolume
 * creation when @snapshot_src is nonzero.
 *
 * ACLs and the VFS inode are allocated before the btree transaction so
 * that nothing can fail after the transaction commits. Returns the hashed
 * inode on success, an ERR_PTR on failure.
 */
struct bch_inode_info *
__bch2_create(struct mnt_idmap *idmap,
	      struct bch_inode_info *dir, struct dentry *dentry,
	      umode_t mode, dev_t rdev, subvol_inum snapshot_src,
	      unsigned flags)
{
	struct bch_fs *c = dir->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct bch_inode_unpacked dir_u;
	struct bch_inode_info *inode;
	struct bch_inode_unpacked inode_u;
	struct posix_acl *default_acl = NULL, *acl = NULL;
	subvol_inum inum;
	struct bch_subvolume subvol;
	u64 journal_seq = 0;
	int ret;

	/*
	 * preallocate acls + vfs inode before btree transaction, so that
	 * nothing can fail after the transaction succeeds:
	 */
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
	if (ret)
		return ERR_PTR(ret);
#endif
	inode = __bch2_new_inode(c);
	if (unlikely(!inode)) {
		inode = ERR_PTR(-ENOMEM);
		goto err;
	}

	bch2_inode_init_early(c, &inode_u);

	/* tmpfiles aren't linked into the directory, so no dir update lock: */
	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_lock(&dir->ei_update_lock);

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret   = bch2_subvol_is_ro_trans(trans, dir->ei_subvol) ?:
		bch2_create_trans(trans,
				  inode_inum(dir), &dir_u, &inode_u,
				  !(flags & BCH_CREATE_TMPFILE)
				  ? &dentry->d_name : NULL,
				  from_kuid(i_user_ns(&dir->v), current_fsuid()),
				  from_kgid(i_user_ns(&dir->v), current_fsgid()),
				  mode, rdev,
				  default_acl, acl, snapshot_src, flags) ?:
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
				KEY_TYPE_QUOTA_PREALLOC);
	if (unlikely(ret))
		goto err_before_quota;

	/* when creating a subvolume, bi_subvol is the new subvolume: */
	inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
	inum.inum = inode_u.bi_inum;

	ret   = bch2_subvolume_get(trans, inum.subvol, true,
				   BTREE_ITER_with_updates, &subvol) ?:
		bch2_trans_commit(trans, NULL, &journal_seq, 0);
	if (unlikely(ret)) {
		/* commit failed: release the quota preallocated above */
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
err_before_quota:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;
		goto err_trans;
	}

	if (!(flags & BCH_CREATE_TMPFILE)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&dir->ei_update_lock);
	}

	bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);

	set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
	set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);

	/*
	 * we must insert the new inode into the inode cache before calling
	 * bch2_trans_exit() and dropping locks, else we could race with another
	 * thread pulling the inode in and modifying it:
	 */
	inode = bch2_inode_insert(c, inode);
	bch2_trans_put(trans);
err:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return inode;
err_trans:
	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_unlock(&dir->ei_update_lock);

	bch2_trans_put(trans);
	/* inode was never made visible; mark bad and drop our ref */
	make_bad_inode(&inode->v);
	iput(&inode->v);
	inode = ERR_PTR(ret);
	goto err;
}
415 
416 /* methods */
417 
/*
 * Look up @name in directory @dir and return its VFS inode, constructing
 * and hashing a new one from the btree if it isn't cached.
 *
 * Also verifies that inodes which should have a backpointer point back at
 * this dirent; on mismatch the filesystem is flagged inconsistent and
 * -ENOENT is returned.
 */
static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
			subvol_inum dir, struct bch_hash_info *dir_hash_info,
			const struct qstr *name)
{
	struct bch_fs *c = trans->c;
	struct btree_iter dirent_iter = {};
	subvol_inum inum = {};
	struct printbuf buf = PRINTBUF;

	struct bkey_s_c k = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
					     dir_hash_info, dir, name, 0);
	int ret = bkey_err(k);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * NOTE(review): a positive return presumably means the dirent has no
	 * visible target here -- treated as ENOENT; confirm against
	 * bch2_dirent_read_target().
	 */
	ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto err;

	/* Fast path: inode already in the hash table */
	struct bch_inode_info *inode =
		to_bch_ei(ilookup5_nowait(c->vfs_sb,
					  bch2_inode_hash(inum),
					  bch2_iget5_test,
					  &inum));
	if (inode)
		goto out;

	struct bch_subvolume subvol;
	struct bch_inode_unpacked inode_u;
	ret =   bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
		bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
		PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));

	/* A dirent pointing at a missing inode is a filesystem inconsistency: */
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
				c, "dirent to missing inode:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
	if (ret)
		goto err;

	/* regular files may have hardlinks: */
	if (bch2_fs_inconsistent_on(bch2_inode_should_have_bp(&inode_u) &&
				    !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)),
				    c,
				    "dirent points to inode that does not point back:\n  %s",
				    (bch2_bkey_val_to_text(&buf, c, k),
				     prt_printf(&buf, "\n  "),
				     bch2_inode_unpacked_to_text(&buf, &inode_u),
				     buf.buf))) {
		ret = -ENOENT;
		goto err;
	}

	bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
	inode = bch2_inode_insert(c, inode);
out:
	bch2_trans_iter_exit(trans, &dirent_iter);
	printbuf_exit(&buf);
	return inode;
err:
	inode = ERR_PTR(ret);
	goto out;
}
482 
bch2_lookup(struct inode * vdir,struct dentry * dentry,unsigned int flags)483 static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
484 				  unsigned int flags)
485 {
486 	struct bch_fs *c = vdir->i_sb->s_fs_info;
487 	struct bch_inode_info *dir = to_bch_ei(vdir);
488 	struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
489 
490 	struct bch_inode_info *inode;
491 	bch2_trans_do(c, NULL, NULL, 0,
492 		PTR_ERR_OR_ZERO(inode = bch2_lookup_trans(trans, inode_inum(dir),
493 							  &hash, &dentry->d_name)));
494 	if (IS_ERR(inode))
495 		inode = NULL;
496 
497 	return d_splice_alias(&inode->v, dentry);
498 }
499 
/*
 * VFS ->mknod: create an inode of arbitrary type via the common create
 * path (no snapshot source, no special create flags) and instantiate the
 * dentry.
 */
static int bch2_mknod(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct bch_inode_info *inode;

	inode = __bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
			      (subvol_inum) { 0 }, 0);
	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_instantiate(dentry, &inode->v);
	return 0;
}
514 
/* VFS ->create: a regular file; @excl is unused here. */
static int bch2_create(struct mnt_idmap *idmap,
		       struct inode *vdir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	return bch2_mknod(idmap, vdir, dentry, S_IFREG|mode, 0);
}
521 
/*
 * Create a hard link to @inode in @dir, syncing both VFS inodes with the
 * committed result on success.
 *
 * The target inode's ei_update_lock is held across the transaction so the
 * cached ei_inode stays coherent with what was committed.
 */
static int __bch2_link(struct bch_fs *c,
		       struct bch_inode_info *inode,
		       struct bch_inode_info *dir,
		       struct dentry *dentry)
{
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	mutex_lock(&inode->ei_update_lock);
	struct btree_trans *trans = bch2_trans_get(c);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_link_trans(trans,
					inode_inum(dir),   &dir_u,
					inode_inum(inode), &inode_u,
					&dentry->d_name));

	if (likely(!ret)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME);
	}

	bch2_trans_put(trans);
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}
549 
bch2_link(struct dentry * old_dentry,struct inode * vdir,struct dentry * dentry)550 static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
551 		     struct dentry *dentry)
552 {
553 	struct bch_fs *c = vdir->i_sb->s_fs_info;
554 	struct bch_inode_info *dir = to_bch_ei(vdir);
555 	struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
556 	int ret;
557 
558 	lockdep_assert_held(&inode->v.i_rwsem);
559 
560 	ret   = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
561 		bch2_subvol_is_ro(c, inode->ei_subvol) ?:
562 		__bch2_link(c, inode, dir, dentry);
563 	if (unlikely(ret))
564 		return bch2_err_class(ret);
565 
566 	ihold(&inode->v);
567 	d_instantiate(dentry, &inode->v);
568 	return 0;
569 }
570 
/*
 * Remove @dentry from @vdir; also used for subvolume/snapshot deletion
 * (@deleting_snapshot is passed through to bch2_unlink_trans()).
 *
 * Both the directory's and the victim inode's update locks are held
 * across the transaction so the cached unpacked inodes stay coherent.
 */
int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
		  bool deleting_snapshot)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);

	struct btree_trans *trans = bch2_trans_get(c);

	ret = commit_do(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc,
		bch2_unlink_trans(trans,
				  inode_inum(dir), &dir_u,
				  &inode_u, &dentry->d_name,
				  deleting_snapshot));
	if (unlikely(ret))
		goto err;

	bch2_inode_update_after_write(trans, dir, &dir_u,
				      ATTR_MTIME|ATTR_CTIME);
	/*
	 * NOTE(review): only ATTR_MTIME is propagated for the victim inode;
	 * confirm the ATTR_CTIME omission is intentional.
	 */
	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_MTIME);

	if (inode_u.bi_subvol) {
		/*
		 * Subvolume deletion is asynchronous, but we still want to tell
		 * the VFS that it's been deleted here:
		 */
		set_nlink(&inode->v, 0);
	}
err:
	bch2_trans_put(trans);
	bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);

	return ret;
}
611 
bch2_unlink(struct inode * vdir,struct dentry * dentry)612 static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
613 {
614 	struct bch_inode_info *dir= to_bch_ei(vdir);
615 	struct bch_fs *c = dir->v.i_sb->s_fs_info;
616 
617 	int ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
618 		__bch2_unlink(vdir, dentry, false);
619 	return bch2_err_class(ret);
620 }
621 
/*
 * VFS ->symlink: the inode is created as an unlinked tmpfile, the target
 * is written through the page cache and flushed to disk, and only then is
 * the inode linked into the directory.
 */
static int bch2_symlink(struct mnt_idmap *idmap,
			struct inode *vdir, struct dentry *dentry,
			const char *symname)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
	int ret;

	inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	inode_lock(&inode->v);
	/* the symlink body includes the terminating NUL: */
	ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
	inode_unlock(&inode->v);

	if (unlikely(ret))
		goto err;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
	if (unlikely(ret))
		goto err;

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		goto err;

	d_instantiate(dentry, &inode->v);
	return 0;
err:
	/* drop the tmpfile; it was never linked into the directory */
	iput(&inode->v);
	return bch2_err_class(ret);
}
656 
/* VFS ->mkdir: directories go through the common mknod path. */
static int bch2_mkdir(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry, umode_t mode)
{
	return bch2_mknod(idmap, vdir, dentry, S_IFDIR|mode, 0);
}
662 
/*
 * VFS ->rename2: supports plain rename, RENAME_NOREPLACE and
 * RENAME_EXCHANGE. Project-quota accounting is transferred when an inode
 * moves between directories with differing project ids.
 */
static int bch2_rename2(struct mnt_idmap *idmap,
			struct inode *src_vdir, struct dentry *src_dentry,
			struct inode *dst_vdir, struct dentry *dst_dentry,
			unsigned flags)
{
	struct bch_fs *c = src_vdir->i_sb->s_fs_info;
	struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
	struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
	struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
	struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
	struct bch_inode_unpacked dst_dir_u, src_dir_u;
	struct bch_inode_unpacked src_inode_u, dst_inode_u;
	struct btree_trans *trans;
	enum bch_rename_mode mode = flags & RENAME_EXCHANGE
		? BCH_RENAME_EXCHANGE
		: dst_dentry->d_inode
		? BCH_RENAME_OVERWRITE : BCH_RENAME;
	int ret;

	if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
		return -EINVAL;

	if (mode == BCH_RENAME_OVERWRITE) {
		ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
						   0, LLONG_MAX);
		if (ret)
			return ret;
	}

	/* Lock all (up to four) involved inodes in a consistent order: */
	bch2_lock_inodes(INODE_UPDATE_LOCK,
			 src_dir,
			 dst_dir,
			 src_inode,
			 dst_inode);

	trans = bch2_trans_get(c);

	ret   = bch2_subvol_is_ro_trans(trans, src_dir->ei_subvol) ?:
		bch2_subvol_is_ro_trans(trans, dst_dir->ei_subvol);
	if (ret)
		goto err;

	/* Preallocate project quota in the destination's project, if changing: */
	if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, src_inode,
					     dst_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	if (mode == BCH_RENAME_EXCHANGE &&
	    inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, dst_inode,
					     src_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_rename_trans(trans,
					  inode_inum(src_dir), &src_dir_u,
					  inode_inum(dst_dir), &dst_dir_u,
					  &src_inode_u,
					  &dst_inode_u,
					  &src_dentry->d_name,
					  &dst_dentry->d_name,
					  mode));
	if (unlikely(ret))
		goto err;

	BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
	BUG_ON(dst_inode &&
	       dst_inode->v.i_ino != dst_inode_u.bi_inum);

	bch2_inode_update_after_write(trans, src_dir, &src_dir_u,
				      ATTR_MTIME|ATTR_CTIME);

	if (src_dir != dst_dir)
		bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u,
					      ATTR_MTIME|ATTR_CTIME);

	bch2_inode_update_after_write(trans, src_inode, &src_inode_u,
				      ATTR_CTIME);

	if (dst_inode)
		bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u,
					      ATTR_CTIME);
err:
	bch2_trans_put(trans);

	/*
	 * Re-sync quota accounting with the inodes' now-current qids: a no-op
	 * when nothing changed, an undo of the preallocation above on failure.
	 */
	bch2_fs_quota_transfer(c, src_inode,
			       bch_qid(&src_inode->ei_inode),
			       1 << QTYP_PRJ,
			       KEY_TYPE_QUOTA_NOCHECK);
	if (dst_inode)
		bch2_fs_quota_transfer(c, dst_inode,
				       bch_qid(&dst_inode->ei_inode),
				       1 << QTYP_PRJ,
				       KEY_TYPE_QUOTA_NOCHECK);

	bch2_unlock_inodes(INODE_UPDATE_LOCK,
			   src_dir,
			   dst_dir,
			   src_inode,
			   dst_inode);

	return bch2_err_class(ret);
}
774 
bch2_setattr_copy(struct mnt_idmap * idmap,struct bch_inode_info * inode,struct bch_inode_unpacked * bi,struct iattr * attr)775 static void bch2_setattr_copy(struct mnt_idmap *idmap,
776 			      struct bch_inode_info *inode,
777 			      struct bch_inode_unpacked *bi,
778 			      struct iattr *attr)
779 {
780 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
781 	unsigned int ia_valid = attr->ia_valid;
782 
783 	if (ia_valid & ATTR_UID)
784 		bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
785 	if (ia_valid & ATTR_GID)
786 		bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
787 
788 	if (ia_valid & ATTR_SIZE)
789 		bi->bi_size = attr->ia_size;
790 
791 	if (ia_valid & ATTR_ATIME)
792 		bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
793 	if (ia_valid & ATTR_MTIME)
794 		bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
795 	if (ia_valid & ATTR_CTIME)
796 		bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);
797 
798 	if (ia_valid & ATTR_MODE) {
799 		umode_t mode = attr->ia_mode;
800 		kgid_t gid = ia_valid & ATTR_GID
801 			? attr->ia_gid
802 			: inode->v.i_gid;
803 
804 		if (!in_group_p(gid) &&
805 		    !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
806 			mode &= ~S_ISGID;
807 		bi->bi_mode = mode;
808 	}
809 }
810 
/*
 * Apply non-size attribute changes (chown/chmod/timestamps) to an inode:
 * quota transfer first, then a transactional inode + ACL update that is
 * retried on transaction restart, and finally the VFS inode is synced.
 */
int bch2_setattr_nonsize(struct mnt_idmap *idmap,
			 struct bch_inode_info *inode,
			 struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_qid qid;
	struct btree_trans *trans;
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl = NULL;
	int ret;

	mutex_lock(&inode->ei_update_lock);

	qid = inode->ei_qid;

	if (attr->ia_valid & ATTR_UID)
		qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);

	if (attr->ia_valid & ATTR_GID)
		qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);

	/* Move quota accounting before the inode itself changes owner: */
	ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
				     KEY_TYPE_QUOTA_PREALLOC);
	if (ret)
		goto err;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);
	/* free any ACL produced by a previous, restarted iteration: */
	kfree(acl);
	acl = NULL;

	ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_intent);
	if (ret)
		goto btree_err;

	bch2_setattr_copy(idmap, inode, &inode_u, attr);

	if (attr->ia_valid & ATTR_MODE) {
		/* chmod may need to rewrite the access ACL to match the mode: */
		ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u,
				     inode_u.bi_mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret =   bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err_trans;

	bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid);

	if (acl)
		set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
err_trans:
	bch2_trans_put(trans);
err:
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}
880 
/*
 * VFS ->getattr: fill @stat from the cached VFS inode plus bcachefs
 * extensions (subvolume id, birth time, DIO alignment, inode flags).
 */
static int bch2_getattr(struct mnt_idmap *idmap,
			const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned query_flags)
{
	struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 bi_flags = inode->ei_inode.bi_flags;

	/* Generic fields, served straight from the VFS inode: */
	stat->dev	= inode->v.i_sb->s_dev;
	stat->ino	= inode->v.i_ino;
	stat->mode	= inode->v.i_mode;
	stat->nlink	= inode->v.i_nlink;
	stat->uid	= inode->v.i_uid;
	stat->gid	= inode->v.i_gid;
	stat->rdev	= inode->v.i_rdev;
	stat->size	= i_size_read(&inode->v);
	stat->atime	= inode_get_atime(&inode->v);
	stat->mtime	= inode_get_mtime(&inode->v);
	stat->ctime	= inode_get_ctime(&inode->v);
	stat->blksize	= block_bytes(c);
	stat->blocks	= inode->v.i_blocks;

	/* Always report the subvolume this inode lives in: */
	stat->subvol	= inode->ei_subvol;
	stat->result_mask |= STATX_SUBVOL;

	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->v.i_mode)) {
		stat->result_mask |= STATX_DIOALIGN;
		/*
		 * this is incorrect; we should be tracking this in superblock,
		 * and checking the alignment of open devices
		 */
		stat->dio_mem_align = SECTOR_SIZE;
		stat->dio_offset_align = block_bytes(c);
	}

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
	}

	/* Map bcachefs inode flags onto statx attributes: */
	if (bi_flags & BCH_INODE_immutable)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= STATX_ATTR_IMMUTABLE;

	if (bi_flags & BCH_INODE_append)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= STATX_ATTR_APPEND;

	if (bi_flags & BCH_INODE_nodump)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= STATX_ATTR_NODUMP;

	return 0;
}
934 
bch2_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * iattr)935 static int bch2_setattr(struct mnt_idmap *idmap,
936 			struct dentry *dentry, struct iattr *iattr)
937 {
938 	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
939 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
940 	int ret;
941 
942 	lockdep_assert_held(&inode->v.i_rwsem);
943 
944 	ret   = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
945 		setattr_prepare(idmap, dentry, iattr);
946 	if (ret)
947 		return ret;
948 
949 	return iattr->ia_valid & ATTR_SIZE
950 		? bchfs_truncate(idmap, inode, iattr)
951 		: bch2_setattr_nonsize(idmap, inode, iattr);
952 }
953 
/*
 * VFS ->tmpfile: create an unlinked inode and attach it to the open file.
 */
static int bch2_tmpfile(struct mnt_idmap *idmap,
			struct inode *vdir, struct file *file, umode_t mode)
{
	struct bch_inode_info *inode;

	inode = __bch2_create(idmap, to_bch_ei(vdir), file->f_path.dentry,
			      mode, 0, (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_mark_tmpfile(file, &inode->v);
	d_instantiate(file->f_path.dentry, &inode->v);
	return finish_open_simple(file, 0);
}
969 
/*
 * Emit fiemap records for a single extent key.
 *
 * Keys with direct data pointers produce one fiemap extent per pointer
 * (so replicated extents report every copy); inline-data keys and
 * reservations each produce a single record.  A nonzero return from
 * fiemap_fill_next_extent() is propagated so the caller stops iterating.
 */
static int bch2_fill_extent(struct bch_fs *c,
			    struct fiemap_extent_info *info,
			    struct bkey_s_c k, unsigned flags)
{
	if (bkey_extent_is_direct_data(k.k)) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		int ret;

		/* reflink'd data is shared with other files/snapshots */
		if (k.k->type == KEY_TYPE_reflink_v)
			flags |= FIEMAP_EXTENT_SHARED;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int flags2 = 0;
			u64 offset = p.ptr.offset;

			if (p.ptr.unwritten)
				flags2 |= FIEMAP_EXTENT_UNWRITTEN;

			/*
			 * for compressed extents we can't report a meaningful
			 * disk offset within the compressed payload
			 */
			if (p.crc.compression_type)
				flags2 |= FIEMAP_EXTENT_ENCODED;
			else
				offset += p.crc.offset;

			if ((offset & (block_sectors(c) - 1)) ||
			    (k.k->size & (block_sectors(c) - 1)))
				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;

			/* fiemap wants bytes; our offsets/sizes are in sectors */
			ret = fiemap_fill_next_extent(info,
						bkey_start_offset(k.k) << 9,
						offset << 9,
						k.k->size << 9, flags|flags2);
			if (ret)
				return ret;
		}

		return 0;
	} else if (bkey_extent_is_inline_data(k.k)) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DATA_INLINE);
	} else if (k.k->type == KEY_TYPE_reservation) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DELALLOC|
					       FIEMAP_EXTENT_UNWRITTEN);
	} else {
		/* caller only passes data or reservation keys */
		BUG();
	}
}
1025 
/*
 * VFS ->fiemap: walk this inode's extents and report them to userspace.
 *
 * Extents are emitted one behind the iterator (buffered in @prev) so the
 * final one can be flagged FIEMAP_EXTENT_LAST.  The walk runs inside a
 * btree transaction and restarts from the last reached offset on
 * transaction restart.
 */
static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
		       u64 start, u64 len)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *ei = to_bch_ei(vinode);
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_buf cur, prev;
	unsigned offset_into_extent, sectors;
	bool have_extent = false;
	u32 snapshot;
	int ret = 0;

	ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
	if (start + len < start)
		return -EINVAL;

	/* byte offset -> sectors */
	start >>= 9;

	bch2_bkey_buf_init(&cur);
	bch2_bkey_buf_init(&prev);
	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(ei->v.i_ino, start, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, end)).k &&
	       !(ret = bkey_err(k))) {
		enum btree_id data_btree = BTREE_ID_extents;

		/* skip keys that carry no data and aren't reservations */
		if (!bkey_extent_is_data(k.k) &&
		    k.k->type != KEY_TYPE_reservation) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		offset_into_extent	= iter.pos.offset -
			bkey_start_offset(k.k);
		sectors			= k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&cur, c, k);

		/* resolve reflink pointers to the indirect extent they reference */
		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &cur);
		if (ret)
			break;

		k = bkey_i_to_s_c(cur.k);
		bch2_bkey_buf_realloc(&prev, c, k.k->u64s);

		sectors = min(sectors, k.k->size - offset_into_extent);

		/* trim the private copy down to just the range being reported */
		bch2_cut_front(POS(k.k->p.inode,
				   bkey_start_offset(k.k) +
				   offset_into_extent),
			       cur.k);
		bch2_key_resize(&cur.k->k, sectors);
		cur.k->k.p = iter.pos;
		cur.k->k.p.offset += cur.k->k.size;

		if (have_extent) {
			/* drop btree locks before copying out to userspace */
			bch2_trans_unlock(trans);
			ret = bch2_fill_extent(c, info,
					bkey_i_to_s_c(prev.k), 0);
			if (ret)
				break;
		}

		bkey_copy(prev.k, cur.k);
		have_extent = true;

		bch2_btree_iter_set_pos(&iter,
			POS(iter.pos.inode, iter.pos.offset + sectors));

		ret = bch2_trans_relock(trans);
		if (ret)
			break;
	}
	/* resume here if the transaction restarts */
	start = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	/* flush the buffered final extent, marked as the last one */
	if (!ret && have_extent) {
		bch2_trans_unlock(trans);
		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
				       FIEMAP_EXTENT_LAST);
	}

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret < 0 ? ret : 0;
}
1133 
/* Page fault handlers for mmap'd bcachefs files */
static const struct vm_operations_struct bch_vm_ops = {
	.fault		= bch2_page_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = bch2_page_mkwrite,
};
1139 
bch2_mmap(struct file * file,struct vm_area_struct * vma)1140 static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
1141 {
1142 	file_accessed(file);
1143 
1144 	vma->vm_ops = &bch_vm_ops;
1145 	return 0;
1146 }
1147 
1148 /* Directories: */
1149 
/* Directory llseek: permit the full s64 offset range. */
static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_file_llseek_size(file, offset, whence, S64_MAX, S64_MAX);
}
1155 
/*
 * VFS ->iterate_shared: emit "." and ".." then hand off to the dirent
 * btree walker; internal error codes are converted for the VFS.
 */
static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	/* dir_emit_dots() returns false when the ctx buffer is full */
	if (!dir_emit_dots(file, ctx))
		return 0;

	int ret = bch2_readdir(c, inode_inum(inode), ctx);

	bch_err_fn(c, ret);
	return bch2_err_class(ret);
}
1169 
/*
 * VFS ->open: reject opens for writing on read-only subvolumes
 * (snapshots), and advertise O_DIRECT support.
 */
static int bch2_open(struct inode *vinode, struct file *file)
{
	if (file->f_flags & (O_WRONLY|O_RDWR)) {
		struct bch_inode_info *inode = to_bch_ei(vinode);
		struct bch_fs *c = inode->v.i_sb->s_fs_info;

		int ret = bch2_subvol_is_ro(c, inode->ei_subvol);
		if (ret)
			return ret;
	}

	file->f_mode |= FMODE_CAN_ODIRECT;

	return generic_file_open(vinode, file);
}
1185 
/* File operations for regular files */
static const struct file_operations bch_file_operations = {
	.open		= bch2_open,
	.llseek		= bch2_llseek,
	.read_iter	= bch2_read_iter,
	.write_iter	= bch2_write_iter,
	.mmap		= bch2_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.fsync		= bch2_fsync,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= bch2_fallocate_dispatch,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
	.remap_file_range = bch2_remap_file_range,
};
1203 
/* Inode operations for regular files */
static const struct inode_operations bch_file_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.fiemap		= bch2_fiemap,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};
1214 
/* Inode operations for directories */
static const struct inode_operations bch_dir_inode_operations = {
	.lookup		= bch2_lookup,
	.create		= bch2_create,
	.link		= bch2_link,
	.unlink		= bch2_unlink,
	.symlink	= bch2_symlink,
	.mkdir		= bch2_mkdir,
	/* NOTE(review): rmdir shares the unlink path — presumably the
	 * empty-directory check lives in bch2_unlink; confirm */
	.rmdir		= bch2_unlink,
	.mknod		= bch2_mknod,
	.rename		= bch2_rename2,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.tmpfile	= bch2_tmpfile,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};
1234 
/* File operations for directories */
static const struct file_operations bch_dir_file_operations = {
	.llseek		= bch2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= bch2_vfs_readdir,
	.fsync		= bch2_fsync,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
};
1245 
/* Inode operations for symlinks (targets stored via the page cache) */
static const struct inode_operations bch_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};
1256 
/* Inode operations for special files (devices, fifos, sockets) */
static const struct inode_operations bch_special_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};
1266 
/* Address space (page cache) operations for regular file data */
static const struct address_space_operations bch_address_space_operations = {
	.read_folio	= bch2_read_folio,
	.writepages	= bch2_writepages,
	.readahead	= bch2_readahead,
	.dirty_folio	= filemap_dirty_folio,
	.write_begin	= bch2_write_begin,
	.write_end	= bch2_write_end,
	.invalidate_folio = bch2_invalidate_folio,
	.release_folio	= bch2_release_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= filemap_migrate_folio,
#endif
	.error_remove_folio = generic_error_remove_folio,
};
1281 
/*
 * File handle payload for the export (NFS) operations below: identifies
 * an inode by inode number, subvolume and generation.
 */
struct bcachefs_fid {
	u64		inum;
	u32		subvol;
	u32		gen;
} __packed;
1287 
/* File handle variant that also carries the parent directory's identity */
struct bcachefs_fid_with_parent {
	struct bcachefs_fid	fid;
	struct bcachefs_fid	dir;
} __packed;
1292 
bcachefs_fid_valid(int fh_len,int fh_type)1293 static int bcachefs_fid_valid(int fh_len, int fh_type)
1294 {
1295 	switch (fh_type) {
1296 	case FILEID_BCACHEFS_WITHOUT_PARENT:
1297 		return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
1298 	case FILEID_BCACHEFS_WITH_PARENT:
1299 		return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
1300 	default:
1301 		return false;
1302 	}
1303 }
1304 
bch2_inode_to_fid(struct bch_inode_info * inode)1305 static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
1306 {
1307 	return (struct bcachefs_fid) {
1308 		.inum	= inode->ei_inode.bi_inum,
1309 		.subvol	= inode->ei_subvol,
1310 		.gen	= inode->ei_inode.bi_generation,
1311 	};
1312 }
1313 
/*
 * VFS ->encode_fh: encode @vinode into @fh.
 *
 * When a parent directory is supplied and the inode is not itself a
 * directory, the parent's identity is included so disconnected dentries
 * can be reconnected.  If *len is too small, the required length (in
 * u32 words) is written back and FILEID_INVALID returned.
 */
static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
			  struct inode *vdir)
{
	struct bch_inode_info *inode	= to_bch_ei(vinode);
	struct bch_inode_info *dir	= to_bch_ei(vdir);
	bool with_parent = dir && !S_ISDIR(inode->v.i_mode);
	int min_len = (with_parent
		       ? sizeof(struct bcachefs_fid_with_parent)
		       : sizeof(struct bcachefs_fid)) / sizeof(u32);

	if (*len < min_len) {
		*len = min_len;
		return FILEID_INVALID;
	}

	*len = min_len;

	if (with_parent) {
		struct bcachefs_fid_with_parent *fid = (void *) fh;

		fid->fid = bch2_inode_to_fid(inode);
		fid->dir = bch2_inode_to_fid(dir);
		return FILEID_BCACHEFS_WITH_PARENT;
	} else {
		struct bcachefs_fid *fid = (void *) fh;

		*fid = bch2_inode_to_fid(inode);
		return FILEID_BCACHEFS_WITHOUT_PARENT;
	}
}
1349 
bch2_nfs_get_inode(struct super_block * sb,struct bcachefs_fid fid)1350 static struct inode *bch2_nfs_get_inode(struct super_block *sb,
1351 					struct bcachefs_fid fid)
1352 {
1353 	struct bch_fs *c = sb->s_fs_info;
1354 	struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
1355 				    .subvol = fid.subvol,
1356 				    .inum = fid.inum,
1357 	});
1358 	if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
1359 		iput(vinode);
1360 		vinode = ERR_PTR(-ESTALE);
1361 	}
1362 	return vinode;
1363 }
1364 
bch2_fh_to_dentry(struct super_block * sb,struct fid * _fid,int fh_len,int fh_type)1365 static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
1366 		int fh_len, int fh_type)
1367 {
1368 	struct bcachefs_fid *fid = (void *) _fid;
1369 
1370 	if (!bcachefs_fid_valid(fh_len, fh_type))
1371 		return NULL;
1372 
1373 	return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
1374 }
1375 
bch2_fh_to_parent(struct super_block * sb,struct fid * _fid,int fh_len,int fh_type)1376 static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
1377 		int fh_len, int fh_type)
1378 {
1379 	struct bcachefs_fid_with_parent *fid = (void *) _fid;
1380 
1381 	if (!bcachefs_fid_valid(fh_len, fh_type) ||
1382 	    fh_type != FILEID_BCACHEFS_WITH_PARENT)
1383 		return NULL;
1384 
1385 	return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
1386 }
1387 
bch2_get_parent(struct dentry * child)1388 static struct dentry *bch2_get_parent(struct dentry *child)
1389 {
1390 	struct bch_inode_info *inode = to_bch_ei(child->d_inode);
1391 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
1392 	subvol_inum parent_inum = {
1393 		.subvol = inode->ei_inode.bi_parent_subvol ?:
1394 			inode->ei_subvol,
1395 		.inum = inode->ei_inode.bi_dir,
1396 	};
1397 
1398 	return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
1399 }
1400 
/*
 * Export op: find the name of @child within @parent.
 *
 * Fast path: the inode's dirent backpointer (bi_dir/bi_dir_offset)
 * points into @parent, so look up that one dirent directly.  Slow path
 * (hardlinked file whose backref points elsewhere): linearly scan all
 * of @parent's dirents for one targeting the child inode.
 */
static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
{
	struct bch_inode_info *inode	= to_bch_ei(child->d_inode);
	struct bch_inode_info *dir	= to_bch_ei(parent->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct btree_iter iter1;
	struct btree_iter iter2;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	struct bch_inode_unpacked inode_u;
	subvol_inum target;
	u32 snapshot;
	struct qstr dirent_name;
	unsigned name_len = 0;
	int ret;

	if (!S_ISDIR(dir->v.i_mode))
		return -EINVAL;

	trans = bch2_trans_get(c);

	/* iter1: backpointer lookup; iter2: linear fallback scan */
	bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
	bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_btree_iter_set_snapshot(&iter1, snapshot);
	bch2_btree_iter_set_snapshot(&iter2, snapshot);

	/* re-read the inode so the backpointer is current for this transaction */
	ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
		bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));

		k = bch2_btree_iter_peek_slot(&iter1);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_dirent) {
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
			goto err;
		}

		d = bkey_s_c_to_dirent(k);
		ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
		if (ret > 0)
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
		if (ret)
			goto err;

		/* verify the dirent actually points back at the child */
		if (target.subvol	== inode->ei_subvol &&
		    target.inum		== inode->ei_inode.bi_inum)
			goto found;
	} else {
		/*
		 * File with multiple hardlinks and our backref is to the wrong
		 * directory - linear search:
		 */
		for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
			if (k.k->p.inode > dir->ei_inode.bi_inum)
				break;

			if (k.k->type != KEY_TYPE_dirent)
				continue;

			d = bkey_s_c_to_dirent(k);
			ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
			if (ret < 0)
				break;
			if (ret)
				continue;

			if (target.subvol	== inode->ei_subvol &&
			    target.inum		== inode->ei_inode.bi_inum)
				goto found;
		}
	}

	ret = -ENOENT;
	goto err;
found:
	dirent_name = bch2_dirent_get_name(d);

	/* copy out at most NAME_MAX bytes, NUL terminated */
	name_len = min_t(unsigned, dirent_name.len, NAME_MAX);
	memcpy(name, dirent_name.name, name_len);
	name[name_len] = '\0';
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_iter_exit(trans, &iter1);
	bch2_trans_iter_exit(trans, &iter2);
	bch2_trans_put(trans);

	return ret;
}
1507 
/* NFS export operations (file handle encode/decode, parent/name lookup) */
static const struct export_operations bch_export_ops = {
	.encode_fh	= bch2_encode_fh,
	.fh_to_dentry	= bch2_fh_to_dentry,
	.fh_to_parent	= bch2_fh_to_parent,
	.get_parent	= bch2_get_parent,
	.get_name	= bch2_get_name,
};
1515 
/*
 * Initialize a freshly obtained VFS inode from its unpacked bcachefs
 * inode: copy over fields, then wire up the ops tables according to
 * the file type.
 */
static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
				struct bch_inode_info *inode,
				struct bch_inode_unpacked *bi,
				struct bch_subvolume *subvol)
{
	bch2_iget5_set(&inode->v, &inum);
	bch2_inode_update_after_write(trans, inode, bi, ~0);

	inode->v.i_blocks	= bi->bi_sectors;
	inode->v.i_ino		= bi->bi_inum;
	inode->v.i_rdev		= bi->bi_dev;
	inode->v.i_generation	= bi->bi_generation;
	inode->v.i_size		= bi->bi_size;

	inode->ei_flags		= 0;
	inode->ei_quota_reserved = 0;
	inode->ei_qid		= bch_qid(bi);
	inode->ei_subvol	= inum.subvol;

	/* remember whether this inode lives in a snapshot subvolume */
	if (BCH_SUBVOLUME_SNAP(subvol))
		set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);

	inode->v.i_mapping->a_ops = &bch_address_space_operations;

	switch (inode->v.i_mode & S_IFMT) {
	case S_IFREG:
		inode->v.i_op	= &bch_file_inode_operations;
		inode->v.i_fop	= &bch_file_operations;
		break;
	case S_IFDIR:
		inode->v.i_op	= &bch_dir_inode_operations;
		inode->v.i_fop	= &bch_dir_file_operations;
		break;
	case S_IFLNK:
		inode_nohighmem(&inode->v);
		inode->v.i_op	= &bch_symlink_inode_operations;
		break;
	default:
		/* devices, fifos, sockets */
		init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
		inode->v.i_op	= &bch_special_inode_operations;
		break;
	}

	mapping_set_large_folios(inode->v.i_mapping);
}
1561 
bch2_free_inode(struct inode * vinode)1562 static void bch2_free_inode(struct inode *vinode)
1563 {
1564 	kmem_cache_free(bch2_inode_cache, to_bch_ei(vinode));
1565 }
1566 
/*
 * bch2_write_inode() callback: copy the VFS inode's timestamps into the
 * unpacked bcachefs inode about to be written.
 */
static int inode_update_times_fn(struct btree_trans *trans,
				 struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi,
				 void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_atime	= timespec_to_bch2_time(c, inode_get_atime(&inode->v));
	bi->bi_mtime	= timespec_to_bch2_time(c, inode_get_mtime(&inode->v));
	bi->bi_ctime	= timespec_to_bch2_time(c, inode_get_ctime(&inode->v));

	return 0;
}
1580 
/*
 * VFS ->write_inode: persist the in-memory timestamps, serialized
 * against other inode updates by ei_update_lock.
 */
static int bch2_vfs_write_inode(struct inode *vinode,
				struct writeback_control *wbc)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);
	int ret;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}
1595 
/*
 * VFS ->evict_inode: tear down the page cache, and if this was the
 * last link, release quota and delete the on-disk inode.
 */
static void bch2_evict_inode(struct inode *vinode)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);

	truncate_inode_pages_final(&inode->v.i_data);

	clear_inode(&inode->v);

	/* any outstanding quota reservation should have been released by now */
	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);

	if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
				KEY_TYPE_QUOTA_WARN);
		bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
		bch2_inode_rm(c, inode_inum(inode));
	}

	/* drop from the fs-wide list of cached vfs inodes */
	mutex_lock(&c->vfs_inodes_lock);
	list_del_init(&inode->ei_vfs_inode_list);
	mutex_unlock(&c->vfs_inodes_lock);
}
1619 
/*
 * Evict all cached VFS inodes belonging to the subvolumes in @s
 * (e.g. when those subvolumes are being deleted), waiting until every
 * matching inode is actually gone.
 */
void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
{
	struct bch_inode_info *inode;
	DARRAY(struct bch_inode_info *) grabbed;
	bool clean_pass = false, this_pass_clean;

	/*
	 * Initially, we scan for inodes without I_DONTCACHE, then mark them to
	 * be pruned with d_mark_dontcache().
	 *
	 * Once we've had a clean pass where we didn't find any inodes without
	 * I_DONTCACHE, we wait for them to be freed:
	 */

	darray_init(&grabbed);
	darray_make_room(&grabbed, 1024);
again:
	cond_resched();
	this_pass_clean = true;

	mutex_lock(&c->vfs_inodes_lock);
	list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
		if (!snapshot_list_has_id(s, inode->ei_subvol))
			continue;

		if (!(inode->v.i_state & I_DONTCACHE) &&
		    !(inode->v.i_state & I_FREEING) &&
		    igrab(&inode->v)) {
			this_pass_clean = false;

			/* GFP_ATOMIC: we hold vfs_inodes_lock */
			if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
				iput(&inode->v);
				break;
			}
		} else if (clean_pass && this_pass_clean) {
			/*
			 * inode is being freed elsewhere; wait on __I_NEW and
			 * rescan from the top
			 */
			struct wait_bit_queue_entry wqe;
			struct wait_queue_head *wq_head;

			wq_head = inode_bit_waitqueue(&wqe, &inode->v, __I_NEW);
			prepare_to_wait_event(wq_head, &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->vfs_inodes_lock);

			schedule();
			finish_wait(wq_head, &wqe.wq_entry);
			goto again;
		}
	}
	mutex_unlock(&c->vfs_inodes_lock);

	/* mark the inodes we grabbed for pruning, outside the lock */
	darray_for_each(grabbed, i) {
		inode = *i;
		d_mark_dontcache(&inode->v);
		d_prune_aliases(&inode->v);
		iput(&inode->v);
	}
	grabbed.nr = 0;

	/* loop until we've had two consecutive clean passes */
	if (!clean_pass || !this_pass_clean) {
		clean_pass = this_pass_clean;
		goto again;
	}

	darray_exit(&grabbed);
}
1685 
bch2_statfs(struct dentry * dentry,struct kstatfs * buf)1686 static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
1687 {
1688 	struct super_block *sb = dentry->d_sb;
1689 	struct bch_fs *c = sb->s_fs_info;
1690 	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
1691 	unsigned shift = sb->s_blocksize_bits - 9;
1692 	/*
1693 	 * this assumes inodes take up 64 bytes, which is a decent average
1694 	 * number:
1695 	 */
1696 	u64 avail_inodes = ((usage.capacity - usage.used) << 3);
1697 
1698 	buf->f_type	= BCACHEFS_STATFS_MAGIC;
1699 	buf->f_bsize	= sb->s_blocksize;
1700 	buf->f_blocks	= usage.capacity >> shift;
1701 	buf->f_bfree	= usage.free >> shift;
1702 	buf->f_bavail	= avail_factor(usage.free) >> shift;
1703 
1704 	buf->f_files	= usage.nr_inodes + avail_inodes;
1705 	buf->f_ffree	= avail_inodes;
1706 
1707 	buf->f_fsid	= uuid_to_fsid(c->sb.user_uuid.b);
1708 	buf->f_namelen	= BCH_NAME_MAX;
1709 
1710 	return 0;
1711 }
1712 
bch2_sync_fs(struct super_block * sb,int wait)1713 static int bch2_sync_fs(struct super_block *sb, int wait)
1714 {
1715 	struct bch_fs *c = sb->s_fs_info;
1716 	int ret;
1717 
1718 	trace_bch2_sync_fs(sb, wait);
1719 
1720 	if (c->opts.journal_flush_disabled)
1721 		return 0;
1722 
1723 	if (!wait) {
1724 		bch2_journal_flush_async(&c->journal, NULL);
1725 		return 0;
1726 	}
1727 
1728 	ret = bch2_journal_flush(&c->journal);
1729 	return bch2_err_class(ret);
1730 }
1731 
bch2_path_to_fs(const char * path)1732 static struct bch_fs *bch2_path_to_fs(const char *path)
1733 {
1734 	struct bch_fs *c;
1735 	dev_t dev;
1736 	int ret;
1737 
1738 	ret = lookup_bdev(path, &dev);
1739 	if (ret)
1740 		return ERR_PTR(ret);
1741 
1742 	c = bch2_dev_to_fs(dev);
1743 	if (c)
1744 		closure_put(&c->cl);
1745 	return c ?: ERR_PTR(-ENOENT);
1746 }
1747 
/*
 * Remount: handle ro<->rw transitions (under state_lock) and apply a
 * changed "errors" option.
 */
static int bch2_remount(struct super_block *sb, int *flags,
			struct bch_opts opts)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret = 0;

	opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);

	if (opts.read_only != c->opts.read_only) {
		down_write(&c->state_lock);

		if (opts.read_only) {
			bch2_fs_read_only(c);

			sb->s_flags |= SB_RDONLY;
		} else {
			ret = bch2_fs_read_write(c);
			if (ret) {
				bch_err(c, "error going rw: %i", ret);
				up_write(&c->state_lock);
				/* report a generic error to the VFS */
				ret = -EINVAL;
				goto err;
			}

			sb->s_flags &= ~SB_RDONLY;
		}

		c->opts.read_only = opts.read_only;

		up_write(&c->state_lock);
	}

	if (opt_defined(opts, errors))
		c->opts.errors = opts.errors;
err:
	return bch2_err_class(ret);
}
1785 
bch2_show_devname(struct seq_file * seq,struct dentry * root)1786 static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
1787 {
1788 	struct bch_fs *c = root->d_sb->s_fs_info;
1789 	bool first = true;
1790 
1791 	for_each_online_member(c, ca) {
1792 		if (!first)
1793 			seq_putc(seq, ':');
1794 		first = false;
1795 		seq_puts(seq, ca->disk_sb.sb_name);
1796 	}
1797 
1798 	return 0;
1799 }
1800 
/*
 * VFS ->show_options: print mount options that differ from their
 * defaults, skipping hidden and non-mount options.
 */
static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	enum bch_opt_id i;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if ((opt->flags & OPT_HIDDEN) ||
		    !(opt->flags & OPT_MOUNT))
			continue;

		/* only show options changed from the default */
		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		printbuf_reset(&buf);
		bch2_opt_to_text(&buf, c, c->disk_sb.sb, opt, v,
				 OPT_SHOW_MOUNT_STYLE);
		seq_putc(seq, ',');
		seq_puts(seq, buf.buf);
	}

	if (buf.allocation_failure)
		ret = -ENOMEM;
	printbuf_exit(&buf);
	return ret;
}
1831 
bch2_put_super(struct super_block * sb)1832 static void bch2_put_super(struct super_block *sb)
1833 {
1834 	struct bch_fs *c = sb->s_fs_info;
1835 
1836 	__bch2_fs_stop(c);
1837 }
1838 
1839 /*
1840  * bcachefs doesn't currently integrate intwrite freeze protection but the
1841  * internal write references serve the same purpose. Therefore reuse the
1842  * read-only transition code to perform the quiesce. The caveat is that we don't
1843  * currently have the ability to block tasks that want a write reference while
1844  * the superblock is frozen. This is fine for now, but we should either add
1845  * blocking support or find a way to integrate sb_start_intwrite() and friends.
1846  */
/* VFS ->freeze_fs: quiesce by transitioning read-only (see comment above). */
static int bch2_freeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
	return 0;
}
1856 
/*
 * VFS ->unfreeze_fs: go back read-write — unless we went emergency
 * read-only while frozen, in which case stay ro.
 */
static int bch2_unfreeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (test_bit(BCH_FS_emergency_ro, &c->flags))
		return 0;

	down_write(&c->state_lock);
	ret = bch2_fs_read_write(c);
	up_write(&c->state_lock);
	return ret;
}
1870 
/* Superblock operations */
static const struct super_operations bch_super_operations = {
	.alloc_inode	= bch2_alloc_inode,
	.free_inode	= bch2_free_inode,
	.write_inode	= bch2_vfs_write_inode,
	.evict_inode	= bch2_evict_inode,
	.sync_fs	= bch2_sync_fs,
	.statfs		= bch2_statfs,
	.show_devname	= bch2_show_devname,
	.show_options	= bch2_show_options,
	.put_super	= bch2_put_super,
	.freeze_fs	= bch2_freeze,
	.unfreeze_fs	= bch2_unfreeze,
};
1884 
/* sget() set callback: attach the bch_fs to a new superblock. */
static int bch2_set_super(struct super_block *s, void *data)
{
	s->s_fs_info = data;
	return 0;
}
1890 
/* sget() set callback that refuses to create a new superblock. */
static int bch2_noset_super(struct super_block *s, void *data)
{
	return -EBUSY;
}
1895 
1896 typedef DARRAY(struct bch_fs *) darray_fs;
1897 
/*
 * sget() test callback: does this superblock match the mount request?
 * @data is the array of bch_fs pointers resolved from the mount's
 * device paths; match only if every one of them is this superblock's fs.
 */
static int bch2_test_super(struct super_block *s, void *data)
{
	struct bch_fs *c = s->s_fs_info;
	darray_fs *d = data;

	if (!c)
		return false;

	darray_for_each(*d, i)
		if (c != *i)
			return false;
	return true;
}
1911 
/*
 * Mount entry point for the new (fs_context) mount API: find an existing
 * bch_fs matching the devices in fc->source, or open a new one, then attach
 * a superblock and root dentry to it.
 */
static int bch2_fs_get_tree(struct fs_context *fc)
{
	struct bch_fs *c;
	struct super_block *sb;
	struct inode *vinode;
	struct bch2_opts_parse *opts_parse = fc->fs_private;
	struct bch_opts opts = opts_parse->opts;
	darray_str devs;
	darray_fs devs_to_fs = {};
	int ret;

	opt_set(opts, read_only, (fc->sb_flags & SB_RDONLY) != 0);
	/* don't start the fs inside bch2_fs_open(); we start it explicitly below */
	opt_set(opts, nostart, true);

	if (!fc->source || strlen(fc->source) == 0)
		return -EINVAL;

	/* fc->source is a colon-separated device list */
	ret = bch2_split_devs(fc->source, &devs);
	if (ret)
		return ret;

	/* map each device path to its already-open bch_fs, if any */
	darray_for_each(devs, i) {
		ret = darray_push(&devs_to_fs, bch2_path_to_fs(*i));
		if (ret)
			goto err;
	}

	/*
	 * First try to find an existing superblock for these devices
	 * (bch2_noset_super means this sget() never creates one).
	 * NOTE(review): an error from this sget() is discarded and we fall
	 * through to a fresh bch2_fs_open() — presumably intentional, since
	 * failure here just means "no existing sb"; confirm.
	 */
	sb = sget(fc->fs_type, bch2_test_super, bch2_noset_super, fc->sb_flags|SB_NOSEC, &devs_to_fs);
	if (!IS_ERR(sb))
		goto got_sb;

	c = bch2_fs_open(devs.data, devs.nr, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	/* Some options can't be parsed until after the fs is started: */
	opts = bch2_opts_empty();
	ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf);
	if (ret)
		goto err_stop_fs;

	bch2_opts_apply(&c->opts, opts);

	ret = bch2_fs_start(c);
	if (ret)
		goto err_stop_fs;

	/* allocate a fresh superblock for the fs we just opened */
	sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
	ret = PTR_ERR_OR_ZERO(sb);
	if (ret)
		goto err_stop_fs;
got_sb:
	c = sb->s_fs_info;

	if (sb->s_root) {
		/* existing mount: only reusable if RO/RW mode matches */
		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY) {
			ret = -EBUSY;
			goto err_put_super;
		}
		goto out;
	}

	sb->s_blocksize		= block_bytes(c);
	sb->s_blocksize_bits	= ilog2(block_bytes(c));
	sb->s_maxbytes		= MAX_LFS_FILESIZE;
	sb->s_op		= &bch_super_operations;
	sb->s_export_op		= &bch_export_ops;
#ifdef CONFIG_BCACHEFS_QUOTA
	sb->s_qcop		= &bch2_quotactl_operations;
	sb->s_quota_types	= QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
#endif
	sb->s_xattr		= bch2_xattr_handlers;
	sb->s_magic		= BCACHEFS_STATFS_MAGIC;
	/* timestamps are stored in units of c->sb.nsec_per_time_unit ns */
	sb->s_time_gran		= c->sb.nsec_per_time_unit;
	sb->s_time_min		= div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
	sb->s_time_max		= div_s64(S64_MAX, c->sb.time_units_per_sec);
	sb->s_uuid		= c->sb.user_uuid;
	sb->s_shrink->seeks	= 0;
	c->vfs_sb		= sb;
	strscpy(sb->s_id, c->name, sizeof(sb->s_id));

	ret = super_setup_bdi(sb);
	if (ret)
		goto err_put_super;

	sb->s_bdi->ra_pages		= VM_READAHEAD_PAGES;

	for_each_online_member(c, ca) {
		struct block_device *bdev = ca->disk_sb.bdev;

		/* XXX: create an anonymous device for multi device filesystems */
		sb->s_bdev	= bdev;
		sb->s_dev	= bdev->bd_dev;
		percpu_ref_put(&ca->io_ref);
		break;
	}

	c->dev = sb->s_dev;

#ifdef CONFIG_BCACHEFS_POSIX_ACL
	if (c->opts.acl)
		sb->s_flags	|= SB_POSIXACL;
#endif

	/* NOTE(review): duplicates the s_shrink->seeks = 0 assignment above */
	sb->s_shrink->seeks = 0;

	vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
	ret = PTR_ERR_OR_ZERO(vinode);
	bch_err_msg(c, ret, "mounting: error getting root inode");
	if (ret)
		goto err_put_super;

	sb->s_root = d_make_root(vinode);
	if (!sb->s_root) {
		bch_err(c, "error mounting: error allocating root dentry");
		ret = -ENOMEM;
		goto err_put_super;
	}

	sb->s_flags |= SB_ACTIVE;
out:
	fc->root = dget(sb->s_root);
err:
	/* common exit: success paths reach here too, with ret == 0 */
	darray_exit(&devs_to_fs);
	bch2_darray_str_exit(&devs);
	if (ret)
		pr_err("error: %s", bch2_err_str(ret));
	/*
	 * On an inconsistency error in recovery we might see an -EROFS derived
	 * errorcode (from the journal), but we don't want to return that to
	 * userspace as that causes util-linux to retry the mount RO - which is
	 * confusing:
	 */
	if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
		ret = -EIO;
	return bch2_err_class(ret);

err_stop_fs:
	/* fs opened but no superblock attached yet: plain stop */
	bch2_fs_stop(c);
	goto err;

err_put_super:
	/*
	 * Superblock exists: tear the fs down first, then drop the sb;
	 * deactivate_locked_super() will invoke kill_sb -> bch2_fs_free().
	 */
	__bch2_fs_stop(c);
	deactivate_locked_super(sb);
	goto err;
}
2059 
bch2_kill_sb(struct super_block * sb)2060 static void bch2_kill_sb(struct super_block *sb)
2061 {
2062 	struct bch_fs *c = sb->s_fs_info;
2063 
2064 	generic_shutdown_super(sb);
2065 	bch2_fs_free(c);
2066 }
2067 
bch2_fs_context_free(struct fs_context * fc)2068 static void bch2_fs_context_free(struct fs_context *fc)
2069 {
2070 	struct bch2_opts_parse *opts = fc->fs_private;
2071 
2072 	if (opts) {
2073 		printbuf_exit(&opts->parse_later);
2074 		kfree(opts);
2075 	}
2076 }
2077 
bch2_fs_parse_param(struct fs_context * fc,struct fs_parameter * param)2078 static int bch2_fs_parse_param(struct fs_context *fc,
2079 			       struct fs_parameter *param)
2080 {
2081 	/*
2082 	 * the "source" param, i.e., the name of the device(s) to mount,
2083 	 * is handled by the VFS layer.
2084 	 */
2085 	if (!strcmp(param->key, "source"))
2086 		return -ENOPARAM;
2087 
2088 	struct bch2_opts_parse *opts = fc->fs_private;
2089 	struct bch_fs *c = NULL;
2090 
2091 	/* for reconfigure, we already have a struct bch_fs */
2092 	if (fc->root)
2093 		c = fc->root->d_sb->s_fs_info;
2094 
2095 	int ret = bch2_parse_one_mount_opt(c, &opts->opts,
2096 					   &opts->parse_later, param->key,
2097 					   param->string);
2098 
2099 	return bch2_err_class(ret);
2100 }
2101 
bch2_fs_reconfigure(struct fs_context * fc)2102 static int bch2_fs_reconfigure(struct fs_context *fc)
2103 {
2104 	struct super_block *sb = fc->root->d_sb;
2105 	struct bch2_opts_parse *opts = fc->fs_private;
2106 
2107 	return bch2_remount(sb, &fc->sb_flags, opts->opts);
2108 }
2109 
/* fs_context callbacks wired up by bch2_init_fs_context() */
static const struct fs_context_operations bch2_context_ops = {
	.free        = bch2_fs_context_free,
	.parse_param = bch2_fs_parse_param,
	.get_tree    = bch2_fs_get_tree,
	.reconfigure = bch2_fs_reconfigure,
};
2116 
bch2_init_fs_context(struct fs_context * fc)2117 static int bch2_init_fs_context(struct fs_context *fc)
2118 {
2119 	struct bch2_opts_parse *opts = kzalloc(sizeof(*opts), GFP_KERNEL);
2120 
2121 	if (!opts)
2122 		return -ENOMEM;
2123 
2124 	opts->parse_later = PRINTBUF;
2125 
2126 	fc->ops = &bch2_context_ops;
2127 	fc->fs_private = opts;
2128 
2129 	return 0;
2130 }
2131 
/* filesystem type registered with the VFS in bch2_vfs_init() */
static struct file_system_type bcache_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "bcachefs",
	.init_fs_context	= bch2_init_fs_context,
	.kill_sb		= bch2_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
2139 
2140 MODULE_ALIAS_FS("bcachefs");
2141 
/*
 * Undo bch2_vfs_init().  Also used as its error-path cleanup, so it must
 * tolerate partial initialization: kmem_cache_destroy(NULL) is a no-op,
 * and the unregister must happen before the inode cache is destroyed.
 */
void bch2_vfs_exit(void)
{
	unregister_filesystem(&bcache_fs_type);
	kmem_cache_destroy(bch2_inode_cache);
}
2147 
/*
 * Module-init-time setup for the VFS layer: create the bch_inode_info
 * slab cache and register the "bcachefs" filesystem type.  On any
 * failure, bch2_vfs_exit() tears down whatever was set up.
 */
int __init bch2_vfs_init(void)
{
	int ret = -ENOMEM;

	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
	if (!bch2_inode_cache)
		goto err;

	ret = register_filesystem(&bcache_fs_type);
	if (ret)
		goto err;

	return 0;
err:
	bch2_vfs_exit();
	return ret;
}
2165 
2166 #endif /* NO_BCACHEFS_FS */
2167