/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_H
#define _BCACHEFS_FS_H

#include "inode.h"
#include "opts.h"
#include "str_hash.h"
#include "quota_types.h"
#include "two_state_shared_lock.h"

#include <linux/seqlock.h>
#include <linux/stat.h>

struct bch_inode_info {
	struct inode		v;
	struct rhash_head	hash;
	subvol_inum		ei_inum;

	struct list_head	ei_vfs_inode_list;
	unsigned long		ei_flags;

	struct mutex		ei_update_lock;
	u64			ei_quota_reserved;
	unsigned long		ei_last_dirtied;
	two_state_lock_t	ei_pagecache_lock;

	struct mutex		ei_quota_lock;
	struct bch_qid		ei_qid;

	/*
	 * When we've been doing nocow writes we'll need to issue flushes to the
	 * underlying block devices
	 *
	 * XXX: a device may have had a flush issued by some other codepath. It
	 * would be better to keep for each device a sequence number that's
	 * incremented when we issue a cache flush, and track here the sequence
	 * number that needs flushing.
	 */
	struct bch_devs_mask	ei_devs_need_flush;

	/* copy of inode in btree: */
	struct bch_inode_unpacked ei_inode;
};

#define bch2_pagecache_add_put(i)	bch2_two_state_unlock(&i->ei_pagecache_lock, 0)
#define bch2_pagecache_add_tryget(i)	bch2_two_state_trylock(&i->ei_pagecache_lock, 0)
#define bch2_pagecache_add_get(i)	bch2_two_state_lock(&i->ei_pagecache_lock, 0)

#define bch2_pagecache_block_put(i)	bch2_two_state_unlock(&i->ei_pagecache_lock, 1)
#define bch2_pagecache_block_get(i)	bch2_two_state_lock(&i->ei_pagecache_lock, 1)

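/*
 * The two lock states correspond to the two classes of users: paths that add
 * pages to the page cache take the shared "add" state, and paths that need
 * new page cache additions held off take the "block" state. A minimal usage
 * sketch (assuming inode is a struct bch_inode_info *; the actual callers
 * live in the fs-io code):
 *
 *	bch2_pagecache_add_get(inode);
 *	// ... buffered IO that may add pages to the page cache ...
 *	bch2_pagecache_add_put(inode);
 *
 *	bch2_pagecache_block_get(inode);
 *	// ... work that must not race with new page cache pages ...
 *	bch2_pagecache_block_put(inode);
 */
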
static inline subvol_inum inode_inum(struct bch_inode_info *inode)
{
	return inode->ei_inum;
}

/*
 * Set if we've gotten a btree error for this inode, and thus the vfs inode and
 * btree inode may be inconsistent:
 */
#define EI_INODE_ERROR			0

/*
 * Set if the inode is in a snapshot subvolume - we don't do quota accounting
 * in those:
 */
#define EI_INODE_SNAPSHOT		1
#define EI_INODE_HASHED			2

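/*
 * These are bit numbers in bch_inode_info::ei_flags; an illustrative sketch
 * of the expected usage (not a new API):
 *
 *	if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
 *		return;		// no quota accounting for snapshots
 *	set_bit(EI_INODE_ERROR, &inode->ei_flags);
 */
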
#define to_bch_ei(_inode)					\
	container_of_or_null(_inode, struct bch_inode_info, v)

static inline int ptrcmp(void *l, void *r)
{
	return cmp_int(l, r);
}

enum bch_inode_lock_op {
	INODE_PAGECACHE_BLOCK	= (1U << 0),
	INODE_UPDATE_LOCK	= (1U << 1),
};

#define bch2_lock_inodes(_locks, ...)					\
do {									\
	struct bch_inode_info *a[] = { NULL, __VA_ARGS__ };		\
	unsigned i;							\
									\
	bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp);			\
									\
	for (i = 1; i < ARRAY_SIZE(a); i++)				\
		if (a[i] != a[i - 1]) {					\
			if ((_locks) & INODE_PAGECACHE_BLOCK)		\
				bch2_pagecache_block_get(a[i]);\
			if ((_locks) & INODE_UPDATE_LOCK)			\
				mutex_lock_nested(&a[i]->ei_update_lock, i);\
		}							\
} while (0)

#define bch2_unlock_inodes(_locks, ...)					\
do {									\
	struct bch_inode_info *a[] = { NULL, __VA_ARGS__ };		\
	unsigned i;							\
									\
	bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp);			\
									\
	for (i = 1; i < ARRAY_SIZE(a); i++)				\
		if (a[i] != a[i - 1]) {					\
			if ((_locks) & INODE_PAGECACHE_BLOCK)		\
				bch2_pagecache_block_put(a[i]);\
			if ((_locks) & INODE_UPDATE_LOCK)			\
				mutex_unlock(&a[i]->ei_update_lock);	\
		}							\
} while (0)

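/*
 * Both macros sort their arguments by pointer value so the lock order is
 * consistent no matter how callers pass the inodes; NULLs and duplicates are
 * skipped via the NULL sentinel and the a[i] != a[i - 1] check. A usage
 * sketch (illustrative; rename-style callers pass several possibly-identical
 * or NULL inodes):
 *
 *	bch2_lock_inodes(INODE_UPDATE_LOCK, dir1, dir2, inode);
 *	// ... update the inodes under ei_update_lock ...
 *	bch2_unlock_inodes(INODE_UPDATE_LOCK, dir1, dir2, inode);
 */
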
static inline struct bch_inode_info *file_bch_inode(struct file *file)
{
	return to_bch_ei(file_inode(file));
}

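/*
 * An inode option is "changing" relative to @dir if the inode doesn't set the
 * option explicitly (so it inherits it) and @dir's effective value differs
 * from the inode's current effective value.
 */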
static inline bool inode_attr_changing(struct bch_inode_info *dir,
				struct bch_inode_info *inode,
				enum inode_opt_id id)
{
	return !(inode->ei_inode.bi_fields_set & (1 << id)) &&
		bch2_inode_opt_get(&dir->ei_inode, id) !=
		bch2_inode_opt_get(&inode->ei_inode, id);
}

static inline bool inode_attrs_changing(struct bch_inode_info *dir,
				 struct bch_inode_info *inode)
{
	unsigned id;

	for (id = 0; id < Inode_opt_nr; id++)
		if (inode_attr_changing(dir, inode, id))
			return true;

	return false;
}

struct bch_inode_unpacked;

#ifndef NO_BCACHEFS_FS

struct bch_inode_info *
__bch2_create(struct mnt_idmap *, struct bch_inode_info *,
	      struct dentry *, umode_t, dev_t, subvol_inum, unsigned);

int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p);

int bch2_fs_quota_transfer(struct bch_fs *,
			   struct bch_inode_info *,
			   struct bch_qid,
			   unsigned,
			   enum quota_acct_mode);

static inline int bch2_set_projid(struct bch_fs *c,
				  struct bch_inode_info *inode,
				  u32 projid)
{
	struct bch_qid qid = inode->ei_qid;

	qid.q[QTYP_PRJ] = projid;

	return bch2_fs_quota_transfer(c, inode, qid,
				      1 << QTYP_PRJ,
				      KEY_TYPE_QUOTA_PREALLOC);
}

struct inode *bch2_vfs_inode_get(struct bch_fs *, subvol_inum);

/* returns 0 if we want to do the update, otherwise the error is passed up */
typedef int (*inode_set_fn)(struct btree_trans *,
			    struct bch_inode_info *,
			    struct bch_inode_unpacked *, void *);

void bch2_inode_update_after_write(struct btree_trans *,
				   struct bch_inode_info *,
				   struct bch_inode_unpacked *,
				   unsigned);
int __must_check bch2_write_inode(struct bch_fs *, struct bch_inode_info *,
				  inode_set_fn, void *, unsigned);

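/*
 * bch2_write_inode() reads the inode from the btree, runs the inode_set_fn on
 * the unpacked copy inside a transaction, and on success writes it back and
 * updates the vfs inode. A minimal sketch of a caller (set_flags_fn is a
 * hypothetical callback; real callers include the timestamp update paths):
 *
 *	static int set_flags_fn(struct btree_trans *trans,
 *				struct bch_inode_info *inode,
 *				struct bch_inode_unpacked *bi, void *p)
 *	{
 *		bi->bi_flags = *(u32 *) p;
 *		return 0;
 *	}
 *
 *	u32 flags = ...;
 *	int ret = bch2_write_inode(c, inode, set_flags_fn, &flags, ATTR_CTIME);
 */
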
int bch2_setattr_nonsize(struct mnt_idmap *,
			 struct bch_inode_info *,
			 struct iattr *);
int __bch2_unlink(struct inode *, struct dentry *, bool);

void bch2_evict_subvolume_inodes(struct bch_fs *, snapshot_id_list *);

void bch2_fs_vfs_exit(struct bch_fs *);
int bch2_fs_vfs_init(struct bch_fs *);

void bch2_vfs_exit(void);
int bch2_vfs_init(void);

#else

#define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields)	({ do {} while (0); })

static inline int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) { return 0; }

static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
					       snapshot_id_list *s) {}

static inline void bch2_fs_vfs_exit(struct bch_fs *c) {}
static inline int bch2_fs_vfs_init(struct bch_fs *c) { return 0; }

static inline void bch2_vfs_exit(void) {}
static inline int bch2_vfs_init(void) { return 0; }

#endif /* NO_BCACHEFS_FS */

#endif /* _BCACHEFS_FS_H */