xref: /linux/fs/bcachefs/acl.c (revision e467705a9fb37f51595aa6deaca085ccb4005454)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"

#include "acl.h"
#include "xattr.h"

#include <linux/posix_acl.h>

static const char * const acl_types[] = {
	[ACL_USER_OBJ]	= "user_obj",
	[ACL_USER]	= "user",
	[ACL_GROUP_OBJ]	= "group_obj",
	[ACL_GROUP]	= "group",
	[ACL_MASK]	= "mask",
	[ACL_OTHER]	= "other",
	NULL,
};

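/*
 * Print an ACL in its on-disk (xattr value) encoding: a bch_acl_header
 * followed by a packed list of entries, where named user/group entries
 * carry an id (bch_acl_entry) and the remaining tags do not
 * (bch_acl_entry_short).
 *
 * Sample output (sketch) for an access ACL with one named user, perms
 * shown in octal:
 *
 *	user_obj 6 user uid 1000 6 group_obj 4 mask 6 other 4
 */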
void bch2_acl_to_text(struct printbuf *out, const void *value, size_t size)
{
	const void *p, *end = value + size;

	if (!value ||
	    size < sizeof(bch_acl_header) ||
	    ((bch_acl_header *)value)->a_version != cpu_to_le32(BCH_ACL_VERSION))
		return;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *in = p;
		unsigned tag = le16_to_cpu(in->e_tag);

		prt_str(out, acl_types[tag]);

		switch (tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
			prt_printf(out, " uid %u", le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			prt_printf(out, " gid %u", le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		}

		prt_printf(out, " %o", le16_to_cpu(in->e_perm));

		if (p != end)
			prt_char(out, ' ');
	}
}

#ifdef CONFIG_BCACHEFS_POSIX_ACL

#include "fs.h"

#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/sched.h>
#include <linux/slab.h>

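/*
 * Size of the on-disk encoding: the header, one short entry per
 * user_obj/group_obj/mask/other tag, one long entry per named user/group.
 *
 * Rough worked example, assuming the struct sizes from acl.h (4-byte
 * header, 4-byte short entry, 8-byte long entry): an ACL with
 * user_obj/group_obj/mask/other plus one named user packs to
 * 4 + 4 * 4 + 8 * 1 = 28 bytes.
 */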
static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
{
	return sizeof(bch_acl_header) +
		sizeof(bch_acl_entry_short) * nr_short +
		sizeof(bch_acl_entry) * nr_long;
}

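/*
 * ACLs are stored as xattrs; map the POSIX ACL type to the xattr index
 * the value is stored under.
 */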
static inline int acl_to_xattr_type(int type)
{
	switch (type) {
	case ACL_TYPE_ACCESS:
		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
	case ACL_TYPE_DEFAULT:
		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
	default:
		BUG();
	}
}

/*
 * Convert from filesystem to in-memory representation.
 *
 * Two passes: the first validates the on-disk entries and counts them,
 * the second fills in the freshly allocated posix_acl. The allocation
 * uses allocate_dropping_locks(), since it may need to drop btree locks.
 */
static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
					    const void *value, size_t size)
{
	const void *p, *end = value + size;
	struct posix_acl *acl;
	struct posix_acl_entry *out;
	unsigned count = 0;
	int ret;

	if (!value)
		return NULL;
	if (size < sizeof(bch_acl_header))
		goto invalid;
	if (((bch_acl_header *)value)->a_version !=
	    cpu_to_le32(BCH_ACL_VERSION))
		goto invalid;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *entry = p;

		if (p + sizeof(bch_acl_entry_short) > end)
			goto invalid;

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
		case ACL_GROUP:
			p += sizeof(bch_acl_entry);
			break;
		default:
			goto invalid;
		}

		count++;
	}

	if (p > end)
		goto invalid;

	if (!count)
		return NULL;

	acl = allocate_dropping_locks(trans, ret,
			posix_acl_alloc(count, _gfp));
	if (!acl)
		return ERR_PTR(-ENOMEM);
	if (ret) {
		kfree(acl);
		return ERR_PTR(ret);
	}

	out = acl->a_entries;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *in = p;

		out->e_tag  = le16_to_cpu(in->e_tag);
		out->e_perm = le16_to_cpu(in->e_perm);

		switch (out->e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
			out->e_uid = make_kuid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			out->e_gid = make_kgid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		}

		out++;
	}

	BUG_ON(out != acl->a_entries + acl->a_count);

	return acl;
invalid:
	pr_err("invalid acl entry");
	return ERR_PTR(-EINVAL);
}

#define acl_for_each_entry(acl, acl_e)			\
	for (acl_e = acl->a_entries;			\
	     acl_e < acl->a_entries + acl->a_count;	\
	     acl_e++)

/*
 * Convert from in-memory to filesystem representation.
 *
 * The result is a bkey_i_xattr allocated from the transaction's memory
 * via bch2_trans_kmalloc(), with a zero-length name and the packed ACL
 * as the xattr value.
 */
static struct bkey_i_xattr *
bch2_acl_to_xattr(struct btree_trans *trans,
		  const struct posix_acl *acl,
		  int type)
{
	struct bkey_i_xattr *xattr;
	bch_acl_header *acl_header;
	const struct posix_acl_entry *acl_e;
	void *outptr;
	unsigned nr_short = 0, nr_long = 0, acl_len, u64s;

	acl_for_each_entry(acl, acl_e) {
		switch (acl_e->e_tag) {
		case ACL_USER:
		case ACL_GROUP:
			nr_long++;
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			nr_short++;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	acl_len = bch2_acl_size(nr_short, nr_long);
	u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);

	if (u64s > U8_MAX)
		return ERR_PTR(-E2BIG);

	xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
	if (IS_ERR(xattr))
		return xattr;

	bkey_xattr_init(&xattr->k_i);
	xattr->k.u64s		= u64s;
	xattr->v.x_type		= acl_to_xattr_type(type);
	xattr->v.x_name_len	= 0;
	xattr->v.x_val_len	= cpu_to_le16(acl_len);

	acl_header = xattr_val(&xattr->v);
	acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);

	outptr = (void *) acl_header + sizeof(*acl_header);

	acl_for_each_entry(acl, acl_e) {
		bch_acl_entry *entry = outptr;

		entry->e_tag = cpu_to_le16(acl_e->e_tag);
		entry->e_perm = cpu_to_le16(acl_e->e_perm);
		switch (acl_e->e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
				from_kuid(&init_user_ns, acl_e->e_uid));
			outptr += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
				from_kgid(&init_user_ns, acl_e->e_gid));
			outptr += sizeof(bch_acl_entry);
			break;

		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			outptr += sizeof(bch_acl_entry_short);
			break;
		}
	}

	BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);

	return xattr;
}

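/*
 * The VFS ->get_acl() hook: look up the ACL xattr in a btree
 * transaction (retrying on transaction restart), convert it to the
 * in-memory representation, and cache it on the VFS inode. Returns NULL
 * if the inode has no ACL of the given type.
 */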
struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
			       struct dentry *dentry, int type)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
	struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct posix_acl *acl = NULL;
retry:
	bch2_trans_begin(trans);

	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
					     &hash, inode_inum(inode), &search, 0);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
				 le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret)
		acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;

	if (!IS_ERR_OR_NULL(acl))
		set_cached_acl(&inode->v, type, acl);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return acl;
}

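/*
 * Write (or, when @acl is NULL, delete) the ACL xattr for an inode from
 * within an existing btree transaction; the caller is responsible for
 * committing. A default ACL on a non-directory is rejected with -EACCES,
 * and deleting an ACL that doesn't exist is not an error.
 */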
int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
		       struct bch_inode_unpacked *inode_u,
		       struct posix_acl *acl, int type)
{
	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
	int ret;

	if (type == ACL_TYPE_DEFAULT &&
	    !S_ISDIR(inode_u->bi_mode))
		return acl ? -EACCES : 0;

	if (acl) {
		struct bkey_i_xattr *xattr =
			bch2_acl_to_xattr(trans, acl, type);
		if (IS_ERR(xattr))
			return PTR_ERR(xattr);

		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
				    inum, &xattr->k_i, 0);
	} else {
		struct xattr_search_key search =
			X_SEARCH(acl_to_xattr_type(type), "", 0);

		ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
				       inum, &search);
	}

	return bch2_err_matches(ret, ENOENT) ? 0 : ret;
}

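/*
 * The VFS ->set_acl() hook: under ei_update_lock and a retryable btree
 * transaction, update the inode's mode via posix_acl_update_mode() for
 * access ACLs, write (or delete) the ACL xattr, and commit the inode
 * update with a new ctime.
 */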
int bch2_set_acl(struct mnt_idmap *idmap,
		 struct dentry *dentry,
		 struct posix_acl *_acl, int type)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl;
	umode_t mode;
	int ret;

	mutex_lock(&inode->ei_update_lock);
retry:
	bch2_trans_begin(trans);
	acl = _acl;

	ret   = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
		bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_intent);
	if (ret)
		goto btree_err;

	mode = inode_u.bi_mode;

	if (type == ACL_TYPE_ACCESS) {
		ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
	if (ret)
		goto btree_err;

	inode_u.bi_ctime	= bch2_current_time(c);
	inode_u.bi_mode		= mode;

	ret =   bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err;

	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_CTIME|ATTR_MODE);

	set_cached_acl(&inode->v, type, acl);
err:
	mutex_unlock(&inode->ei_update_lock);
	bch2_trans_put(trans);

	return ret;
}

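/*
 * Called when an inode's mode is changing, from within the caller's
 * btree transaction: rewrite the access ACL to reflect the new mode via
 * __posix_acl_chmod(), update the xattr in place, and return the new ACL
 * through @new_acl so the caller can refresh the VFS cache after
 * committing. A missing access ACL is not an error.
 */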
int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
		   struct bch_inode_unpacked *inode,
		   umode_t mode,
		   struct posix_acl **new_acl)
{
	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
	struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
	struct btree_iter iter;
	struct posix_acl *acl = NULL;

	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
			       &hash_info, inum, &search, BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (ret)
		return bch2_err_matches(ret, ENOENT) ? 0 : ret;

	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);

	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
			le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
	if (ret)
		goto err;

	ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
	if (ret)
		goto err;

	struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		goto err;

	new->k.p = iter.pos;
	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
	*new_acl = acl;
	acl = NULL;
err:
	bch2_trans_iter_exit(trans, &iter);
	if (!IS_ERR_OR_NULL(acl))
		kfree(acl);
	return ret;
}

#endif /* CONFIG_BCACHEFS_POSIX_ACL */