xref: /linux/fs/bcachefs/acl.c (revision c7546e2c3cb739a3c1a2f5acaf9bb629d401afe5)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"

#include "acl.h"
#include "xattr.h"

#include <linux/posix_acl.h>

static const char * const acl_types[] = {
	[ACL_USER_OBJ]	= "user_obj",
	[ACL_USER]	= "user",
	[ACL_GROUP_OBJ]	= "group_obj",
	[ACL_GROUP]	= "group",
	[ACL_MASK]	= "mask",
	[ACL_OTHER]	= "other",
	NULL,
};

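/*
 * Pretty-print the on-disk ACL representation; used when printing xattr
 * keys (e.g. in debug/error output).
 */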
void bch2_acl_to_text(struct printbuf *out, const void *value, size_t size)
{
	const void *p, *end = value + size;

	if (!value ||
	    size < sizeof(bch_acl_header) ||
	    ((bch_acl_header *)value)->a_version != cpu_to_le32(BCH_ACL_VERSION))
		return;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *in = p;
		unsigned tag = le16_to_cpu(in->e_tag);

		prt_str(out, acl_types[tag]);

		switch (tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
			prt_printf(out, " uid %u", le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			prt_printf(out, " gid %u", le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		}

		prt_printf(out, " %o", le16_to_cpu(in->e_perm));

		if (p != end)
			prt_char(out, ' ');
	}
}

#ifdef CONFIG_BCACHEFS_POSIX_ACL

#include "fs.h"

#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/sched.h>
#include <linux/slab.h>

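/* Byte size of the on-disk ACL xattr value for the given entry counts: */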
static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
{
	return sizeof(bch_acl_header) +
		sizeof(bch_acl_entry_short) * nr_short +
		sizeof(bch_acl_entry) * nr_long;
}

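/* Map an ACL_TYPE_* to the xattr index it is stored under: */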
static inline int acl_to_xattr_type(int type)
{
	switch (type) {
	case ACL_TYPE_ACCESS:
		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
	case ACL_TYPE_DEFAULT:
		return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
	default:
		BUG();
	}
}

/*
 * Convert from filesystem to in-memory representation.
 */
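/*
 * Returns NULL for a missing or empty ACL, or an ERR_PTR() on invalid data
 * or allocation failure.
 */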
static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
					    const void *value, size_t size)
{
	const void *p, *end = value + size;
	struct posix_acl *acl;
	struct posix_acl_entry *out;
	unsigned count = 0;
	int ret;

	if (!value)
		return NULL;
	if (size < sizeof(bch_acl_header))
		goto invalid;
	if (((bch_acl_header *)value)->a_version !=
	    cpu_to_le32(BCH_ACL_VERSION))
		goto invalid;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *entry = p;

		if (p + sizeof(bch_acl_entry_short) > end)
			goto invalid;

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
		case ACL_GROUP:
			p += sizeof(bch_acl_entry);
			break;
		default:
			goto invalid;
		}

		count++;
	}

	if (p > end)
		goto invalid;

	if (!count)
		return NULL;

	acl = allocate_dropping_locks(trans, ret,
			posix_acl_alloc(count, _gfp));
	if (!acl)
		return ERR_PTR(-ENOMEM);
	if (ret) {
		kfree(acl);
		return ERR_PTR(ret);
	}

	out = acl->a_entries;

	p = value + sizeof(bch_acl_header);
	while (p < end) {
		const bch_acl_entry *in = p;

		out->e_tag  = le16_to_cpu(in->e_tag);
		out->e_perm = le16_to_cpu(in->e_perm);

		switch (out->e_tag) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			p += sizeof(bch_acl_entry_short);
			break;
		case ACL_USER:
			out->e_uid = make_kuid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			out->e_gid = make_kgid(&init_user_ns,
					       le32_to_cpu(in->e_id));
			p += sizeof(bch_acl_entry);
			break;
		}

		out++;
	}

	BUG_ON(out != acl->a_entries + acl->a_count);

	return acl;
invalid:
	pr_err("invalid acl entry");
	return ERR_PTR(-EINVAL);
}

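/* Iterate over the entries of an in-memory struct posix_acl: */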
#define acl_for_each_entry(acl, acl_e)			\
	for (acl_e = acl->a_entries;			\
	     acl_e < acl->a_entries + acl->a_count;	\
	     acl_e++)

/*
 * Convert from in-memory to filesystem representation.
 */
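/*
 * The result is allocated with bch2_trans_kmalloc(), so it is only valid for
 * the lifetime of the btree transaction.
 */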
static struct bkey_i_xattr *
bch2_acl_to_xattr(struct btree_trans *trans,
		  const struct posix_acl *acl,
		  int type)
{
	struct bkey_i_xattr *xattr;
	bch_acl_header *acl_header;
	const struct posix_acl_entry *acl_e;
	void *outptr;
	unsigned nr_short = 0, nr_long = 0, acl_len, u64s;

	acl_for_each_entry(acl, acl_e) {
		switch (acl_e->e_tag) {
		case ACL_USER:
		case ACL_GROUP:
			nr_long++;
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			nr_short++;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	acl_len = bch2_acl_size(nr_short, nr_long);
	u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);

	if (u64s > U8_MAX)
		return ERR_PTR(-E2BIG);

	xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
	if (IS_ERR(xattr))
		return xattr;

	bkey_xattr_init(&xattr->k_i);
	xattr->k.u64s		= u64s;
	xattr->v.x_type		= acl_to_xattr_type(type);
	xattr->v.x_name_len	= 0;
	xattr->v.x_val_len	= cpu_to_le16(acl_len);

	acl_header = xattr_val(&xattr->v);
	acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);

	outptr = (void *) acl_header + sizeof(*acl_header);

	acl_for_each_entry(acl, acl_e) {
		bch_acl_entry *entry = outptr;

		entry->e_tag = cpu_to_le16(acl_e->e_tag);
		entry->e_perm = cpu_to_le16(acl_e->e_perm);
		switch (acl_e->e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
				from_kuid(&init_user_ns, acl_e->e_uid));
			outptr += sizeof(bch_acl_entry);
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
				from_kgid(&init_user_ns, acl_e->e_gid));
			outptr += sizeof(bch_acl_entry);
			break;

		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			outptr += sizeof(bch_acl_entry_short);
			break;
		}
	}

	BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);

	return xattr;
}

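/*
 * Look up the ACL xattr for @type and convert it to an in-memory posix_acl,
 * caching the result on the VFS inode. Returns -ECHILD in RCU-walk mode,
 * since this needs to take btree locks.
 */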
struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
	struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
	struct btree_iter iter = { NULL };
	struct posix_acl *acl = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	struct btree_trans *trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
					     &hash, inode_inum(inode), &search, 0);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
				 le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret)
		acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;

	if (!IS_ERR_OR_NULL(acl))
		set_cached_acl(&inode->v, type, acl);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return acl;
}

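/*
 * Set (or, for a NULL acl, remove) the ACL xattr for @type within an
 * existing btree transaction; does not commit.
 */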
int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
		       struct bch_inode_unpacked *inode_u,
		       struct posix_acl *acl, int type)
{
	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
	int ret;

	if (type == ACL_TYPE_DEFAULT &&
	    !S_ISDIR(inode_u->bi_mode))
		return acl ? -EACCES : 0;

	if (acl) {
		struct bkey_i_xattr *xattr =
			bch2_acl_to_xattr(trans, acl, type);
		if (IS_ERR(xattr))
			return PTR_ERR(xattr);

		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
				    inum, &xattr->k_i, 0);
	} else {
		struct xattr_search_key search =
			X_SEARCH(acl_to_xattr_type(type), "", 0);

		ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
				       inum, &search);
	}

	return bch2_err_matches(ret, ENOENT) ? 0 : ret;
}

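/*
 * Update an inode's ACL from the VFS: sets the ACL xattr and, for
 * ACL_TYPE_ACCESS, the inode mode, plus ctime, in one transaction,
 * retrying on transaction restart.
 */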
int bch2_set_acl(struct mnt_idmap *idmap,
		 struct dentry *dentry,
		 struct posix_acl *_acl, int type)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl;
	umode_t mode;
	int ret;

	mutex_lock(&inode->ei_update_lock);
	struct btree_trans *trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);
	acl = _acl;

	ret   = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
		bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_intent);
	if (ret)
		goto btree_err;

	mode = inode_u.bi_mode;

	if (type == ACL_TYPE_ACCESS) {
		ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
	if (ret)
		goto btree_err;

	inode_u.bi_ctime	= bch2_current_time(c);
	inode_u.bi_mode		= mode;

	ret =   bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err;

	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_CTIME|ATTR_MODE);

	set_cached_acl(&inode->v, type, acl);
err:
	bch2_trans_put(trans);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

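/*
 * Called when an inode's mode changes: rewrite the access ACL to reflect the
 * new mode and return the updated ACL in @new_acl. A missing access ACL is
 * not an error.
 */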
int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
		   struct bch_inode_unpacked *inode,
		   umode_t mode,
		   struct posix_acl **new_acl)
{
	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
	struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
	struct btree_iter iter;
	struct posix_acl *acl = NULL;

	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
			       &hash_info, inum, &search, BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (ret)
		return bch2_err_matches(ret, ENOENT) ? 0 : ret;

	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);

	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
			le16_to_cpu(xattr.v->x_val_len));
	ret = PTR_ERR_OR_ZERO(acl);
	if (ret)
		goto err;

	ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
	if (ret)
		goto err;

	struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		goto err;

	new->k.p = iter.pos;
	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
	*new_acl = acl;
	acl = NULL;
err:
	bch2_trans_iter_exit(trans, &iter);
	if (!IS_ERR_OR_NULL(acl))
		kfree(acl);
	return ret;
}

#endif /* CONFIG_BCACHEFS_POSIX_ACL */