xref: /linux/fs/erofs/xattr.c (revision 97d5f2e9ee12cdc7214d5835d35c59404cfafee6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021-2022, Alibaba Cloud
 */
#include <linux/security.h>
#include "xattr.h"

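/*
 * A shared xattr id is an offset in 4-byte units from the start of the
 * shared xattr area at ->xattr_blkaddr; the helpers below turn such an
 * id into the block number and in-block offset of its on-disk entry.
 */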
static inline erofs_blk_t erofs_xattr_blkaddr(struct super_block *sb,
					      unsigned int xattr_id)
{
	return EROFS_SB(sb)->xattr_blkaddr +
	       erofs_blknr(sb, xattr_id * sizeof(__u32));
}

static inline unsigned int erofs_xattr_blkoff(struct super_block *sb,
					      unsigned int xattr_id)
{
	return erofs_blkoff(sb, xattr_id * sizeof(__u32));
}

struct xattr_iter {
	struct super_block *sb;
	struct erofs_buf buf;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned int ofs;
};

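/*
 * Read the xattr ibody header of @inode and cache its shared xattr id
 * array in vi->xattr_shared_xattrs.  The initialization happens at most
 * once per inode and is serialized against concurrent callers by the
 * EROFS_I_BL_XATTR_BIT bit lock; once EROFS_I_EA_INITED_BIT is observed,
 * later callers return immediately.
 */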
static int erofs_init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	/* in most cases, the xattrs of this inode have already been initialized */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header).  In detail:
	 * 1) if it is too small to hold an erofs_xattr_ibody_header,
	 *    ->xattr_isize must be 0 (which means this inode has no xattrs);
	 * 2) if it holds exactly an erofs_xattr_ibody_header, the layout is
	 *    currently undefined on-disk (it may be used later together with
	 *    some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	it.buf = __EROFS_BUF_INITIALIZER;
	it.blkaddr = erofs_blknr(sb, erofs_iloc(inode) + vi->inode_isize);
	it.ofs = erofs_blkoff(sb, erofs_iloc(inode) + vi->inode_isize);

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
	if (IS_ERR(it.kaddr)) {
		ret = PTR_ERR(it.kaddr);
		goto out_unlock;
	}

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		erofs_put_metabuf(&it.buf);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= sb->s_blocksize) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != sb->s_blocksize);

			it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
						      EROFS_KMAP);
			if (IS_ERR(it.kaddr)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.kaddr);
				goto out_unlock;
			}
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	erofs_put_metabuf(&it.buf);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}

/*
 * the general idea of these return values is:
 * if    0 is returned, go on processing the current xattr;
 *       1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe -ENOATTR) occurred
 *                            and needs to be handled
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};

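/*
 * If ->ofs has run past the end of the current block, advance ->blkaddr
 * accordingly and re-map the metadata buffer so that ->kaddr and ->ofs
 * point into the block that actually contains the next data.
 */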
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < it->sb->s_blocksize)
		return 0;

	it->blkaddr += erofs_blknr(it->sb, it->ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	it->ofs = erofs_blkoff(it->sb, it->ofs);
	return 0;
}

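/*
 * Position @it at the first inline xattr entry of @inode, i.e. right
 * after the ibody header and the shared xattr id array, and return the
 * number of inline xattr bytes left to scan (or -ENOATTR if the inode
 * carries no inline xattrs).
 */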
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
			  sizeof(u32) * vi->xattr_shared_count;
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, kaddr */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read the xattr entry into memory; since entries are
	 *    EROFS_XATTR_ALIGNed, the whole entry lies within a single block
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= it->sb->s_blocksize) {
			DBG_BUGON(it->ofs > it->sb->s_blocksize);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= it->sb->s_blocksize) {
			DBG_BUGON(it->ofs > it->sb->s_blocksize);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}

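/* per-lookup state shared by the erofs_getxattr() handlers below */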
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index, infix_len;
	struct qstr name;
};

static int erofs_xattr_long_entrymatch(struct getxattr_iter *it,
				       struct erofs_xattr_entry *entry)
{
	struct erofs_sb_info *sbi = EROFS_SB(it->it.sb);
	struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
		(entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);

	if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
		return -ENOATTR;

	if (it->index != pf->prefix->base_index ||
	    it->name.len != entry->e_name_len + pf->infix_len)
		return -ENOATTR;

	if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
		return -ENOATTR;

	it->infix_len = pf->infix_len;
	return 0;
}

static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	/* should also match the infix for long name prefixes */
	if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX)
		return erofs_xattr_long_entrymatch(it, entry);

	if (it->index != entry->e_name_index ||
	    it->name.len != entry->e_name_len)
		return -ENOATTR;
	it->infix_len = 0;
	return 0;
}

static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	if (memcmp(buf, it->name.name + it->infix_len + processed, len))
		return -ENOATTR;
	return 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};

static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}

static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = it->it.sb;
	unsigned int i, xsid;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		xsid = vi->xattr_shared_xattrs[i];
		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
						  it->it.blkaddr, EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

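/*
 * Look up the xattr @name of type @index: inline xattrs are scanned
 * first, then the shared xattrs referenced by the inode.  Returns the
 * value length on success (a NULL @buffer just queries that length),
 * -ERANGE if @buffer is too small, or -ENOATTR if no such xattr exists.
 */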
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = erofs_init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;
	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	if (handler->flags == EROFS_XATTR_INDEX_USER &&
	    !test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
		return -EOPNOTSUPP;

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};

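/*
 * listxattr handlers: each visited entry is emitted to the user buffer
 * as "<prefix><infix><name>\0"; when the buffer is NULL, only the total
 * size needed is accumulated in ->buffer_ofs.
 */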
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int base_index = entry->e_name_index;
	unsigned int prefix_len, infix_len = 0;
	const char *prefix, *infix = NULL;

	if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX) {
		struct erofs_sb_info *sbi = EROFS_SB(_it->sb);
		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
			(entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);

		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
			return 1;
		infix = pf->prefix->infix;
		infix_len = pf->infix_len;
		base_index = pf->prefix->base_index;
	}

	prefix = erofs_xattr_prefix(base_index, it->dentry);
	if (!prefix)
		return 1;
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		it->buffer_ofs += prefix_len + infix_len +
					entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len + infix_len +
	    entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
	it->buffer_ofs += prefix_len + infix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};

static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = it->it.sb;
	unsigned int i, xsid;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		xsid = vi->xattr_shared_xattrs[i];
		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
						  it->it.blkaddr, EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}

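/*
 * List all xattr names of @dentry: inline xattrs first, then the shared
 * ones.  Returns the number of bytes used (or, with a NULL @buffer, the
 * number of bytes required), 0 if the inode has no xattrs at all, or a
 * negative errno.
 */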
ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = erofs_init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret >= 0 || ret == -ENOATTR)
		ret = shared_listxattr(&it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

void erofs_xattr_prefixes_cleanup(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	int i;

	if (sbi->xattr_prefixes) {
		for (i = 0; i < sbi->xattr_prefix_count; i++)
			kfree(sbi->xattr_prefixes[i].prefix);
		kfree(sbi->xattr_prefixes);
		sbi->xattr_prefixes = NULL;
	}
}

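/*
 * Load the long xattr name prefix table: one erofs_xattr_long_prefix
 * record per prefix, read from the packed inode when one exists and
 * from the metadata area otherwise, starting at the byte position
 * xattr_prefix_start * 4.
 */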
int erofs_xattr_prefixes_init(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
	struct erofs_xattr_prefix_item *pfs;
	int ret = 0, i, len;

	if (!sbi->xattr_prefix_count)
		return 0;

	pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
	if (!pfs)
		return -ENOMEM;

	if (sbi->packed_inode)
		buf.inode = sbi->packed_inode;
	else
		erofs_init_metabuf(&buf, sb);

	for (i = 0; i < sbi->xattr_prefix_count; i++) {
		void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);

		if (IS_ERR(ptr)) {
			ret = PTR_ERR(ptr);
			break;
		} else if (len < sizeof(*pfs->prefix) ||
			   len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
			kfree(ptr);
			ret = -EFSCORRUPTED;
			break;
		}
		pfs[i].prefix = ptr;
		pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
	}

	erofs_put_metabuf(&buf);
	sbi->xattr_prefixes = pfs;
	if (ret)
		erofs_xattr_prefixes_cleanup(sb);
	return ret;
}

#ifdef CONFIG_EROFS_FS_POSIX_ACL
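/*
 * Read a POSIX ACL from its backing xattr: map the ACL type to the
 * corresponding xattr index, query the value size, read the raw value
 * and convert it with posix_acl_from_xattr().  A missing xattr means
 * "no ACL", so NULL is returned in that case.
 */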
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif