1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) International Business Machines Corp., 2000-2004
4 * Copyright (C) Christoph Hellwig, 2002
5 */
6
7 #include <linux/capability.h>
8 #include <linux/fs.h>
9 #include <linux/xattr.h>
10 #include <linux/posix_acl_xattr.h>
11 #include <linux/slab.h>
12 #include <linux/quotaops.h>
13 #include <linux/security.h>
14 #include "jfs_incore.h"
15 #include "jfs_superblock.h"
16 #include "jfs_dmap.h"
17 #include "jfs_debug.h"
18 #include "jfs_dinode.h"
19 #include "jfs_extent.h"
20 #include "jfs_metapage.h"
21 #include "jfs_xattr.h"
22 #include "jfs_acl.h"
23
24 /*
25 * jfs_xattr.c: extended attribute service
26 *
27 * Overall design --
28 *
29 * Format:
30 *
31 * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
32 * value) and a variable (0 or more) number of extended attribute
33 * entries. Each extended attribute entry (jfs_ea) is a <name,value> pair
34 * where <name> is constructed from a null-terminated ASCII string
35 * (1 ... 255 bytes in the name) and <value> is arbitrary 8-bit data
36 * (1 ... 65535 bytes). The in-memory format is
37 *
38 * 0 1 2 4 4 + namelen + 1
39 * +-------+--------+--------+----------------+-------------------+
40 * | Flags | Name | Value | Name String \0 | Data . . . . |
41 * | | Length | Length | | |
42 * +-------+--------+--------+----------------+-------------------+
43 *
44 * A jfs_ea_list then is structured as
45 *
46 * 0 4 4 + EA_SIZE(ea1)
47 * +------------+-------------------+--------------------+-----
48 * | Overall EA | First FEA Element | Second FEA Element | .....
49 * | List Size | | |
50 * +------------+-------------------+--------------------+-----
51 *
52 * On-disk:
53 *
54 * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
55 * written directly. An EA list may be in-lined in the inode if there is
56 * sufficient room available.
57 */
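/*
 * Worked example of the layout above (illustrative only): an attribute
 * named "user.foo" with a 5-byte value occupies
 * sizeof(struct jfs_ea) + namelen + 1 + valuelen = 4 + 8 + 1 + 5 = 18 bytes
 * (1 flag byte, 1 name-length byte, 2 value-length bytes, the name with its
 * terminating NUL, then the raw value); this is what EA_SIZE() computes.
 * A list holding only that entry has an overall jfs_ea_list size of
 * 4 + 18 = 22 bytes.
 */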
58
59 struct ea_buffer {
60 int flag; /* Indicates what storage xattr points to */
61 int max_size; /* largest xattr that fits in current buffer */
62 dxd_t new_ea; /* dxd to replace ea when modifying xattr */
63 struct metapage *mp; /* metapage containing ea list */
64 struct jfs_ea_list *xattr; /* buffer containing ea list */
65 };
66
67 /*
68 * ea_buffer.flag values
69 */
70 #define EA_INLINE 0x0001
71 #define EA_EXTENT 0x0002
72 #define EA_NEW 0x0004
73 #define EA_MALLOC 0x0008
74
75
76 /*
77 * Mapping of on-disk attribute names: for on-disk attribute names with an
78 * unknown prefix (not "system.", "user.", "security.", or "trusted."), the
79 * prefix "os2." is prepended. On the way back to disk, "os2." prefixes are
80 * stripped and we make sure that the remaining name does not start with one
81 * of the known prefixes.
82 */
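/*
 * For example (illustrative names only): an OS/2-created attribute stored
 * on disk as "Comment" is reported to user space as "os2.Comment" by
 * name_size() and copy_name() below.  In the other direction, a request
 * for "os2.Comment" reaches jfs_xattr_get_os2()/jfs_xattr_set_os2() with
 * the "os2." prefix already stripped by the xattr handler code, so only
 * "Comment" is stored on disk, after checking that it does not collide
 * with one of the known prefixes.
 */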
83
84 static int is_known_namespace(const char *name)
85 {
86 if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
87 strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
88 strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
89 strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
90 return false;
91
92 return true;
93 }
94
95 static inline int name_size(struct jfs_ea *ea)
96 {
97 if (is_known_namespace(ea->name))
98 return ea->namelen;
99 else
100 return ea->namelen + XATTR_OS2_PREFIX_LEN;
101 }
102
103 static inline int copy_name(char *buffer, struct jfs_ea *ea)
104 {
105 int len = ea->namelen;
106
107 if (!is_known_namespace(ea->name)) {
108 memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
109 buffer += XATTR_OS2_PREFIX_LEN;
110 len += XATTR_OS2_PREFIX_LEN;
111 }
112 memcpy(buffer, ea->name, ea->namelen);
113 buffer[ea->namelen] = 0;
114
115 return len;
116 }
117
118 /* Forward references */
119 static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
120
121 /*
122 * NAME: ea_write_inline
123 *
124 * FUNCTION: Attempt to write an EA inline if area is available
125 *
126 * PRE CONDITIONS:
127 * Already verified that the specified EA is small enough to fit inline
128 *
129 * PARAMETERS:
130 * ip - Inode pointer
131 * ealist - EA list pointer
132 * size - size of ealist in bytes
133 * ea - dxd_t structure to be filled in with necessary EA information
134 * if we successfully copy the EA inline
135 *
136 * NOTES:
137 * Checks if the inode's inline area is available. If so, copies EA inline
138 * and sets <ea> fields appropriately. Otherwise, returns failure; the EA
139 * will have to be put into an extent.
140 *
141 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
142 */
143 static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
144 int size, dxd_t * ea)
145 {
146 struct jfs_inode_info *ji = JFS_IP(ip);
147
148 /*
149 * Make sure we have an EA -- the NULL EA list is valid, but you
150 * can't copy it!
151 */
152 if (ealist && size > sizeof (struct jfs_ea_list)) {
153 assert(size <= sizeof (ji->i_inline_ea));
154
155 /*
156 * See if the space is available or if it is already being
157 * used for an inline EA.
158 */
159 if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
160 return -EPERM;
161
162 DXDsize(ea, size);
163 DXDlength(ea, 0);
164 DXDaddress(ea, 0);
165 memcpy(ji->i_inline_ea, ealist, size);
166 ea->flag = DXD_INLINE;
167 ji->mode2 &= ~INLINEEA;
168 } else {
169 ea->flag = 0;
170 DXDsize(ea, 0);
171 DXDlength(ea, 0);
172 DXDaddress(ea, 0);
173
174 /* Free up INLINE area */
175 if (ji->ea.flag & DXD_INLINE)
176 ji->mode2 |= INLINEEA;
177 }
178
179 return 0;
180 }
181
182 /*
183 * NAME: ea_write
184 *
185 * FUNCTION: Write an EA for an inode
186 *
187 * PRE CONDITIONS: EA has been verified
188 *
189 * PARAMETERS:
190 * ip - Inode pointer
191 * ealist - EA list pointer
192 * size - size of ealist in bytes
193 * ea - dxd_t structure to be filled in appropriately with where the
194 * EA was copied
195 *
196 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
197 * extent and synchronously writes it to those blocks.
198 *
199 * RETURNS: 0 for success; Anything else indicates failure
200 */
201 static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
202 dxd_t * ea)
203 {
204 struct super_block *sb = ip->i_sb;
205 struct jfs_inode_info *ji = JFS_IP(ip);
206 struct jfs_sb_info *sbi = JFS_SBI(sb);
207 int nblocks;
208 s64 blkno;
209 int rc = 0, i;
210 char *cp;
211 s32 nbytes, nb;
212 s32 bytes_to_write;
213 struct metapage *mp;
214
215 /*
216 * Quick check to see if this is an in-linable EA. Short EAs
217 * and empty EAs are all in-linable, provided the space exists.
218 */
219 if (!ealist || size <= sizeof (ji->i_inline_ea)) {
220 if (!ea_write_inline(ip, ealist, size, ea))
221 return 0;
222 }
223
224 /* figure out how many blocks we need */
225 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
226
227 /* Allocate new blocks to quota. */
228 rc = dquot_alloc_block(ip, nblocks);
229 if (rc)
230 return rc;
231
232 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
233 if (rc) {
234 /* Rollback quota allocation. */
235 dquot_free_block(ip, nblocks);
236 return rc;
237 }
238
239 /*
240 * We now have nblocks worth of storage to stuff the FEALIST into.
241 * Loop over the FEALIST, copying data into the buffer one page at
242 * a time.
243 */
244 cp = (char *) ealist;
245 nbytes = size;
246 for (i = 0; i < nblocks; i += sbi->nbperpage) {
247 /*
248 * Determine how many bytes for this request, and round up to
249 * the nearest aggregate block size
250 */
251 nb = min(PSIZE, nbytes);
252 bytes_to_write =
253 ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
254 << sb->s_blocksize_bits;
255
256 if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
257 rc = -EIO;
258 goto failed;
259 }
260
261 memcpy(mp->data, cp, nb);
262
263 /*
264 * We really need a way to propagate errors for
265 * forced writes like this one. --hch
266 *
267 * (__write_metapage => release_metapage => flush_metapage)
268 */
269 #ifdef _JFS_FIXME
270 if ((rc = flush_metapage(mp))) {
271 /*
272 * the write failed -- this means that the buffer
273 * is still assigned and the blocks are not being
274 * used. this seems like the best error recovery
275 * we can get ...
276 */
277 goto failed;
278 }
279 #else
280 flush_metapage(mp);
281 #endif
282
283 cp += PSIZE;
284 nbytes -= nb;
285 }
286
287 ea->flag = DXD_EXTENT;
288 DXDsize(ea, le32_to_cpu(ealist->size));
289 DXDlength(ea, nblocks);
290 DXDaddress(ea, blkno);
291
292 /* Free up INLINE area */
293 if (ji->ea.flag & DXD_INLINE)
294 ji->mode2 |= INLINEEA;
295
296 return 0;
297
298 failed:
299 /* Rollback quota allocation. */
300 dquot_free_block(ip, nblocks);
301
302 dbFree(ip, blkno, nblocks);
303 return rc;
304 }
305
306 /*
307 * NAME: ea_read_inline
308 *
309 * FUNCTION: Read an inlined EA into user's buffer
310 *
311 * PARAMETERS:
312 * ip - Inode pointer
313 * ealist - Pointer to buffer to fill in with EA
314 *
315 * RETURNS: 0
316 */
317 static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
318 {
319 struct jfs_inode_info *ji = JFS_IP(ip);
320 int ea_size = sizeDXD(&ji->ea);
321
322 if (ea_size == 0) {
323 ealist->size = 0;
324 return 0;
325 }
326
327 /* Sanity Check */
328 if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
329 return -EIO;
330 if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
331 != ea_size)
332 return -EIO;
333
334 memcpy(ealist, ji->i_inline_ea, ea_size);
335 return 0;
336 }
337
338 /*
339 * NAME: ea_read
340 *
341 * FUNCTION: copy EA data into user's buffer
342 *
343 * PARAMETERS:
344 * ip - Inode pointer
345 * ealist - Pointer to buffer to fill in with EA
346 *
347 * NOTES: If EA is inline calls ea_read_inline() to copy EA.
348 *
349 * RETURNS: 0 for success; other indicates failure
350 */
351 static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
352 {
353 struct super_block *sb = ip->i_sb;
354 struct jfs_inode_info *ji = JFS_IP(ip);
355 struct jfs_sb_info *sbi = JFS_SBI(sb);
356 int nblocks;
357 s64 blkno;
358 char *cp = (char *) ealist;
359 int i;
360 int nbytes, nb;
361 s32 bytes_to_read;
362 struct metapage *mp;
363
364 /* quick check for in-line EA */
365 if (ji->ea.flag & DXD_INLINE)
366 return ea_read_inline(ip, ealist);
367
368 nbytes = sizeDXD(&ji->ea);
369 if (!nbytes) {
370 jfs_error(sb, "nbytes is 0\n");
371 return -EIO;
372 }
373
374 /*
375 * Figure out how many blocks were allocated when this EA list was
376 * originally written to disk.
377 */
378 nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
379 blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;
380
381 /*
382 * We have found the disk blocks which were originally used to store
383 * the FEALIST. Now loop over each contiguous block, copying the
384 * data into the buffer.
385 */
386 for (i = 0; i < nblocks; i += sbi->nbperpage) {
387 /*
388 * Determine how many bytes for this request, and round up to
389 * the nearest aggregate block size
390 */
391 nb = min(PSIZE, nbytes);
392 bytes_to_read =
393 ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
394 << sb->s_blocksize_bits;
395
396 if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
397 return -EIO;
398
399 memcpy(cp, mp->data, nb);
400 release_metapage(mp);
401
402 cp += PSIZE;
403 nbytes -= nb;
404 }
405
406 return 0;
407 }
408
409 /*
410 * NAME: ea_get
411 *
412 * FUNCTION: Returns buffer containing existing extended attributes.
413 * The size of the buffer will be the larger of the existing
414 * attributes' size and min_size.
415 *
416 * The buffer, which may be inlined in the inode or in the
417 * page cache, must be released by calling ea_release() or ea_put().
418 *
419 * PARAMETERS:
420 * inode - Inode pointer
421 * ea_buf - Structure to be populated with ealist and its metadata
422 * min_size - minimum size of buffer to be returned
423 *
424 * RETURNS: 0 for success; Other indicates failure
425 */
426 static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
427 {
428 struct jfs_inode_info *ji = JFS_IP(inode);
429 struct super_block *sb = inode->i_sb;
430 int size;
431 int ea_size = sizeDXD(&ji->ea);
432 int blocks_needed, current_blocks;
433 s64 blkno;
434 int rc;
435 int quota_allocation = 0;
436
437 memset(&ea_buf->new_ea, 0, sizeof(ea_buf->new_ea));
438
439 /* When fsck.jfs clears a bad ea, it doesn't clear the size */
440 if (ji->ea.flag == 0)
441 ea_size = 0;
442
443 if (ea_size == 0) {
444 if (min_size == 0) {
445 ea_buf->flag = 0;
446 ea_buf->max_size = 0;
447 ea_buf->xattr = NULL;
448 return 0;
449 }
450 if ((min_size <= sizeof (ji->i_inline_ea)) &&
451 (ji->mode2 & INLINEEA)) {
452 ea_buf->flag = EA_INLINE | EA_NEW;
453 ea_buf->max_size = sizeof (ji->i_inline_ea);
454 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
455 DXDlength(&ea_buf->new_ea, 0);
456 DXDaddress(&ea_buf->new_ea, 0);
457 ea_buf->new_ea.flag = DXD_INLINE;
458 DXDsize(&ea_buf->new_ea, min_size);
459 return 0;
460 }
461 current_blocks = 0;
462 } else if (ji->ea.flag & DXD_INLINE) {
463 if (min_size <= sizeof (ji->i_inline_ea)) {
464 ea_buf->flag = EA_INLINE;
465 ea_buf->max_size = sizeof (ji->i_inline_ea);
466 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
467 goto size_check;
468 }
469 current_blocks = 0;
470 } else {
471 if (!(ji->ea.flag & DXD_EXTENT)) {
472 jfs_error(sb, "invalid ea.flag\n");
473 return -EIO;
474 }
475 current_blocks = (ea_size + sb->s_blocksize - 1) >>
476 sb->s_blocksize_bits;
477 }
478 size = max(min_size, ea_size);
479
480 if (size > PSIZE) {
481 /*
482 * To keep the rest of the code simple, allocate a
483 * contiguous buffer to work with. Make the buffer large
484 * enough to make use of the whole extent.
485 */
486 ea_buf->max_size = (size + sb->s_blocksize - 1) &
487 ~(sb->s_blocksize - 1);
488
489 ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
490 if (ea_buf->xattr == NULL)
491 return -ENOMEM;
492
493 ea_buf->flag = EA_MALLOC;
494
495 if (ea_size == 0)
496 return 0;
497
498 if ((rc = ea_read(inode, ea_buf->xattr))) {
499 kfree(ea_buf->xattr);
500 ea_buf->xattr = NULL;
501 return rc;
502 }
503 goto size_check;
504 }
505 blocks_needed = (min_size + sb->s_blocksize - 1) >>
506 sb->s_blocksize_bits;
507
508 if (blocks_needed > current_blocks) {
509 /* Allocate new blocks to quota. */
510 rc = dquot_alloc_block(inode, blocks_needed);
511 if (rc)
512 return -EDQUOT;
513
514 quota_allocation = blocks_needed;
515
516 rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
517 &blkno);
518 if (rc)
519 goto clean_up;
520
521 DXDlength(&ea_buf->new_ea, blocks_needed);
522 DXDaddress(&ea_buf->new_ea, blkno);
523 ea_buf->new_ea.flag = DXD_EXTENT;
524 DXDsize(&ea_buf->new_ea, min_size);
525
526 ea_buf->flag = EA_EXTENT | EA_NEW;
527
528 ea_buf->mp = get_metapage(inode, blkno,
529 blocks_needed << sb->s_blocksize_bits,
530 1);
531 if (ea_buf->mp == NULL) {
532 dbFree(inode, blkno, (s64) blocks_needed);
533 rc = -EIO;
534 goto clean_up;
535 }
536 ea_buf->xattr = ea_buf->mp->data;
537 ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
538 ~(sb->s_blocksize - 1);
539 if (ea_size == 0)
540 return 0;
541 if ((rc = ea_read(inode, ea_buf->xattr))) {
542 discard_metapage(ea_buf->mp);
543 dbFree(inode, blkno, (s64) blocks_needed);
544 goto clean_up;
545 }
546 goto size_check;
547 }
548 ea_buf->flag = EA_EXTENT;
549 ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
550 lengthDXD(&ji->ea) << sb->s_blocksize_bits,
551 1);
552 if (ea_buf->mp == NULL) {
553 rc = -EIO;
554 goto clean_up;
555 }
556 ea_buf->xattr = ea_buf->mp->data;
557 ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
558 ~(sb->s_blocksize - 1);
559
560 size_check:
561 if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
562 int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr));
563
564 printk(KERN_ERR "ea_get: invalid extended attribute\n");
565 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
566 ea_buf->xattr, size, 1);
567 ea_release(inode, ea_buf);
568 rc = -EIO;
569 goto clean_up;
570 }
571
572 return ea_size;
573
574 clean_up:
575 /* Rollback quota allocation */
576 if (quota_allocation)
577 dquot_free_block(inode, quota_allocation);
578
579 return (rc);
580 }
581
582 static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
583 {
584 if (ea_buf->flag & EA_MALLOC)
585 kfree(ea_buf->xattr);
586 else if (ea_buf->flag & EA_EXTENT) {
587 assert(ea_buf->mp);
588 release_metapage(ea_buf->mp);
589
590 if (ea_buf->flag & EA_NEW)
591 dbFree(inode, addressDXD(&ea_buf->new_ea),
592 lengthDXD(&ea_buf->new_ea));
593 }
594 }
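/*
 * Illustrative pseudo-code summary of how the helpers above are used by
 * the entry points below (__jfs_setxattr(), __jfs_getxattr(),
 * jfs_listxattr()); it is a sketch of the calling convention, not code
 * that is compiled anywhere:
 *
 *	down_write(&JFS_IP(inode)->xattr_sem);	(or down_read for lookups)
 *	size = ea_get(inode, &ea_buf, min_size);
 *	... examine or rewrite ea_buf.xattr in place ...
 *	ea_put(tid, inode, &ea_buf, new_size);	(writers: logs change via txEA)
 *		-or-
 *	ea_release(inode, &ea_buf);		(readers and error paths)
 *	up_write(&JFS_IP(inode)->xattr_sem);
 */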
595
596 static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
597 int new_size)
598 {
599 struct jfs_inode_info *ji = JFS_IP(inode);
600 unsigned long old_blocks, new_blocks;
601 int rc = 0;
602
603 if (new_size == 0) {
604 ea_release(inode, ea_buf);
605 ea_buf = NULL;
606 } else if (ea_buf->flag & EA_INLINE) {
607 assert(new_size <= sizeof (ji->i_inline_ea));
608 ji->mode2 &= ~INLINEEA;
609 ea_buf->new_ea.flag = DXD_INLINE;
610 DXDsize(&ea_buf->new_ea, new_size);
611 DXDaddress(&ea_buf->new_ea, 0);
612 DXDlength(&ea_buf->new_ea, 0);
613 } else if (ea_buf->flag & EA_MALLOC) {
614 rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
615 kfree(ea_buf->xattr);
616 } else if (ea_buf->flag & EA_NEW) {
617 /* We have already allocated a new dxd */
618 flush_metapage(ea_buf->mp);
619 } else {
620 /* ->xattr must point to original ea's metapage */
621 rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
622 discard_metapage(ea_buf->mp);
623 }
624 if (rc)
625 return rc;
626
627 old_blocks = new_blocks = 0;
628
629 if (ji->ea.flag & DXD_EXTENT) {
630 invalidate_dxd_metapages(inode, ji->ea);
631 old_blocks = lengthDXD(&ji->ea);
632 }
633
634 if (ea_buf) {
635 txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
636 if (ea_buf->new_ea.flag & DXD_EXTENT) {
637 new_blocks = lengthDXD(&ea_buf->new_ea);
638 if (ji->ea.flag & DXD_INLINE)
639 ji->mode2 |= INLINEEA;
640 }
641 ji->ea = ea_buf->new_ea;
642 } else {
643 txEA(tid, inode, &ji->ea, NULL);
644 if (ji->ea.flag & DXD_INLINE)
645 ji->mode2 |= INLINEEA;
646 ji->ea.flag = 0;
647 ji->ea.size = 0;
648 }
649
650 /* If old blocks exist, they must be removed from quota allocation. */
651 if (old_blocks)
652 dquot_free_block(inode, old_blocks);
653
654 inode_set_ctime_current(inode);
655
656 return 0;
657 }
658
659 int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
660 const void *value, size_t value_len, int flags)
661 {
662 struct jfs_ea_list *ealist;
663 struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
664 struct ea_buffer ea_buf;
665 int old_ea_size = 0;
666 int xattr_size;
667 int new_size;
668 int namelen = strlen(name);
669 int found = 0;
670 int rc;
671 int length;
672
673 down_write(&JFS_IP(inode)->xattr_sem);
674
675 xattr_size = ea_get(inode, &ea_buf, 0);
676 if (xattr_size < 0) {
677 rc = xattr_size;
678 goto out;
679 }
680
681 again:
682 ealist = (struct jfs_ea_list *) ea_buf.xattr;
683 new_size = sizeof (struct jfs_ea_list);
684
685 if (xattr_size) {
686 for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
687 ea = NEXT_EA(ea)) {
688 if ((namelen == ea->namelen) &&
689 (memcmp(name, ea->name, namelen) == 0)) {
690 found = 1;
691 if (flags & XATTR_CREATE) {
692 rc = -EEXIST;
693 goto release;
694 }
695 old_ea = ea;
696 old_ea_size = EA_SIZE(ea);
697 next_ea = NEXT_EA(ea);
698 } else
699 new_size += EA_SIZE(ea);
700 }
701 }
702
703 if (!found) {
704 if (flags & XATTR_REPLACE) {
705 rc = -ENODATA;
706 goto release;
707 }
708 if (value == NULL) {
709 rc = 0;
710 goto release;
711 }
712 }
713 if (value)
714 new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;
715
716 if (new_size > ea_buf.max_size) {
717 /*
718 * We need to allocate more space for the merged ea list.
719 * We should only loop back to again: once.
720 */
721 ea_release(inode, &ea_buf);
722 xattr_size = ea_get(inode, &ea_buf, new_size);
723 if (xattr_size < 0) {
724 rc = xattr_size;
725 goto out;
726 }
727 goto again;
728 }
729
730 /* Remove old ea of the same name */
731 if (found) {
732 /* number of bytes following target EA */
733 length = (char *) END_EALIST(ealist) - (char *) next_ea;
734 if (length > 0)
735 memmove(old_ea, next_ea, length);
736 xattr_size -= old_ea_size;
737 }
738
739 /* Add new entry to the end */
740 if (value) {
741 if (xattr_size == 0)
742 /* Completely new ea list */
743 xattr_size = sizeof (struct jfs_ea_list);
744
745 /*
746 * The size of an EA value is limited by the on-disk format to what
747 * fits in a __le16, so a size equal to XATTR_SIZE_MAX (65536) would
748 * overflow it. To avoid this, check the value size against USHRT_MAX
749 * up front and return -E2BIG in that case, which is consistent with
750 * the error the VFS setxattr interface returns for values that are
751 * too large.
752 */
753 if (value_len >= USHRT_MAX) {
754 rc = -E2BIG;
755 goto release;
756 }
757
758 ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
759 ea->flag = 0;
760 ea->namelen = namelen;
761 ea->valuelen = (cpu_to_le16(value_len));
762 memcpy(ea->name, name, namelen);
763 ea->name[namelen] = 0;
764 if (value_len)
765 memcpy(&ea->name[namelen + 1], value, value_len);
766 xattr_size += EA_SIZE(ea);
767 }
768
769 /* DEBUG - If we did this right, these numbers match */
770 if (xattr_size != new_size) {
771 printk(KERN_ERR
772 "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
773 xattr_size, new_size);
774
775 rc = -EINVAL;
776 goto release;
777 }
778
779 /*
780 * If we're left with an empty list, there's no ea
781 */
782 if (new_size == sizeof (struct jfs_ea_list))
783 new_size = 0;
784
785 ealist->size = cpu_to_le32(new_size);
786
787 rc = ea_put(tid, inode, &ea_buf, new_size);
788
789 goto out;
790 release:
791 ea_release(inode, &ea_buf);
792 out:
793 up_write(&JFS_IP(inode)->xattr_sem);
794
795 return rc;
796 }
797
798 ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
799 size_t buf_size)
800 {
801 struct jfs_ea_list *ealist;
802 struct jfs_ea *ea, *ealist_end;
803 struct ea_buffer ea_buf;
804 int xattr_size;
805 ssize_t size;
806 int namelen = strlen(name);
807 char *value;
808
809 down_read(&JFS_IP(inode)->xattr_sem);
810
811 xattr_size = ea_get(inode, &ea_buf, 0);
812
813 if (xattr_size < 0) {
814 size = xattr_size;
815 goto out;
816 }
817
818 if (xattr_size == 0)
819 goto not_found;
820
821 ealist = (struct jfs_ea_list *) ea_buf.xattr;
822 ealist_end = END_EALIST(ealist);
823
824 /* Find the named attribute */
825 for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
826 if (unlikely(ea + 1 > ealist_end) ||
827 unlikely(NEXT_EA(ea) > ealist_end)) {
828 size = -EUCLEAN;
829 goto release;
830 }
831
832 if ((namelen == ea->namelen) &&
833 memcmp(name, ea->name, namelen) == 0) {
834 /* Found it */
835 size = le16_to_cpu(ea->valuelen);
836 if (!data)
837 goto release;
838 else if (size > buf_size) {
839 size = -ERANGE;
840 goto release;
841 }
842 value = ((char *) &ea->name) + ea->namelen + 1;
843 memcpy(data, value, size);
844 goto release;
845 }
846 }
847 not_found:
848 size = -ENODATA;
849 release:
850 ea_release(inode, &ea_buf);
851 out:
852 up_read(&JFS_IP(inode)->xattr_sem);
853
854 return size;
855 }
856
857 /*
858 * No special permissions are needed to list attributes except for trusted.*
859 */
860 static inline int can_list(struct jfs_ea *ea)
861 {
862 return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
863 XATTR_TRUSTED_PREFIX_LEN) ||
864 capable(CAP_SYS_ADMIN));
865 }
866
867 ssize_t jfs_listxattr(struct dentry *dentry, char *data, size_t buf_size)
868 {
869 struct inode *inode = d_inode(dentry);
870 char *buffer;
871 ssize_t size = 0;
872 int xattr_size;
873 struct jfs_ea_list *ealist;
874 struct jfs_ea *ea, *ealist_end;
875 struct ea_buffer ea_buf;
876
877 down_read(&JFS_IP(inode)->xattr_sem);
878
879 xattr_size = ea_get(inode, &ea_buf, 0);
880 if (xattr_size < 0) {
881 size = xattr_size;
882 goto out;
883 }
884
885 if (xattr_size == 0)
886 goto release;
887
888 ealist = (struct jfs_ea_list *) ea_buf.xattr;
889 ealist_end = END_EALIST(ealist);
890
891 /* compute required size of list */
892 for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
893 if (unlikely(ea + 1 > ealist_end) ||
894 unlikely(NEXT_EA(ea) > ealist_end)) {
895 size = -EUCLEAN;
896 goto release;
897 }
898
899 if (can_list(ea))
900 size += name_size(ea) + 1;
901 }
902
903 if (!data)
904 goto release;
905
906 if (size > buf_size) {
907 size = -ERANGE;
908 goto release;
909 }
910
911 /* Copy attribute names to buffer */
912 buffer = data;
913 for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
914 if (can_list(ea)) {
915 int namelen = copy_name(buffer, ea);
916 buffer += namelen + 1;
917 }
918 }
919
920 release:
921 ea_release(inode, &ea_buf);
922 out:
923 up_read(&JFS_IP(inode)->xattr_sem);
924 return size;
925 }
926
927 static int __jfs_xattr_set(struct inode *inode, const char *name,
928 const void *value, size_t size, int flags)
929 {
930 struct jfs_inode_info *ji = JFS_IP(inode);
931 tid_t tid;
932 int rc;
933
934 tid = txBegin(inode->i_sb, 0);
935 mutex_lock(&ji->commit_mutex);
936 rc = __jfs_setxattr(tid, inode, name, value, size, flags);
937 if (!rc)
938 rc = txCommit(tid, 1, &inode, 0);
939 txEnd(tid);
940 mutex_unlock(&ji->commit_mutex);
941
942 return rc;
943 }
944
945 static int jfs_xattr_get(const struct xattr_handler *handler,
946 struct dentry *unused, struct inode *inode,
947 const char *name, void *value, size_t size)
948 {
949 name = xattr_full_name(handler, name);
950 return __jfs_getxattr(inode, name, value, size);
951 }
952
953 static int jfs_xattr_set(const struct xattr_handler *handler,
954 struct mnt_idmap *idmap,
955 struct dentry *unused, struct inode *inode,
956 const char *name, const void *value,
957 size_t size, int flags)
958 {
959 name = xattr_full_name(handler, name);
960 return __jfs_xattr_set(inode, name, value, size, flags);
961 }
962
963 static int jfs_xattr_get_os2(const struct xattr_handler *handler,
964 struct dentry *unused, struct inode *inode,
965 const char *name, void *value, size_t size)
966 {
967 if (is_known_namespace(name))
968 return -EOPNOTSUPP;
969 return __jfs_getxattr(inode, name, value, size);
970 }
971
972 static int jfs_xattr_set_os2(const struct xattr_handler *handler,
973 struct mnt_idmap *idmap,
974 struct dentry *unused, struct inode *inode,
975 const char *name, const void *value,
976 size_t size, int flags)
977 {
978 if (is_known_namespace(name))
979 return -EOPNOTSUPP;
980 return __jfs_xattr_set(inode, name, value, size, flags);
981 }
982
983 static const struct xattr_handler jfs_user_xattr_handler = {
984 .prefix = XATTR_USER_PREFIX,
985 .get = jfs_xattr_get,
986 .set = jfs_xattr_set,
987 };
988
989 static const struct xattr_handler jfs_os2_xattr_handler = {
990 .prefix = XATTR_OS2_PREFIX,
991 .get = jfs_xattr_get_os2,
992 .set = jfs_xattr_set_os2,
993 };
994
995 static const struct xattr_handler jfs_security_xattr_handler = {
996 .prefix = XATTR_SECURITY_PREFIX,
997 .get = jfs_xattr_get,
998 .set = jfs_xattr_set,
999 };
1000
1001 static const struct xattr_handler jfs_trusted_xattr_handler = {
1002 .prefix = XATTR_TRUSTED_PREFIX,
1003 .get = jfs_xattr_get,
1004 .set = jfs_xattr_set,
1005 };
1006
1007 const struct xattr_handler * const jfs_xattr_handlers[] = {
1008 &jfs_os2_xattr_handler,
1009 &jfs_user_xattr_handler,
1010 &jfs_security_xattr_handler,
1011 &jfs_trusted_xattr_handler,
1012 NULL,
1013 };
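/*
 * This table covers the "user.", "os2.", "security." and "trusted."
 * namespaces; it is presumably installed on the superblock (sb->s_xattr)
 * at mount time so the generic xattr code can dispatch to the handlers
 * above.  "system.posix_acl_*" attributes are not listed here; they are
 * handled by the generic POSIX ACL code (see jfs_acl.h).
 */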
1014
1015
1016 #ifdef CONFIG_JFS_SECURITY
1017 static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
1018 void *fs_info)
1019 {
1020 const struct xattr *xattr;
1021 tid_t *tid = fs_info;
1022 char *name;
1023 int err = 0;
1024
1025 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
1026 name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
1027 strlen(xattr->name) + 1, GFP_NOFS);
1028 if (!name) {
1029 err = -ENOMEM;
1030 break;
1031 }
1032 strcpy(name, XATTR_SECURITY_PREFIX);
1033 strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
1034
1035 err = __jfs_setxattr(*tid, inode, name,
1036 xattr->value, xattr->value_len, 0);
1037 kfree(name);
1038 if (err < 0)
1039 break;
1040 }
1041 return err;
1042 }
1043
1044 int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
1045 const struct qstr *qstr)
1046 {
1047 return security_inode_init_security(inode, dir, qstr,
1048 &jfs_initxattrs, &tid);
1049 }
1050 #endif
1051