/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Copyright (C) Christoph Hellwig, 2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/security.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"

/*
 * jfs_xattr.c: extended attribute service
 *
 * Overall design --
 *
 * Format:
 *
 * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
 * value) and a variable (0 or more) number of extended attribute
 * entries.  Each extended attribute entry (jfs_ea) is a <name,value> double
 * where <name> is constructed from a null-terminated ascii string
 * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
 * (1 ... 65535 bytes).  The in-memory format is
 *
 *   0       1        2        4                4 + namelen + 1
 *   +-------+--------+--------+----------------+-------------------+
 *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
 *   |       | Length | Length |                |                   |
 *   +-------+--------+--------+----------------+-------------------+
 *
 * A jfs_ea_list then is structured as
 *
 *   0            4                   4 + EA_SIZE(ea1)
 *   +------------+-------------------+--------------------+-----
 *   | Overall EA | First FEA Element | Second FEA Element | .....
 *   | List Size  |                   |                    |
 *   +------------+-------------------+--------------------+-----
 *
 * On-disk:
 *
 * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
 * written directly. An EA list may be in-lined in the inode if there is
 * sufficient room available.
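 *
 * For example, a single (hypothetical) attribute "user.foo" = "bar" is
 * encoded, per the in-memory layout above, as one jfs_ea entry with
 * namelen = 8 and valuelen = 3, so
 *
 *   EA_SIZE(ea) = 4 + namelen + 1 + valuelen = 4 + 8 + 1 + 3 = 16 bytes
 *
 * and the complete jfs_ea_list occupies 4 (overall size) + 16 = 20 bytes,
 * easily small enough to be in-lined.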
 */

struct ea_buffer {
	int flag;	/* Indicates what storage xattr points to */
	int max_size;	/* largest xattr that fits in current buffer */
	dxd_t new_ea;	/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001
#define EA_EXTENT	0x0002
#define EA_NEW		0x0004
#define EA_MALLOC	0x0008


static int is_known_namespace(const char *name)
{
	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
	    strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return false;

	return true;
}

/*
 * These three routines are used to recognize on-disk extended attributes
 * that are in a recognized namespace.  If the attribute is not recognized,
 * "os2." is prepended to the name
 */
static int is_os2_xattr(struct jfs_ea *ea)
{
	return !is_known_namespace(ea->name);
}

static inline int name_size(struct jfs_ea *ea)
{
	if (is_os2_xattr(ea))
		return ea->namelen + XATTR_OS2_PREFIX_LEN;
	else
		return ea->namelen;
}

static inline int copy_name(char *buffer, struct jfs_ea *ea)
{
	int len = ea->namelen;

	if (is_os2_xattr(ea)) {
		memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
		buffer += XATTR_OS2_PREFIX_LEN;
		len += XATTR_OS2_PREFIX_LEN;
	}
	memcpy(buffer, ea->name, ea->namelen);
	buffer[ea->namelen] = 0;

	return len;
}

/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);

/*
 * NAME: ea_write_inline
 *
 * FUNCTION: Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available. If so, copies EA inline
 *	and sets <ea> fields appropriately. Otherwise, returns failure, EA will
 *	have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
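		 * (INLINEEA set in mode2 means the inline area is free for
		 * use; DXD_INLINE in ea.flag means it already holds this
		 * inode's EA list and can simply be overwritten.)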
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		ji->mode2 &= ~INLINEEA;
	} else {
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}

/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	rc = dquot_alloc_block(ip, nblocks);
	if (rc)
		return rc;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/* Rollback quota allocation. */
		dquot_free_block(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	dquot_free_block(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}

/*
 * NAME: ea_read_inline
 *
 * FUNCTION: Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}

/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES: If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "ea_read: nbytes is 0");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}

/*
 * NAME: ea_get
 *
 * FUNCTION: Returns buffer containing existing extended attributes.
 *	The size of the buffer will be the larger of the existing
 *	attributes size, or min_size.
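 *
 *	A typical caller pattern (a sketch following __jfs_getxattr() below):
 *
 *		down_read(&JFS_IP(inode)->xattr_sem);
 *		xattr_size = ea_get(inode, &ea_buf, 0);
 *		if (xattr_size >= 0) {
 *			... walk ea_buf.xattr with FIRST_EA()/NEXT_EA() ...
 *			ea_release(inode, &ea_buf);
 *		}
 *		up_read(&JFS_IP(inode)->xattr_sem);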
 *
 *	The buffer, which may be inlined in the inode or in the
 *	page cache, must be released by calling ea_release() or ea_put()
 *
 * PARAMETERS:
 *	inode	- Inode pointer
 *	ea_buf	- Structure to be populated with ealist and its metadata
 *	min_size - minimum size of buffer to be returned
 *
 * RETURNS: 0 for success; Other indicates failure
 */
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	struct super_block *sb = inode->i_sb;
	int size;
	int ea_size = sizeDXD(&ji->ea);
	int blocks_needed, current_blocks;
	s64 blkno;
	int rc;
	int quota_allocation = 0;

	/* When fsck.jfs clears a bad ea, it doesn't clear the size */
	if (ji->ea.flag == 0)
		ea_size = 0;

	if (ea_size == 0) {
		if (min_size == 0) {
			ea_buf->flag = 0;
			ea_buf->max_size = 0;
			ea_buf->xattr = NULL;
			return 0;
		}
		if ((min_size <= sizeof (ji->i_inline_ea)) &&
		    (ji->mode2 & INLINEEA)) {
			ea_buf->flag = EA_INLINE | EA_NEW;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			DXDlength(&ea_buf->new_ea, 0);
			DXDaddress(&ea_buf->new_ea, 0);
			ea_buf->new_ea.flag = DXD_INLINE;
			DXDsize(&ea_buf->new_ea, min_size);
			return 0;
		}
		current_blocks = 0;
	} else if (ji->ea.flag & DXD_INLINE) {
		if (min_size <= sizeof (ji->i_inline_ea)) {
			ea_buf->flag = EA_INLINE;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			goto size_check;
		}
		current_blocks = 0;
	} else {
		if (!(ji->ea.flag & DXD_EXTENT)) {
			jfs_error(sb, "ea_get: invalid ea.flag");
			return -EIO;
		}
		current_blocks = (ea_size + sb->s_blocksize - 1) >>
		    sb->s_blocksize_bits;
	}
	size = max(min_size, ea_size);

	if (size > PSIZE) {
		/*
		 * To keep the rest of the code simple, allocate a
		 * contiguous buffer to work with.
		 */
		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
		if (ea_buf->xattr == NULL)
			return -ENOMEM;

		ea_buf->flag = EA_MALLOC;
		ea_buf->max_size = (size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);

		if (ea_size == 0)
			return 0;

		if ((rc = ea_read(inode, ea_buf->xattr))) {
			kfree(ea_buf->xattr);
			ea_buf->xattr = NULL;
			return rc;
		}
		goto size_check;
	}
	blocks_needed = (min_size + sb->s_blocksize - 1) >>
	    sb->s_blocksize_bits;

	if (blocks_needed > current_blocks) {
		/* Allocate new blocks to quota. */
		rc = dquot_alloc_block(inode, blocks_needed);
		if (rc)
			return -EDQUOT;

		quota_allocation = blocks_needed;

		rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
			     &blkno);
		if (rc)
			goto clean_up;

		DXDlength(&ea_buf->new_ea, blocks_needed);
		DXDaddress(&ea_buf->new_ea, blkno);
		ea_buf->new_ea.flag = DXD_EXTENT;
		DXDsize(&ea_buf->new_ea, min_size);

		ea_buf->flag = EA_EXTENT | EA_NEW;

		ea_buf->mp = get_metapage(inode, blkno,
				blocks_needed << sb->s_blocksize_bits, 1);
		if (ea_buf->mp == NULL) {
			dbFree(inode, blkno, (s64) blocks_needed);
			rc = -EIO;
			goto clean_up;
		}
		ea_buf->xattr = ea_buf->mp->data;
		ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);
		if (ea_size == 0)
			return 0;
		if ((rc = ea_read(inode, ea_buf->xattr))) {
			discard_metapage(ea_buf->mp);
			dbFree(inode, blkno, (s64) blocks_needed);
			goto clean_up;
		}
		goto size_check;
	}
	ea_buf->flag = EA_EXTENT;
	ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
				   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
				   1);
	if (ea_buf->mp == NULL) {
		rc = -EIO;
		goto clean_up;
	}
	ea_buf->xattr = ea_buf->mp->data;
	ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
	    ~(sb->s_blocksize - 1);

      size_check:
	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
		printk(KERN_ERR "ea_get: invalid extended attribute\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
			       ea_buf->xattr, ea_size, 1);
		ea_release(inode, ea_buf);
		rc = -EIO;
		goto clean_up;
	}

	return ea_size;

      clean_up:
	/* Rollback quota allocation */
	if (quota_allocation)
		dquot_free_block(inode, quota_allocation);

	return (rc);
}

static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
	if (ea_buf->flag & EA_MALLOC)
		kfree(ea_buf->xattr);
	else if (ea_buf->flag & EA_EXTENT) {
		assert(ea_buf->mp);
		release_metapage(ea_buf->mp);

		if (ea_buf->flag & EA_NEW)
			dbFree(inode, addressDXD(&ea_buf->new_ea),
			       lengthDXD(&ea_buf->new_ea));
	}
}

static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
		  int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;

	if (new_size == 0) {
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		dquot_free_block(inode, old_blocks);

	inode->i_ctime = CURRENT_TIME;

	return 0;
}

/*
 * can_set_system_xattr
 *
 * This code is specific to the system.* namespace.  It contains policy
 * which doesn't belong in the main xattr codepath.
 */
static int can_set_system_xattr(struct inode *inode, const char *name,
				const void *value, size_t value_len)
{
#ifdef CONFIG_JFS_POSIX_ACL
	struct posix_acl *acl;
	int rc;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	/*
	 * POSIX_ACL_XATTR_ACCESS is tied to i_mode
	 */
	if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
		acl = posix_acl_from_xattr(&init_user_ns, value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		if (acl) {
			rc = posix_acl_equiv_mode(acl, &inode->i_mode);
			posix_acl_release(acl);
			if (rc < 0) {
				printk(KERN_ERR
				       "posix_acl_equiv_mode returned %d\n",
				       rc);
				return rc;
			}
			mark_inode_dirty(inode);
		}
		/*
		 * We're changing the ACL.  Get rid of the cached one
		 */
		forget_cached_acl(inode, ACL_TYPE_ACCESS);

		return 0;
	} else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
		acl = posix_acl_from_xattr(&init_user_ns, value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		posix_acl_release(acl);

		/*
		 * We're changing the default ACL.  Get rid of the cached one
		 */
		forget_cached_acl(inode, ACL_TYPE_DEFAULT);

		return 0;
	}
#endif			/* CONFIG_JFS_POSIX_ACL */
	return -EOPNOTSUPP;
}

/*
 * Most of the permission checking is done by xattr_permission in the vfs.
 * The local file system is responsible for handling the system.* namespace.
 * We also need to verify that this is a namespace that we recognize.
 */
static int can_set_xattr(struct inode *inode, const char *name,
			 const void *value, size_t value_len)
{
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return can_set_system_xattr(inode, name, value, value_len);

	if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
		/*
		 * This makes sure that we aren't trying to set an
		 * attribute in a different namespace by prefixing it
		 * with "os2."
		 */
		if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
			return -EOPNOTSUPP;
		return 0;
	}

	/*
	 * Don't allow setting an attribute in an unknown namespace.
	 */
	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return -EOPNOTSUPP;

	return 0;
}

int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for the merged ea list.
		 * We should only loop back to again: once.
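		 * (The second ea_get() call passes new_size as min_size,
		 * so the buffer it returns is guaranteed to be big enough.)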
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these numbers match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);

		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(tid, inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	kfree(os2name);

	return rc;
}

int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
		 size_t value_len, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	if (value == NULL) {	/* empty EA, do not remove */
		value = "";
		value_len = 0;
	}

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, dentry->d_inode, name, value, value_len,
			    flags);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}

ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *value;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}

ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
		     size_t buf_size)
{
	int err;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		/*
		 * skip past "os2." prefix
		 */
		name += XATTR_OS2_PREFIX_LEN;
		/*
		 * Don't allow retrieving properly prefixed attributes
		 * by prepending them with "os2."
		 */
		if (is_known_namespace(name))
			return -EOPNOTSUPP;
	}

	err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);

	return err;
}

/*
 * No special permissions are needed to list attributes except for trusted.*
 */
static inline int can_list(struct jfs_ea *ea)
{
	return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
			XATTR_TRUSTED_PREFIX_LEN) ||
		capable(CAP_SYS_ADMIN));
}

ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = dentry->d_inode;
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}

int jfs_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct jfs_inode_info *ji = JFS_IP(inode);
	int rc;
	tid_t tid;

	if ((rc = can_set_xattr(inode, name, NULL, 0)))
		return rc;

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}

#ifdef CONFIG_JFS_SECURITY
int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
		   void *fs_info)
{
	const struct xattr *xattr;
	tid_t *tid = fs_info;
	char *name;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
			       strlen(xattr->name) + 1, GFP_NOFS);
		if (!name) {
			err = -ENOMEM;
			break;
		}
		strcpy(name, XATTR_SECURITY_PREFIX);
		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);

		err = __jfs_setxattr(*tid, inode, name,
				     xattr->value, xattr->value_len, 0);
		kfree(name);
		if (err < 0)
			break;
	}
	return err;
}

int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
		      const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &jfs_initxattrs, &tid);
}
#endif