/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Copyright (C) Christoph Hellwig, 2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"

/*
 *	jfs_xattr.c: extended attribute service
 *
 * Overall design --
 *
 * Format:
 *
 *   Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
 *   value) and a variable (0 or more) number of extended attribute
 *   entries.  Each extended attribute entry (jfs_ea) is a <name,value> double
 *   where <name> is constructed from a null-terminated ascii string
 *   (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
 *   (1 ... 65535 bytes).  The in-memory format is
 *
 *   0       1        2        4                4 + namelen + 1
 *   +-------+--------+--------+----------------+-------------------+
 *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
 *   |       | Length | Length |                |                   |
 *   +-------+--------+--------+----------------+-------------------+
 *
 *   A jfs_ea_list then is structured as
 *
 *   0            4                   4 + EA_SIZE(ea1)
 *   +------------+-------------------+--------------------+-----
 *   | Overall EA | First FEA Element | Second FEA Element | .....
 *   | List Size  |                   |                    |
 *   +------------+-------------------+--------------------+-----
 *
 *   On-disk:
 *
 *	FEALISTs are stored on disk using blocks allocated by dbAlloc() and
 *	written directly.  An EA list may be in-lined in the inode if there is
 *	sufficient room available.
 */

struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001
#define EA_EXTENT	0x0002
#define EA_NEW		0x0004
#define EA_MALLOC	0x0008

/* Namespaces */
#define XATTR_SYSTEM_PREFIX "system."
#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)

#define XATTR_USER_PREFIX "user."
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)

#define XATTR_OS2_PREFIX "os2."
#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)

/* XATTR_SECURITY_PREFIX is defined in include/linux/xattr.h */
#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)

#define XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
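
/*
 * Illustrative example (not part of the original layout description above,
 * and assuming the 1-byte flag, 1-byte name length, 2-byte value length
 * header with no padding, as the diagram shows): an attribute named
 * "user.foo" (8 bytes) with the 3-byte value "bar" occupies
 *
 *	4 (header) + 8 (name) + 1 ('\0') + 3 (value) = 16 bytes (EA_SIZE),
 *
 * and a jfs_ea_list holding only that entry reports
 *
 *	4 (overall size field) + 16 = 20 bytes.
 */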

/*
 * These three routines are used to recognize on-disk extended attributes
 * that are in a recognized namespace.  If the attribute is not recognized,
 * "os2." is prepended to the name
 */
static inline int is_os2_xattr(struct jfs_ea *ea)
{
	/*
	 * Check for "system."
	 */
	if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "user."
	 */
	if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "security."
	 */
	if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_SECURITY_PREFIX,
		     XATTR_SECURITY_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "trusted."
	 */
	if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return FALSE;
	/*
	 * Add any other valid namespace prefixes here
	 */

	/*
	 * We assume it's OS/2's flat namespace
	 */
	return TRUE;
}

static inline int name_size(struct jfs_ea *ea)
{
	if (is_os2_xattr(ea))
		return ea->namelen + XATTR_OS2_PREFIX_LEN;
	else
		return ea->namelen;
}

static inline int copy_name(char *buffer, struct jfs_ea *ea)
{
	int len = ea->namelen;

	if (is_os2_xattr(ea)) {
		memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
		buffer += XATTR_OS2_PREFIX_LEN;
		len += XATTR_OS2_PREFIX_LEN;
	}
	memcpy(buffer, ea->name, ea->namelen);
	buffer[ea->namelen] = 0;

	return len;
}
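
/*
 * Worked example (illustrative only): an on-disk EA named "FOO" matches
 * none of the prefixes above, so is_os2_xattr() returns TRUE and the name
 * is reported to userspace as "os2.FOO" -- name_size() adds
 * XATTR_OS2_PREFIX_LEN and copy_name() prepends the prefix.  Conversely,
 * __jfs_setxattr()/__jfs_getxattr() below strip a leading "os2." before
 * looking the name up, so that prefix never appears in the on-disk list.
 */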

/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);

/*
 * NAME: ea_write_inline
 *
 * FUNCTION: Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available.  If so, copies EA inline
 *	and sets <ea> fields appropriately.  Otherwise, returns failure, EA will
 *	have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		ji->mode2 &= ~INLINEEA;
	} else {
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}

/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
		return -EDQUOT;
	}

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/* Rollback quota allocation. */
		DQUOT_FREE_BLOCK(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	DQUOT_FREE_BLOCK(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}
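
/*
 * Illustrative arithmetic (assuming a 4 KiB aggregate block size and
 * PSIZE of 4096): writing a 6000-byte EA list gives
 *
 *	nblocks       = (6000 + 4095) >> 12 = 2
 *	1st iteration : nb = 4096, bytes_to_write = 4096
 *	2nd iteration : nb = 1904, bytes_to_write = 4096 (rounded up)
 *
 * ea_read() below performs the mirror-image calculation when copying the
 * extent back into memory.
 */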

/*
 * NAME: ea_read_inline
 *
 * FUNCTION: Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}

/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "ea_read: nbytes is 0");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * We have found the disk blocks which were originally used to store
	 * the FEALIST.  Now loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}

/*
 * NAME: ea_get
 *
 * FUNCTION: Returns buffer containing existing extended attributes.
 *	The size of the buffer will be the larger of the existing
 *	attributes size, or min_size.
 *
 *	The buffer, which may be inlined in the inode or in the
 *	page cache, must be released by calling ea_release or ea_put
 *
 * PARAMETERS:
 *	inode	- Inode pointer
 *	ea_buf	- Structure to be populated with ealist and its metadata
 *	min_size- minimum size of buffer to be returned
 *
 * RETURNS: size of the existing EA list (>= 0) for success; a negative
 *	error code indicates failure
 */
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	struct super_block *sb = inode->i_sb;
	int size;
	int ea_size = sizeDXD(&ji->ea);
	int blocks_needed, current_blocks;
	s64 blkno;
	int rc;
	int quota_allocation = 0;

	/* When fsck.jfs clears a bad ea, it doesn't clear the size */
	if (ji->ea.flag == 0)
		ea_size = 0;

	if (ea_size == 0) {
		if (min_size == 0) {
			ea_buf->flag = 0;
			ea_buf->max_size = 0;
			ea_buf->xattr = NULL;
			return 0;
		}
		if ((min_size <= sizeof (ji->i_inline_ea)) &&
		    (ji->mode2 & INLINEEA)) {
			ea_buf->flag = EA_INLINE | EA_NEW;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			DXDlength(&ea_buf->new_ea, 0);
			DXDaddress(&ea_buf->new_ea, 0);
			ea_buf->new_ea.flag = DXD_INLINE;
			DXDsize(&ea_buf->new_ea, min_size);
			return 0;
		}
		current_blocks = 0;
	} else if (ji->ea.flag & DXD_INLINE) {
		if (min_size <= sizeof (ji->i_inline_ea)) {
			ea_buf->flag = EA_INLINE;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			goto size_check;
		}
		current_blocks = 0;
	} else {
		if (!(ji->ea.flag & DXD_EXTENT)) {
			jfs_error(sb, "ea_get: invalid ea.flag");
			return -EIO;
		}
		current_blocks = (ea_size + sb->s_blocksize - 1) >>
		    sb->s_blocksize_bits;
	}
	size = max(min_size, ea_size);

	if (size > PSIZE) {
		/*
		 * To keep the rest of the code simple, allocate a
		 * contiguous buffer to work with
		 */
		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
		if (ea_buf->xattr == NULL)
			return -ENOMEM;

		ea_buf->flag = EA_MALLOC;
		ea_buf->max_size = (size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);

		if (ea_size == 0)
			return 0;

		if ((rc = ea_read(inode, ea_buf->xattr))) {
			kfree(ea_buf->xattr);
			ea_buf->xattr = NULL;
			return rc;
		}
		goto size_check;
	}
	blocks_needed = (min_size + sb->s_blocksize - 1) >>
	    sb->s_blocksize_bits;

	if (blocks_needed > current_blocks) {
		/* Allocate new blocks to quota. */
		if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
			return -EDQUOT;

		quota_allocation = blocks_needed;

		rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
			     &blkno);
		if (rc)
			goto clean_up;

		DXDlength(&ea_buf->new_ea, blocks_needed);
		DXDaddress(&ea_buf->new_ea, blkno);
		ea_buf->new_ea.flag = DXD_EXTENT;
		DXDsize(&ea_buf->new_ea, min_size);

		ea_buf->flag = EA_EXTENT | EA_NEW;

		ea_buf->mp = get_metapage(inode, blkno,
					  blocks_needed << sb->s_blocksize_bits,
					  1);
		if (ea_buf->mp == NULL) {
			dbFree(inode, blkno, (s64) blocks_needed);
			rc = -EIO;
			goto clean_up;
		}
		ea_buf->xattr = ea_buf->mp->data;
		ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);
		if (ea_size == 0)
			return 0;
		if ((rc = ea_read(inode, ea_buf->xattr))) {
			discard_metapage(ea_buf->mp);
			dbFree(inode, blkno, (s64) blocks_needed);
			goto clean_up;
		}
		goto size_check;
	}
	ea_buf->flag = EA_EXTENT;
	ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
				   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
				   1);
	if (ea_buf->mp == NULL) {
		rc = -EIO;
		goto clean_up;
	}
	ea_buf->xattr = ea_buf->mp->data;
	ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
	    ~(sb->s_blocksize - 1);

      size_check:
	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
		printk(KERN_ERR "ea_get: invalid extended attribute\n");
		dump_mem("xattr", ea_buf->xattr, ea_size);
		ea_release(inode, ea_buf);
		rc = -EIO;
		goto clean_up;
	}

	return ea_size;

      clean_up:
	/* Rollback quota allocation */
	if (quota_allocation)
		DQUOT_FREE_BLOCK(inode, quota_allocation);

	return (rc);
}

static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
	if (ea_buf->flag & EA_MALLOC)
		kfree(ea_buf->xattr);
	else if (ea_buf->flag & EA_EXTENT) {
		assert(ea_buf->mp);
		release_metapage(ea_buf->mp);

		if (ea_buf->flag & EA_NEW)
			dbFree(inode, addressDXD(&ea_buf->new_ea),
			       lengthDXD(&ea_buf->new_ea));
	}
}
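
/*
 * Illustrative calling pattern (a sketch mirroring __jfs_getxattr() and
 * jfs_listxattr() below, not additional API):
 *
 *	down_read(&JFS_IP(inode)->xattr_sem);
 *	xattr_size = ea_get(inode, &ea_buf, 0);
 *	if (xattr_size >= 0) {
 *		... inspect ea_buf.xattr (read-only) ...
 *		ea_release(inode, &ea_buf);
 *	}
 *	up_read(&JFS_IP(inode)->xattr_sem);
 *
 * Writers instead take xattr_sem for writing, pass the new list size as
 * min_size, and hand the buffer back with ea_put(), which commits the
 * update in a transaction (see __jfs_setxattr()).
 */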

static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;
	tid_t tid;

	if (new_size == 0) {
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	tid = txBegin(inode->i_sb, 0);
	down(&ji->commit_sem);

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		DQUOT_FREE_BLOCK(inode, old_blocks);

	inode->i_ctime = CURRENT_TIME;
	rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	up(&ji->commit_sem);

	return rc;
}

/*
 * can_set_system_xattr
 *
 * This code is specific to the system.* namespace.  It contains policy
 * which doesn't belong in the main xattr codepath.
 */
static int can_set_system_xattr(struct inode *inode, const char *name,
				const void *value, size_t value_len)
{
#ifdef CONFIG_JFS_POSIX_ACL
	struct posix_acl *acl;
	int rc;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
		return -EPERM;

	/*
	 * POSIX_ACL_XATTR_ACCESS is tied to i_mode
	 */
	if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
		acl = posix_acl_from_xattr(value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		if (acl) {
			mode_t mode = inode->i_mode;
			rc = posix_acl_equiv_mode(acl, &mode);
			posix_acl_release(acl);
			if (rc < 0) {
				printk(KERN_ERR
				       "posix_acl_equiv_mode returned %d\n",
				       rc);
				return rc;
			}
			inode->i_mode = mode;
			mark_inode_dirty(inode);
		}
		/*
		 * We're changing the ACL.  Get rid of the cached one
		 */
		acl = JFS_IP(inode)->i_acl;
		if (acl != JFS_ACL_NOT_CACHED)
			posix_acl_release(acl);
		JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;

		return 0;
	} else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
		acl = posix_acl_from_xattr(value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		posix_acl_release(acl);

		/*
		 * We're changing the default ACL.  Get rid of the cached one
		 */
		acl = JFS_IP(inode)->i_default_acl;
		if (acl && (acl != JFS_ACL_NOT_CACHED))
			posix_acl_release(acl);
		JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;

		return 0;
	}
#endif			/* CONFIG_JFS_POSIX_ACL */
	return -EOPNOTSUPP;
}

static int can_set_xattr(struct inode *inode, const char *name,
			 const void *value, size_t value_len)
{
	if (IS_RDONLY(inode))
		return -EROFS;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
		/*
		 * "system.*"
		 */
		return can_set_system_xattr(inode, name, value, value_len);

	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

#ifdef CONFIG_JFS_SECURITY
	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
	    == 0)
		return 0;	/* Leave it to the security module */
#endif

	if ((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
	    (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode) &&
	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
		return -EPERM;

	return permission(inode, MAY_WRITE, NULL);
}
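
/*
 * Summary of the policy above (illustrative, derived from can_set_xattr()):
 *
 *	system.*	handled by can_set_system_xattr() (POSIX ACLs)
 *	trusted.*	requires CAP_SYS_ADMIN
 *	security.*	left to the security module (CONFIG_JFS_SECURITY)
 *	user.* / os2.*	regular files and non-sticky directories only,
 *			subject to ordinary write permission
 *
 * Any other prefix is rejected with -EOPNOTSUPP.
 */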

int __jfs_setxattr(struct inode *inode, const char *name, const void *value,
		   size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these numbers match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);

		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	kfree(os2name);

	return rc;
}
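
/*
 * Flag semantics (illustrative summary of the function above):
 *
 *	XATTR_CREATE	fails with -EEXIST if the attribute already exists
 *	XATTR_REPLACE	fails with -ENODATA if the attribute does not exist
 *	value == NULL	removes the attribute; note that jfs_setxattr() below
 *			maps a NULL value to a zero-length one instead, so
 *			removal normally goes through jfs_removexattr()
 */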

int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
		 size_t value_len, int flags)
{
	if (value == NULL) {	/* empty EA, do not remove */
		value = "";
		value_len = 0;
	}

	return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
}

static int can_get_xattr(struct inode *inode, const char *name)
{
#ifdef CONFIG_JFS_SECURITY
	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0)
		return 0;
#endif

	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
		return 0;

	return permission(inode, MAY_READ, NULL);
}

ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int rc;
	char *value;

	if ((rc = can_get_xattr(inode, name)))
		return rc;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	kfree(os2name);

	return size;
}

ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
		     size_t buf_size)
{
	int err;

	err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);

	return err;
}

/*
 * No special permissions are needed to list attributes except for trusted.*
 */
static inline int can_list(struct jfs_ea *ea)
{
	return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
			XATTR_TRUSTED_PREFIX_LEN) ||
		capable(CAP_SYS_ADMIN));
}

ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = dentry->d_inode;
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}

int jfs_removexattr(struct dentry *dentry, const char *name)
{
	return __jfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}