/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 * Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

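/*
 * ext2_xattr_handler_map[] is indexed by the on-disk name index
 * (EXT2_XATTR_INDEX_*) stored in each entry's e_name_index and is used by
 * ext2_xattr_handler() below to find the handler for an entry.
 * ext2_xattr_handlers[] is the NULL-terminated list that the VFS walks;
 * it is registered as sb->s_xattr when the filesystem is mounted.
 */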
static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name, name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
						    hash, bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete_block(ext2_mb_cache,
						    hash, old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 0)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
					    hash, bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
	if (error) {
		if (error == -EBUSY) {
			/* An -EBUSY return means an entry for this block is
			 * already in the cache; treat that as success. The
			 * debug message used to dereference a nonexistent
			 * ext2_xattr_cache variable, so report only the fact. */
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_mb_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ext2_mb_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ext2_mb_cache, ce);
				mb_cache_entry_put(ext2_mb_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ext2_mb_cache, ce);
	}
	return NULL;
}

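/*
 * NAME_HASH_SHIFT and VALUE_HASH_SHIFT are the rotation widths used by
 * ext2_xattr_hash_entry(): the running hash is rotated left by
 * NAME_HASH_SHIFT bits and xored with each byte of the attribute name,
 * then rotated by VALUE_HASH_SHIFT bits and xored with each 32-bit word
 * of the attribute value.
 */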
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}