1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5 */ 6 7 #include <linux/slab.h> 8 #include <linux/spinlock.h> 9 #include <linux/completion.h> 10 #include <linux/buffer_head.h> 11 #include <linux/xattr.h> 12 #include <linux/gfs2_ondisk.h> 13 #include <linux/posix_acl_xattr.h> 14 #include <linux/uaccess.h> 15 16 #include "gfs2.h" 17 #include "incore.h" 18 #include "acl.h" 19 #include "xattr.h" 20 #include "glock.h" 21 #include "inode.h" 22 #include "meta_io.h" 23 #include "quota.h" 24 #include "rgrp.h" 25 #include "super.h" 26 #include "trans.h" 27 #include "util.h" 28 29 /* 30 * ea_calc_size - returns the actual number of bytes the request will take up 31 * (not counting any unstuffed data blocks) 32 * 33 * Returns: 1 if the EA should be stuffed 34 */ 35 36 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize, 37 unsigned int *size) 38 { 39 unsigned int jbsize = sdp->sd_jbsize; 40 41 /* Stuffed */ 42 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8); 43 44 if (*size <= jbsize) 45 return 1; 46 47 /* Unstuffed */ 48 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + 49 (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8); 50 51 return 0; 52 } 53 54 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize) 55 { 56 unsigned int size; 57 58 if (dsize > GFS2_EA_MAX_DATA_LEN) 59 return -ERANGE; 60 61 ea_calc_size(sdp, nsize, dsize, &size); 62 63 /* This can only happen with 512 byte blocks */ 64 if (size > sdp->sd_jbsize) 65 return -ERANGE; 66 67 return 0; 68 } 69 70 static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type) 71 { 72 switch(sdp->sd_sb.sb_fs_format) { 73 case GFS2_FS_FORMAT_MAX: 74 return true; 75 76 case GFS2_FS_FORMAT_MIN: 77 return type <= GFS2_EATYPE_SECURITY; 78 79 default: 80 return false; 81 } 82 } 83 84 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh, 85 struct gfs2_ea_header *ea, 86 struct gfs2_ea_header *prev, void *private); 87 88 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, 89 ea_call_t ea_call, void *data) 90 { 91 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 92 struct gfs2_ea_header *ea, *prev = NULL; 93 int error = 0; 94 95 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA)) 96 return -EIO; 97 98 for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) { 99 if (!GFS2_EA_REC_LEN(ea)) { 100 gfs2_consist_inode(ip); 101 return -EIO; 102 } 103 if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= 104 bh->b_data + bh->b_size)) { 105 gfs2_consist_inode(ip); 106 return -EIO; 107 } 108 if (!gfs2_eatype_valid(sdp, ea->ea_type)) { 109 gfs2_consist_inode(ip); 110 return -EIO; 111 } 112 error = ea_call(ip, bh, ea, prev, data); 113 if (error) 114 return error; 115 116 if (GFS2_EA_IS_LAST(ea)) { 117 if ((char *)GFS2_EA2NEXT(ea) != 118 bh->b_data + bh->b_size) { 119 gfs2_consist_inode(ip); 120 return -EIO; 121 } 122 break; 123 } 124 } 125 126 return error; 127 } 128 129 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) 130 { 131 struct buffer_head *bh, *eabh; 132 __be64 *eablk, *end; 133 int error; 134 135 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh); 136 if (error) 137 return error; 138 139 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) { 140 error = ea_foreach_i(ip, bh, ea_call, data); 141 goto out; 142 } 143 144 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) { 145 error = -EIO; 146 goto out; 147 } 148 149 eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); 150 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; 151 152 for (; eablk < end; eablk++) { 153 u64 bn; 154 155 if (!*eablk) 156 break; 157 bn = be64_to_cpu(*eablk); 158 159 error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh); 160 if (error) 161 break; 162 error = ea_foreach_i(ip, eabh, ea_call, data); 163 brelse(eabh); 164 if (error) 165 break; 166 } 167 out: 168 brelse(bh); 169 return error; 170 } 171 172 struct ea_find { 173 int type; 174 const char *name; 175 size_t namel; 176 struct gfs2_ea_location *ef_el; 177 }; 178 179 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh, 180 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 181 void *private) 182 { 183 struct ea_find *ef = private; 184 185 if (ea->ea_type == GFS2_EATYPE_UNUSED) 186 return 0; 187 188 if (ea->ea_type == ef->type) { 189 if (ea->ea_name_len == ef->namel && 190 !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) { 191 struct gfs2_ea_location *el = ef->ef_el; 192 get_bh(bh); 193 el->el_bh = bh; 194 el->el_ea = ea; 195 el->el_prev = prev; 196 return 1; 197 } 198 } 199 200 return 0; 201 } 202 203 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 204 struct gfs2_ea_location *el) 205 { 206 struct ea_find ef; 207 int error; 208 209 ef.type = type; 210 ef.name = name; 211 ef.namel = strlen(name); 212 ef.ef_el = el; 213 214 memset(el, 0, sizeof(struct gfs2_ea_location)); 215 216 error = ea_foreach(ip, ea_find_i, &ef); 217 if (error > 0) 218 return 0; 219 220 return error; 221 } 222 223 /* 224 * ea_dealloc_unstuffed 225 * 226 * Take advantage of the fact that all unstuffed blocks are 227 * allocated from the same RG. But watch, this may not always 228 * be true. 229 * 230 * Returns: errno 231 */ 232 233 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, 234 struct gfs2_ea_header *ea, 235 struct gfs2_ea_header *prev, void *private) 236 { 237 int *leave = private; 238 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 239 struct gfs2_rgrpd *rgd; 240 struct gfs2_holder rg_gh; 241 __be64 *dataptrs; 242 u64 bn = 0; 243 u64 bstart = 0; 244 unsigned int blen = 0; 245 unsigned int blks = 0; 246 unsigned int x; 247 int error; 248 249 error = gfs2_rindex_update(sdp); 250 if (error) 251 return error; 252 253 if (GFS2_EA_IS_STUFFED(ea)) 254 return 0; 255 256 dataptrs = GFS2_EA2DATAPTRS(ea); 257 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { 258 if (*dataptrs) { 259 blks++; 260 bn = be64_to_cpu(*dataptrs); 261 } 262 } 263 if (!blks) 264 return 0; 265 266 rgd = gfs2_blk2rgrpd(sdp, bn, 1); 267 if (!rgd) { 268 gfs2_consist_inode(ip); 269 return -EIO; 270 } 271 272 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 273 LM_FLAG_NODE_SCOPE, &rg_gh); 274 if (error) 275 return error; 276 277 error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE + 278 RES_EATTR + RES_STATFS + RES_QUOTA, blks); 279 if (error) 280 goto out_gunlock; 281 282 gfs2_trans_add_meta(ip->i_gl, bh); 283 284 dataptrs = GFS2_EA2DATAPTRS(ea); 285 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { 286 if (!*dataptrs) 287 break; 288 bn = be64_to_cpu(*dataptrs); 289 290 if (bstart + blen == bn) 291 blen++; 292 else { 293 if (bstart) 294 gfs2_free_meta(ip, rgd, bstart, blen); 295 bstart = bn; 296 blen = 1; 297 } 298 299 *dataptrs = 0; 300 gfs2_add_inode_blocks(&ip->i_inode, -1); 301 } 302 if (bstart) 303 gfs2_free_meta(ip, rgd, bstart, blen); 304 305 if (prev && !leave) { 306 u32 len; 307 308 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 309 prev->ea_rec_len = cpu_to_be32(len); 310 311 if (GFS2_EA_IS_LAST(ea)) 312 prev->ea_flags |= GFS2_EAFLAG_LAST; 313 } else { 314 ea->ea_type = GFS2_EATYPE_UNUSED; 315 ea->ea_num_ptrs = 0; 316 } 317 318 inode_set_ctime_current(&ip->i_inode); 319 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 320 321 gfs2_trans_end(sdp); 322 323 out_gunlock: 324 gfs2_glock_dq_uninit(&rg_gh); 325 return error; 326 } 327 328 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, 329 struct gfs2_ea_header *ea, 330 struct gfs2_ea_header *prev, int leave) 331 { 332 int error; 333 334 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 335 if (error) 336 return error; 337 338 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 339 if (error) 340 goto out_alloc; 341 342 error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); 343 344 gfs2_quota_unhold(ip); 345 out_alloc: 346 return error; 347 } 348 349 struct ea_list { 350 struct gfs2_ea_request *ei_er; 351 unsigned int ei_size; 352 }; 353 354 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, 355 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 356 void *private) 357 { 358 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 359 struct ea_list *ei = private; 360 struct gfs2_ea_request *er = ei->ei_er; 361 unsigned int ea_size; 362 char *prefix; 363 unsigned int l; 364 365 if (ea->ea_type == GFS2_EATYPE_UNUSED) 366 return 0; 367 368 BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY && 369 sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN); 370 switch (ea->ea_type) { 371 case GFS2_EATYPE_USR: 372 prefix = "user."; 373 l = 5; 374 break; 375 case GFS2_EATYPE_SYS: 376 prefix = "system."; 377 l = 7; 378 break; 379 case GFS2_EATYPE_SECURITY: 380 prefix = "security."; 381 l = 9; 382 break; 383 case GFS2_EATYPE_TRUSTED: 384 prefix = "trusted."; 385 l = 8; 386 break; 387 default: 388 return 0; 389 } 390 391 ea_size = l + ea->ea_name_len + 1; 392 if (er->er_data_len) { 393 if (ei->ei_size + ea_size > er->er_data_len) 394 return -ERANGE; 395 396 memcpy(er->er_data + ei->ei_size, prefix, l); 397 memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea), 398 ea->ea_name_len); 399 er->er_data[ei->ei_size + ea_size - 1] = 0; 400 } 401 402 ei->ei_size += ea_size; 403 404 return 0; 405 } 406 407 /** 408 * gfs2_listxattr - List gfs2 extended attributes 409 * @dentry: The dentry whose inode we are interested in 410 * @buffer: The buffer to write the results 411 * @size: The size of the buffer 412 * 413 * Returns: actual size of data on success, -errno on error 414 */ 415 416 ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size) 417 { 418 struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); 419 struct gfs2_ea_request er; 420 struct gfs2_holder i_gh; 421 int error; 422 423 memset(&er, 0, sizeof(struct gfs2_ea_request)); 424 if (size) { 425 er.er_data = buffer; 426 er.er_data_len = size; 427 } 428 429 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 430 if (error) 431 return error; 432 433 if (ip->i_eattr) { 434 struct ea_list ei = { .ei_er = &er, .ei_size = 0 }; 435 436 error = ea_foreach(ip, ea_list_i, &ei); 437 if (!error) 438 error = ei.ei_size; 439 } 440 441 gfs2_glock_dq_uninit(&i_gh); 442 443 return error; 444 } 445 446 /** 447 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the 448 * request buffer 449 * @ip: The GFS2 inode 450 * @ea: The extended attribute header structure 451 * @din: The data to be copied in 452 * @dout: The data to be copied out (one of din,dout will be NULL) 453 * 454 * Returns: errno 455 */ 456 457 static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, 458 const char *din, char *dout) 459 { 460 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 461 struct buffer_head **bh; 462 unsigned int amount = GFS2_EA_DATA_LEN(ea); 463 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 464 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); 465 unsigned int x; 466 int error = 0; 467 unsigned char *pos; 468 unsigned cp_size; 469 470 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS); 471 if (!bh) 472 return -ENOMEM; 473 474 for (x = 0; x < nptrs; x++) { 475 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0, 476 bh + x); 477 if (error) { 478 while (x--) 479 brelse(bh[x]); 480 goto out; 481 } 482 dataptrs++; 483 } 484 485 for (x = 0; x < nptrs; x++) { 486 error = gfs2_meta_wait(sdp, bh[x]); 487 if (error) { 488 for (; x < nptrs; x++) 489 brelse(bh[x]); 490 goto out; 491 } 492 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) { 493 for (; x < nptrs; x++) 494 brelse(bh[x]); 495 error = -EIO; 496 goto out; 497 } 498 499 pos = bh[x]->b_data + sizeof(struct gfs2_meta_header); 500 cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize; 501 502 if (dout) { 503 memcpy(dout, pos, cp_size); 504 dout += sdp->sd_jbsize; 505 } 506 507 if (din) { 508 gfs2_trans_add_meta(ip->i_gl, bh[x]); 509 memcpy(pos, din, cp_size); 510 din += sdp->sd_jbsize; 511 } 512 513 amount -= sdp->sd_jbsize; 514 brelse(bh[x]); 515 } 516 517 out: 518 kfree(bh); 519 return error; 520 } 521 522 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 523 char *data, size_t size) 524 { 525 int ret; 526 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 527 if (len > size) 528 return -ERANGE; 529 530 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 531 memcpy(data, GFS2_EA2DATA(el->el_ea), len); 532 return len; 533 } 534 ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data); 535 if (ret < 0) 536 return ret; 537 return len; 538 } 539 540 int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata) 541 { 542 struct gfs2_ea_location el; 543 int error; 544 int len; 545 char *data; 546 547 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el); 548 if (error) 549 return error; 550 if (!el.el_ea) 551 goto out; 552 if (!GFS2_EA_DATA_LEN(el.el_ea)) 553 goto out; 554 555 len = GFS2_EA_DATA_LEN(el.el_ea); 556 data = kmalloc(len, GFP_NOFS); 557 error = -ENOMEM; 558 if (data == NULL) 559 goto out; 560 561 error = gfs2_ea_get_copy(ip, &el, data, len); 562 if (error < 0) 563 kfree(data); 564 else 565 *ppdata = data; 566 out: 567 brelse(el.el_bh); 568 return error; 569 } 570 571 /** 572 * __gfs2_xattr_get - Get a GFS2 extended attribute 573 * @inode: The inode 574 * @name: The name of the extended attribute 575 * @buffer: The buffer to write the result into 576 * @size: The size of the buffer 577 * @type: The type of extended attribute 578 * 579 * Returns: actual size of data on success, -errno on error 580 */ 581 static int __gfs2_xattr_get(struct inode *inode, const char *name, 582 void *buffer, size_t size, int type) 583 { 584 struct gfs2_inode *ip = GFS2_I(inode); 585 struct gfs2_ea_location el; 586 int error; 587 588 if (!ip->i_eattr) 589 return -ENODATA; 590 if (strlen(name) > GFS2_EA_MAX_NAME_LEN) 591 return -EINVAL; 592 593 error = gfs2_ea_find(ip, type, name, &el); 594 if (error) 595 return error; 596 if (!el.el_ea) 597 return -ENODATA; 598 if (size) 599 error = gfs2_ea_get_copy(ip, &el, buffer, size); 600 else 601 error = GFS2_EA_DATA_LEN(el.el_ea); 602 brelse(el.el_bh); 603 604 return error; 605 } 606 607 static int gfs2_xattr_get(const struct xattr_handler *handler, 608 struct dentry *unused, struct inode *inode, 609 const char *name, void *buffer, size_t size) 610 { 611 struct gfs2_inode *ip = GFS2_I(inode); 612 struct gfs2_holder gh; 613 int ret; 614 615 /* During lookup, SELinux calls this function with the glock locked. */ 616 617 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 618 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 619 if (ret) 620 return ret; 621 } else { 622 gfs2_holder_mark_uninitialized(&gh); 623 } 624 ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags); 625 if (gfs2_holder_initialized(&gh)) 626 gfs2_glock_dq_uninit(&gh); 627 return ret; 628 } 629 630 /** 631 * ea_alloc_blk - allocates a new block for extended attributes. 632 * @ip: A pointer to the inode that's getting extended attributes 633 * @bhp: Pointer to pointer to a struct buffer_head 634 * 635 * Returns: errno 636 */ 637 638 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp) 639 { 640 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 641 struct gfs2_ea_header *ea; 642 unsigned int n = 1; 643 u64 block; 644 int error; 645 646 error = gfs2_alloc_blocks(ip, &block, &n, 0); 647 if (error) 648 return error; 649 gfs2_trans_remove_revoke(sdp, block, 1); 650 *bhp = gfs2_meta_new(ip->i_gl, block); 651 gfs2_trans_add_meta(ip->i_gl, *bhp); 652 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA); 653 gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header)); 654 655 ea = GFS2_EA_BH2FIRST(*bhp); 656 ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize); 657 ea->ea_type = GFS2_EATYPE_UNUSED; 658 ea->ea_flags = GFS2_EAFLAG_LAST; 659 ea->ea_num_ptrs = 0; 660 661 gfs2_add_inode_blocks(&ip->i_inode, 1); 662 663 return 0; 664 } 665 666 /** 667 * ea_write - writes the request info to an ea, creating new blocks if 668 * necessary 669 * @ip: inode that is being modified 670 * @ea: the location of the new ea in a block 671 * @er: the write request 672 * 673 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bin of ea_flags 674 * 675 * returns : errno 676 */ 677 678 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea, 679 struct gfs2_ea_request *er) 680 { 681 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 682 int error; 683 684 ea->ea_data_len = cpu_to_be32(er->er_data_len); 685 ea->ea_name_len = er->er_name_len; 686 ea->ea_type = er->er_type; 687 ea->__pad = 0; 688 689 memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len); 690 691 if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) { 692 ea->ea_num_ptrs = 0; 693 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); 694 } else { 695 __be64 *dataptr = GFS2_EA2DATAPTRS(ea); 696 const char *data = er->er_data; 697 unsigned int data_len = er->er_data_len; 698 unsigned int copy; 699 unsigned int x; 700 701 ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize); 702 for (x = 0; x < ea->ea_num_ptrs; x++) { 703 struct buffer_head *bh; 704 u64 block; 705 int mh_size = sizeof(struct gfs2_meta_header); 706 unsigned int n = 1; 707 708 error = gfs2_alloc_blocks(ip, &block, &n, 0); 709 if (error) 710 return error; 711 gfs2_trans_remove_revoke(sdp, block, 1); 712 bh = gfs2_meta_new(ip->i_gl, block); 713 gfs2_trans_add_meta(ip->i_gl, bh); 714 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 715 716 gfs2_add_inode_blocks(&ip->i_inode, 1); 717 718 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : 719 data_len; 720 memcpy(bh->b_data + mh_size, data, copy); 721 if (copy < sdp->sd_jbsize) 722 memset(bh->b_data + mh_size + copy, 0, 723 sdp->sd_jbsize - copy); 724 725 *dataptr++ = cpu_to_be64(bh->b_blocknr); 726 data += copy; 727 data_len -= copy; 728 729 brelse(bh); 730 } 731 732 gfs2_assert_withdraw(sdp, !data_len); 733 } 734 735 return 0; 736 } 737 738 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip, 739 struct gfs2_ea_request *er, void *private); 740 741 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, 742 unsigned int blks, 743 ea_skeleton_call_t skeleton_call, void *private) 744 { 745 struct gfs2_alloc_parms ap = { .target = blks }; 746 int error; 747 748 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 749 if (error) 750 return error; 751 752 error = gfs2_quota_lock_check(ip, &ap); 753 if (error) 754 return error; 755 756 error = gfs2_inplace_reserve(ip, &ap); 757 if (error) 758 goto out_gunlock_q; 759 760 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), 761 blks + gfs2_rg_blocks(ip, blks) + 762 RES_DINODE + RES_STATFS + RES_QUOTA, 0); 763 if (error) 764 goto out_ipres; 765 766 error = skeleton_call(ip, er, private); 767 if (error) 768 goto out_end_trans; 769 770 inode_set_ctime_current(&ip->i_inode); 771 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 772 773 out_end_trans: 774 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 775 out_ipres: 776 gfs2_inplace_release(ip); 777 out_gunlock_q: 778 gfs2_quota_unlock(ip); 779 return error; 780 } 781 782 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er, 783 void *private) 784 { 785 struct buffer_head *bh; 786 int error; 787 788 error = ea_alloc_blk(ip, &bh); 789 if (error) 790 return error; 791 792 ip->i_eattr = bh->b_blocknr; 793 error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er); 794 795 brelse(bh); 796 797 return error; 798 } 799 800 /* 801 * ea_init - initializes a new eattr block 802 * 803 * Returns: errno 804 */ 805 static int ea_init(struct gfs2_inode *ip, int type, const char *name, 806 const void *data, size_t size) 807 { 808 struct gfs2_ea_request er; 809 unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize; 810 unsigned int blks = 1; 811 812 er.er_type = type; 813 er.er_name = name; 814 er.er_name_len = strlen(name); 815 er.er_data = (void *)data; 816 er.er_data_len = size; 817 818 if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize) 819 blks += DIV_ROUND_UP(er.er_data_len, jbsize); 820 821 return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL); 822 } 823 824 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea) 825 { 826 u32 ea_size = GFS2_EA_SIZE(ea); 827 struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea + 828 ea_size); 829 u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size; 830 int last = ea->ea_flags & GFS2_EAFLAG_LAST; 831 832 ea->ea_rec_len = cpu_to_be32(ea_size); 833 ea->ea_flags ^= last; 834 835 new->ea_rec_len = cpu_to_be32(new_size); 836 new->ea_flags = last; 837 838 return new; 839 } 840 841 static void ea_set_remove_stuffed(struct gfs2_inode *ip, 842 struct gfs2_ea_location *el) 843 { 844 struct gfs2_ea_header *ea = el->el_ea; 845 struct gfs2_ea_header *prev = el->el_prev; 846 u32 len; 847 848 gfs2_trans_add_meta(ip->i_gl, el->el_bh); 849 850 if (!prev || !GFS2_EA_IS_STUFFED(ea)) { 851 ea->ea_type = GFS2_EATYPE_UNUSED; 852 return; 853 } else if (GFS2_EA2NEXT(prev) != ea) { 854 prev = GFS2_EA2NEXT(prev); 855 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea); 856 } 857 858 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 859 prev->ea_rec_len = cpu_to_be32(len); 860 861 if (GFS2_EA_IS_LAST(ea)) 862 prev->ea_flags |= GFS2_EAFLAG_LAST; 863 } 864 865 struct ea_set { 866 int ea_split; 867 868 struct gfs2_ea_request *es_er; 869 struct gfs2_ea_location *es_el; 870 871 struct buffer_head *es_bh; 872 struct gfs2_ea_header *es_ea; 873 }; 874 875 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, 876 struct gfs2_ea_header *ea, struct ea_set *es) 877 { 878 struct gfs2_ea_request *er = es->es_er; 879 int error; 880 881 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0); 882 if (error) 883 return error; 884 885 gfs2_trans_add_meta(ip->i_gl, bh); 886 887 if (es->ea_split) 888 ea = ea_split_ea(ea); 889 890 ea_write(ip, ea, er); 891 892 if (es->es_el) 893 ea_set_remove_stuffed(ip, es->es_el); 894 895 inode_set_ctime_current(&ip->i_inode); 896 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 897 898 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 899 return error; 900 } 901 902 static int ea_set_simple_alloc(struct gfs2_inode *ip, 903 struct gfs2_ea_request *er, void *private) 904 { 905 struct ea_set *es = private; 906 struct gfs2_ea_header *ea = es->es_ea; 907 int error; 908 909 gfs2_trans_add_meta(ip->i_gl, es->es_bh); 910 911 if (es->ea_split) 912 ea = ea_split_ea(ea); 913 914 error = ea_write(ip, ea, er); 915 if (error) 916 return error; 917 918 if (es->es_el) 919 ea_set_remove_stuffed(ip, es->es_el); 920 921 return 0; 922 } 923 924 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh, 925 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 926 void *private) 927 { 928 struct ea_set *es = private; 929 unsigned int size; 930 int stuffed; 931 int error; 932 933 stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len, 934 es->es_er->er_data_len, &size); 935 936 if (ea->ea_type == GFS2_EATYPE_UNUSED) { 937 if (GFS2_EA_REC_LEN(ea) < size) 938 return 0; 939 if (!GFS2_EA_IS_STUFFED(ea)) { 940 error = ea_remove_unstuffed(ip, bh, ea, prev, 1); 941 if (error) 942 return error; 943 } 944 es->ea_split = 0; 945 } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size) 946 es->ea_split = 1; 947 else 948 return 0; 949 950 if (stuffed) { 951 error = ea_set_simple_noalloc(ip, bh, ea, es); 952 if (error) 953 return error; 954 } else { 955 unsigned int blks; 956 957 es->es_bh = bh; 958 es->es_ea = ea; 959 blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len, 960 GFS2_SB(&ip->i_inode)->sd_jbsize); 961 962 error = ea_alloc_skeleton(ip, es->es_er, blks, 963 ea_set_simple_alloc, es); 964 if (error) 965 return error; 966 } 967 968 return 1; 969 } 970 971 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, 972 void *private) 973 { 974 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 975 struct buffer_head *indbh, *newbh; 976 __be64 *eablk; 977 int error; 978 int mh_size = sizeof(struct gfs2_meta_header); 979 980 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { 981 __be64 *end; 982 983 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, 984 &indbh); 985 if (error) 986 return error; 987 988 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) { 989 error = -EIO; 990 goto out; 991 } 992 993 eablk = (__be64 *)(indbh->b_data + mh_size); 994 end = eablk + sdp->sd_inptrs; 995 996 for (; eablk < end; eablk++) 997 if (!*eablk) 998 break; 999 1000 if (eablk == end) { 1001 error = -ENOSPC; 1002 goto out; 1003 } 1004 1005 gfs2_trans_add_meta(ip->i_gl, indbh); 1006 } else { 1007 u64 blk; 1008 unsigned int n = 1; 1009 error = gfs2_alloc_blocks(ip, &blk, &n, 0); 1010 if (error) 1011 return error; 1012 gfs2_trans_remove_revoke(sdp, blk, 1); 1013 indbh = gfs2_meta_new(ip->i_gl, blk); 1014 gfs2_trans_add_meta(ip->i_gl, indbh); 1015 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 1016 gfs2_buffer_clear_tail(indbh, mh_size); 1017 1018 eablk = (__be64 *)(indbh->b_data + mh_size); 1019 *eablk = cpu_to_be64(ip->i_eattr); 1020 ip->i_eattr = blk; 1021 ip->i_diskflags |= GFS2_DIF_EA_INDIRECT; 1022 gfs2_add_inode_blocks(&ip->i_inode, 1); 1023 1024 eablk++; 1025 } 1026 1027 error = ea_alloc_blk(ip, &newbh); 1028 if (error) 1029 goto out; 1030 1031 *eablk = cpu_to_be64((u64)newbh->b_blocknr); 1032 error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er); 1033 brelse(newbh); 1034 if (error) 1035 goto out; 1036 1037 if (private) 1038 ea_set_remove_stuffed(ip, private); 1039 1040 out: 1041 brelse(indbh); 1042 return error; 1043 } 1044 1045 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name, 1046 const void *value, size_t size, struct gfs2_ea_location *el) 1047 { 1048 struct gfs2_ea_request er; 1049 struct ea_set es; 1050 unsigned int blks = 2; 1051 int error; 1052 1053 er.er_type = type; 1054 er.er_name = name; 1055 er.er_data = (void *)value; 1056 er.er_name_len = strlen(name); 1057 er.er_data_len = size; 1058 1059 memset(&es, 0, sizeof(struct ea_set)); 1060 es.es_er = &er; 1061 es.es_el = el; 1062 1063 error = ea_foreach(ip, ea_set_simple, &es); 1064 if (error > 0) 1065 return 0; 1066 if (error) 1067 return error; 1068 1069 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) 1070 blks++; 1071 if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize) 1072 blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize); 1073 1074 return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el); 1075 } 1076 1077 static int ea_set_remove_unstuffed(struct gfs2_inode *ip, 1078 struct gfs2_ea_location *el) 1079 { 1080 if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) { 1081 el->el_prev = GFS2_EA2NEXT(el->el_prev); 1082 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 1083 GFS2_EA2NEXT(el->el_prev) == el->el_ea); 1084 } 1085 1086 return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0); 1087 } 1088 1089 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) 1090 { 1091 struct gfs2_ea_header *ea = el->el_ea; 1092 struct gfs2_ea_header *prev = el->el_prev; 1093 int error; 1094 1095 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1096 if (error) 1097 return error; 1098 1099 gfs2_trans_add_meta(ip->i_gl, el->el_bh); 1100 1101 if (prev) { 1102 u32 len; 1103 1104 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 1105 prev->ea_rec_len = cpu_to_be32(len); 1106 1107 if (GFS2_EA_IS_LAST(ea)) 1108 prev->ea_flags |= GFS2_EAFLAG_LAST; 1109 } else { 1110 ea->ea_type = GFS2_EATYPE_UNUSED; 1111 } 1112 1113 inode_set_ctime_current(&ip->i_inode); 1114 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 1115 1116 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 1117 1118 return error; 1119 } 1120 1121 /** 1122 * gfs2_xattr_remove - Remove a GFS2 extended attribute 1123 * @ip: The inode 1124 * @type: The type of the extended attribute 1125 * @name: The name of the extended attribute 1126 * 1127 * This is not called directly by the VFS since we use the (common) 1128 * scheme of making a "set with NULL data" mean a remove request. Note 1129 * that this is different from a set with zero length data. 1130 * 1131 * Returns: 0, or errno on failure 1132 */ 1133 1134 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name) 1135 { 1136 struct gfs2_ea_location el; 1137 int error; 1138 1139 if (!ip->i_eattr) 1140 return -ENODATA; 1141 1142 error = gfs2_ea_find(ip, type, name, &el); 1143 if (error) 1144 return error; 1145 if (!el.el_ea) 1146 return -ENODATA; 1147 1148 if (GFS2_EA_IS_STUFFED(el.el_ea)) 1149 error = ea_remove_stuffed(ip, &el); 1150 else 1151 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0); 1152 1153 brelse(el.el_bh); 1154 1155 return error; 1156 } 1157 1158 /** 1159 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute 1160 * @inode: The inode 1161 * @name: The name of the extended attribute 1162 * @value: The value of the extended attribute (NULL for remove) 1163 * @size: The size of the @value argument 1164 * @flags: Create or Replace 1165 * @type: The type of the extended attribute 1166 * 1167 * See gfs2_xattr_remove() for details of the removal of xattrs. 1168 * 1169 * Returns: 0 or errno on failure 1170 */ 1171 1172 int __gfs2_xattr_set(struct inode *inode, const char *name, 1173 const void *value, size_t size, int flags, int type) 1174 { 1175 struct gfs2_inode *ip = GFS2_I(inode); 1176 struct gfs2_sbd *sdp = GFS2_SB(inode); 1177 struct gfs2_ea_location el; 1178 unsigned int namel = strlen(name); 1179 int error; 1180 1181 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 1182 return -EPERM; 1183 if (namel > GFS2_EA_MAX_NAME_LEN) 1184 return -ERANGE; 1185 1186 if (value == NULL) { 1187 error = gfs2_xattr_remove(ip, type, name); 1188 if (error == -ENODATA && !(flags & XATTR_REPLACE)) 1189 error = 0; 1190 return error; 1191 } 1192 1193 if (ea_check_size(sdp, namel, size)) 1194 return -ERANGE; 1195 1196 if (!ip->i_eattr) { 1197 if (flags & XATTR_REPLACE) 1198 return -ENODATA; 1199 return ea_init(ip, type, name, value, size); 1200 } 1201 1202 error = gfs2_ea_find(ip, type, name, &el); 1203 if (error) 1204 return error; 1205 1206 if (el.el_ea) { 1207 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) { 1208 brelse(el.el_bh); 1209 return -EPERM; 1210 } 1211 1212 error = -EEXIST; 1213 if (!(flags & XATTR_CREATE)) { 1214 int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea); 1215 error = ea_set_i(ip, type, name, value, size, &el); 1216 if (!error && unstuffed) 1217 ea_set_remove_unstuffed(ip, &el); 1218 } 1219 1220 brelse(el.el_bh); 1221 return error; 1222 } 1223 1224 error = -ENODATA; 1225 if (!(flags & XATTR_REPLACE)) 1226 error = ea_set_i(ip, type, name, value, size, NULL); 1227 1228 return error; 1229 } 1230 1231 static int gfs2_xattr_set(const struct xattr_handler *handler, 1232 struct mnt_idmap *idmap, 1233 struct dentry *unused, struct inode *inode, 1234 const char *name, const void *value, 1235 size_t size, int flags) 1236 { 1237 struct gfs2_inode *ip = GFS2_I(inode); 1238 struct gfs2_holder gh; 1239 int ret; 1240 1241 ret = gfs2_qa_get(ip); 1242 if (ret) 1243 return ret; 1244 1245 /* May be called from gfs_setattr with the glock locked. */ 1246 1247 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 1248 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1249 if (ret) 1250 goto out; 1251 } else { 1252 if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) { 1253 ret = -EIO; 1254 goto out; 1255 } 1256 gfs2_holder_mark_uninitialized(&gh); 1257 } 1258 ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); 1259 if (gfs2_holder_initialized(&gh)) 1260 gfs2_glock_dq_uninit(&gh); 1261 out: 1262 gfs2_qa_put(ip); 1263 return ret; 1264 } 1265 1266 static int ea_dealloc_indirect(struct gfs2_inode *ip) 1267 { 1268 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1269 struct gfs2_rgrp_list rlist; 1270 struct gfs2_rgrpd *rgd; 1271 struct buffer_head *indbh, *dibh; 1272 __be64 *eablk, *end; 1273 unsigned int rg_blocks = 0; 1274 u64 bstart = 0; 1275 unsigned int blen = 0; 1276 unsigned int blks = 0; 1277 unsigned int x; 1278 int error; 1279 1280 error = gfs2_rindex_update(sdp); 1281 if (error) 1282 return error; 1283 1284 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1285 1286 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh); 1287 if (error) 1288 return error; 1289 1290 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) { 1291 error = -EIO; 1292 goto out; 1293 } 1294 1295 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1296 end = eablk + sdp->sd_inptrs; 1297 1298 for (; eablk < end; eablk++) { 1299 u64 bn; 1300 1301 if (!*eablk) 1302 break; 1303 bn = be64_to_cpu(*eablk); 1304 1305 if (bstart + blen == bn) 1306 blen++; 1307 else { 1308 if (bstart) 1309 gfs2_rlist_add(ip, &rlist, bstart); 1310 bstart = bn; 1311 blen = 1; 1312 } 1313 blks++; 1314 } 1315 if (bstart) 1316 gfs2_rlist_add(ip, &rlist, bstart); 1317 else 1318 goto out; 1319 1320 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE); 1321 1322 for (x = 0; x < rlist.rl_rgrps; x++) { 1323 rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); 1324 rg_blocks += rgd->rd_length; 1325 } 1326 1327 error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs); 1328 if (error) 1329 goto out_rlist_free; 1330 1331 error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT + 1332 RES_STATFS + RES_QUOTA, blks); 1333 if (error) 1334 goto out_gunlock; 1335 1336 gfs2_trans_add_meta(ip->i_gl, indbh); 1337 1338 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1339 bstart = 0; 1340 rgd = NULL; 1341 blen = 0; 1342 1343 for (; eablk < end; eablk++) { 1344 u64 bn; 1345 1346 if (!*eablk) 1347 break; 1348 bn = be64_to_cpu(*eablk); 1349 1350 if (bstart + blen == bn) 1351 blen++; 1352 else { 1353 if (bstart) 1354 gfs2_free_meta(ip, rgd, bstart, blen); 1355 bstart = bn; 1356 rgd = gfs2_blk2rgrpd(sdp, bstart, true); 1357 blen = 1; 1358 } 1359 1360 *eablk = 0; 1361 gfs2_add_inode_blocks(&ip->i_inode, -1); 1362 } 1363 if (bstart) 1364 gfs2_free_meta(ip, rgd, bstart, blen); 1365 1366 ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT; 1367 1368 error = gfs2_meta_inode_buffer(ip, &dibh); 1369 if (!error) { 1370 gfs2_trans_add_meta(ip->i_gl, dibh); 1371 gfs2_dinode_out(ip, dibh->b_data); 1372 brelse(dibh); 1373 } 1374 1375 gfs2_trans_end(sdp); 1376 1377 out_gunlock: 1378 gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); 1379 out_rlist_free: 1380 gfs2_rlist_free(&rlist); 1381 out: 1382 brelse(indbh); 1383 return error; 1384 } 1385 1386 static int ea_dealloc_block(struct gfs2_inode *ip) 1387 { 1388 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1389 struct gfs2_rgrpd *rgd; 1390 struct buffer_head *dibh; 1391 struct gfs2_holder gh; 1392 int error; 1393 1394 error = gfs2_rindex_update(sdp); 1395 if (error) 1396 return error; 1397 1398 rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1); 1399 if (!rgd) { 1400 gfs2_consist_inode(ip); 1401 return -EIO; 1402 } 1403 1404 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1405 LM_FLAG_NODE_SCOPE, &gh); 1406 if (error) 1407 return error; 1408 1409 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS + 1410 RES_QUOTA, 1); 1411 if (error) 1412 goto out_gunlock; 1413 1414 gfs2_free_meta(ip, rgd, ip->i_eattr, 1); 1415 1416 ip->i_eattr = 0; 1417 gfs2_add_inode_blocks(&ip->i_inode, -1); 1418 1419 if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { 1420 error = gfs2_meta_inode_buffer(ip, &dibh); 1421 if (!error) { 1422 gfs2_trans_add_meta(ip->i_gl, dibh); 1423 gfs2_dinode_out(ip, dibh->b_data); 1424 brelse(dibh); 1425 } 1426 } 1427 1428 gfs2_trans_end(sdp); 1429 1430 out_gunlock: 1431 gfs2_glock_dq_uninit(&gh); 1432 return error; 1433 } 1434 1435 /** 1436 * gfs2_ea_dealloc - deallocate the extended attribute fork 1437 * @ip: the inode 1438 * 1439 * Returns: errno 1440 */ 1441 1442 int gfs2_ea_dealloc(struct gfs2_inode *ip) 1443 { 1444 int error; 1445 1446 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 1447 if (error) 1448 return error; 1449 1450 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 1451 if (error) 1452 return error; 1453 1454 if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { 1455 error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); 1456 if (error) 1457 goto out_quota; 1458 1459 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { 1460 error = ea_dealloc_indirect(ip); 1461 if (error) 1462 goto out_quota; 1463 } 1464 } 1465 1466 error = ea_dealloc_block(ip); 1467 1468 out_quota: 1469 gfs2_quota_unhold(ip); 1470 return error; 1471 } 1472 1473 static const struct xattr_handler gfs2_xattr_user_handler = { 1474 .prefix = XATTR_USER_PREFIX, 1475 .flags = GFS2_EATYPE_USR, 1476 .get = gfs2_xattr_get, 1477 .set = gfs2_xattr_set, 1478 }; 1479 1480 static const struct xattr_handler gfs2_xattr_security_handler = { 1481 .prefix = XATTR_SECURITY_PREFIX, 1482 .flags = GFS2_EATYPE_SECURITY, 1483 .get = gfs2_xattr_get, 1484 .set = gfs2_xattr_set, 1485 }; 1486 1487 static bool 1488 gfs2_xattr_trusted_list(struct dentry *dentry) 1489 { 1490 return capable(CAP_SYS_ADMIN); 1491 } 1492 1493 static const struct xattr_handler gfs2_xattr_trusted_handler = { 1494 .prefix = XATTR_TRUSTED_PREFIX, 1495 .flags = GFS2_EATYPE_TRUSTED, 1496 .list = gfs2_xattr_trusted_list, 1497 .get = gfs2_xattr_get, 1498 .set = gfs2_xattr_set, 1499 }; 1500 1501 const struct xattr_handler * const gfs2_xattr_handlers_max[] = { 1502 /* GFS2_FS_FORMAT_MAX */ 1503 &gfs2_xattr_trusted_handler, 1504 1505 /* GFS2_FS_FORMAT_MIN */ 1506 &gfs2_xattr_user_handler, 1507 &gfs2_xattr_security_handler, 1508 NULL, 1509 }; 1510 1511 const struct xattr_handler * const *gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1; 1512