// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
#include "xfs_xattr.h"
#include "xfs_rtbitmap.h"

#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for an fd opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on an XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
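
/*
 * A minimal userspace sketch of the handle interface above (illustrative
 * only, not part of this file; "fd" is assumed to be an open descriptor
 * somewhere on the target filesystem and "/mnt/scratch" a hypothetical
 * mount point):
 *
 *	xfs_handle_t		handle;
 *	__u32			hlen = sizeof(handle);
 *	xfs_fsop_handlereq_t	hreq = {
 *		.path		= "/mnt/scratch",
 *		.ohandle	= &handle,
 *		.ohandlen	= &hlen,
 *	};
 *	int			error;
 *
 *	error = ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);
 *
 * The handle written back can later be fed through ihandle/ihandlen to the
 * *_BY_HANDLE ioctls below.
 */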

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
static void
xfs_ioc_attr_put_listent(
	struct xfs_attr_list_context *context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	int			valuelen)
{
	struct xfs_attrlist	*alist = context->buffer;
	struct xfs_attrlist_ent	*aep;
	int			arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);

	/* decrement by the actual bytes used by the attr */
	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
			namelen + 1, sizeof(uint32_t));
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = context->buffer + context->firstu;
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}

static unsigned int
xfs_attr_filter(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_ROOT)
		return XFS_ATTR_ROOT;
	if (ioc_flags & XFS_IOC_ATTR_SECURE)
		return XFS_ATTR_SECURE;
	return 0;
}

static unsigned int
xfs_attr_flags(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_CREATE)
		return XATTR_CREATE;
	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
		return XATTR_REPLACE;
	return 0;
}
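
/*
 * Note on the buffer layout built by xfs_ioc_attr_put_listent() above: the
 * struct xfs_attrlist header and its al_offset[] array grow forward from
 * offset zero, while the variable-length xfs_attrlist_ent records are packed
 * backward from the end of the buffer.  context->firstu is the low watermark
 * of the entry area, and listing stops (setting al_more) once the two
 * regions would collide.
 */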
int
xfs_ioc_attr_list(
	struct xfs_inode	*dp,
	void __user		*ubuf,
	size_t			bufsize,
	int			flags,
	struct xfs_attrlist_cursor __user *ucursor)
{
	struct xfs_attr_list_context context = { };
	struct xfs_attrlist	*alist;
	void			*buffer;
	int			error;

	if (bufsize < sizeof(struct xfs_attrlist) ||
	    bufsize > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;
	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	/*
	 * Validate the cursor.
	 */
	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
		return -EFAULT;
	if (context.cursor.pad1 || context.cursor.pad2)
		return -EINVAL;
	if (!context.cursor.initted &&
	    (context.cursor.hashval || context.cursor.blkno ||
	     context.cursor.offset))
		return -EINVAL;

	buffer = kvzalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Initialize the output buffer.
	 */
	context.dp = dp;
	context.resynch = 1;
	context.attr_filter = xfs_attr_filter(flags);
	context.buffer = buffer;
	context.bufsize = round_down(bufsize, sizeof(uint32_t));
	context.firstu = context.bufsize;
	context.put_listent = xfs_ioc_attr_put_listent;

	alist = context.buffer;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list(&context);
	if (error)
		goto out_free;

	if (copy_to_user(ubuf, buffer, bufsize) ||
	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
		error = -EFAULT;
out_free:
	kmem_free(buffer);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	struct xfs_fsop_attrlist_handlereq __user *p)
{
	struct xfs_fsop_attrlist_handlereq al_hreq;
	struct dentry		*dentry;
	int			error = -ENOMEM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
				  al_hreq.buflen, al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}

static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char __user	*ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kmem_free(args.value);
	return error;
}

static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char __user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_change(&args);
	if (!error && (flags & XFS_IOC_ATTR_ROOT))
		xfs_forget_acl(inode, name);
	kfree(args.value);
	return error;
}
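
/*
 * The namespace flags above map onto the generic xattr namespaces:
 * XFS_IOC_ATTR_ROOT selects the attributes that the VFS exposes with the
 * "trusted." prefix, XFS_IOC_ATTR_SECURE selects the "security." prefix,
 * and with neither flag set we operate on user attributes.
 */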
int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		fallthrough;
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
out_dput:
	dput(dentry);
	return error;
}

/* Return 0 on success or a negative error */
int
xfs_fsbulkstat_one_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct xfs_bstat		bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}

int
xfs_fsinumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	struct xfs_inogrp		ig1;

	xfs_inumbers_to_inogrp(&ig1, igrp);
	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}
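
/*
 * Userspace sketch of the v1 bulkstat loop handled below (illustrative
 * only; "fd" is an open descriptor on the filesystem):
 *
 *	struct xfs_bstat	buf[64];
 *	__u64			lastino = 0;
 *	__s32			ocount;
 *	struct xfs_fsop_bulkreq	req = {
 *		.lastip		= &lastino,
 *		.icount		= 64,
 *		.ubuffer	= buf,
 *		.ocount		= &ocount,
 *	};
 *
 *	while (!ioctl(fd, XFS_IOC_FSBULKSTAT, &req) && ocount > 0)
 *		consume(buf, ocount);	(consume() is hypothetical; each
 *					 pass resumes after *lastip)
 */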
STATIC int
xfs_ioc_fsbulkstat(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}

/* Return 0 on success or a negative error */
static int
xfs_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}
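
/*
 * Sketch of a v5 bulk request header as it arrives from userspace
 * (illustrative; XFS_BULKSTAT_REQ_SIZE() comes from the uapi header):
 *
 *	struct xfs_bulkstat_req	*req = calloc(1, XFS_BULKSTAT_REQ_SIZE(64));
 *
 *	req->hdr.icount = 64;
 *	req->hdr.flags = XFS_BULK_IREQ_AGNO;	(restrict results to one AG)
 *	req->hdr.agno = 2;
 *	error = ioctl(fd, XFS_IOC_BULKSTAT, req);
 *
 * xfs_bulk_ireq_setup() below translates such a header into a struct
 * xfs_ibulk, and xfs_bulk_ireq_teardown() writes the final cursor back.
 */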

/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 * should proceed; -ECANCELED if there's nothing to do; or the usual
 * negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	const struct xfs_bulk_ireq *hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			breq->startino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino
	 * is beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->flags |= XFS_IBULK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	if (hdr->flags & XFS_BULK_IREQ_NREXT64)
		breq->flags |= XFS_IBULK_NREXT64;

	return 0;
}

/*
 * Update the userspace bulk request @hdr to reflect the end state of the
 * internal bulk request @breq.
 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}

/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct file		*file,
	unsigned int		cmd,
	struct xfs_bulkstat_req __user *arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_inumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
}

/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount	*mp,
	unsigned int		cmd,
	struct xfs_inumbers_req __user *arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	struct xfs_mount	*mp,
	void __user		*arg,
	int			struct_version)
{
	struct xfs_fsop_geom	fsgeo;
	size_t			len;

	xfs_fs_geometry(mp, &fsgeo, struct_version);

	if (struct_version <= 3)
		len = sizeof(struct xfs_fsop_geom_v1);
	else if (struct_version == 4)
		len = sizeof(struct xfs_fsop_geom_v4);
	else {
		xfs_fsop_geom_health(mp, &fsgeo);
		len = sizeof(fsgeo);
	}

	if (copy_to_user(arg, &fsgeo, len))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void __user		*arg)
{
	struct xfs_perag	*pag;
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;
	if (ageo.ag_flags)
		return -EINVAL;
	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
		return -EINVAL;

	pag = xfs_perag_get(mp, ageo.ag_number);
	if (!pag)
		return -EINVAL;

	error = xfs_ag_get_geometry(pag, &ageo);
	xfs_perag_put(pag);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}
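
/*
 * Userspace can iterate per-AG geometries by bumping ag_number until the
 * ioctl fails with EINVAL (illustrative sketch; the AG count is also
 * available via XFS_IOC_FSGEOMETRY):
 *
 *	struct xfs_ag_geometry	ageo = { .ag_number = 0 };
 *
 *	while (!ioctl(fd, XFS_IOC_AG_GEOMETRY, &ageo))
 *		ageo.ag_number++;
 */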

/*
 * Linux extended inode flags interface.
 */

static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	int			whichfork,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	fileattr_fill_xflags(fa, xfs_ip2xflags(ip));

	if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
		fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
	} else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
		/*
		 * Don't let a misaligned extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
			fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
					    FS_XFLAG_EXTSZINHERIT);
			fa->fsx_extsize = 0;
		} else {
			fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
		}
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
	fa->fsx_projid = ip->i_projid;
	if (ifp && !xfs_need_iread_extents(ifp))
		fa->fsx_nextents = xfs_iext_count(ifp);
	else
		fa->fsx_nextents = xfs_ifork_nextents(ifp);
}

STATIC int
xfs_ioc_fsgetxattra(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	struct fileattr		fa;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return copy_fsxattr_to_user(&fa, arg);
}

int
xfs_fileattr_get(
	struct dentry		*dentry,
	struct fileattr		*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));

	if (d_is_special(dentry))
		return -ENOTTY;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return 0;
}

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_diflags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
				   XFS_DIFLAG2_BIGTIME |
				   XFS_DIFLAG2_NREXT64));

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		i_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If the realtime flag is set then we must have a realtime device. */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;

	/* diflags2 only valid for v3 inodes. */
	i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (i_flags2 && !xfs_has_v3inodes(mp))
		return -EINVAL;

	ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_diflags2 = i_flags2;

	xfs_diflags_to_iflags(ip, false);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

static void
xfs_ioctl_setattr_prepare_dax(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);

	if (S_ISDIR(inode->i_mode))
		return;

	if (xfs_has_dax_always(mp) || xfs_has_dax_never(mp))
		return;

	if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
	    !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
	    (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
	     (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
		d_mark_dontcache(inode);
}

/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (xfs_is_readonly(mp))
		goto out_error;
	error = -EIO;
	if (xfs_is_shutdown(mp))
		goto out_error;

	error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
			has_capability_noaudit(current, CAP_FOWNER), &tp);
	if (error)
		goto out_error;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	return tp;

out_error:
	return ERR_PTR(error);
}
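
/*
 * Worked example for the extent size hint checks below (figures are
 * illustrative): with 4096-byte blocks and sb_rextsize = 16 blocks, one
 * realtime extent is 65536 bytes, so an inherited hint of 131072 bytes
 * (2 rt extents) is accepted while 98304 bytes (1.5 rt extents) fails the
 * modulus check and returns -EINVAL.
 */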

/*
 * Validate a proposed extent size hint.  For regular files, the hint can only
 * be changed if no extents are allocated.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint16_t		new_diflags;

	if (!fa->fsx_valid)
		return 0;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
	    XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
		return -EINVAL;

	if (fa->fsx_extsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);

	/*
	 * Inode verifiers do not check that the extent size hint is an integer
	 * multiple of the rt extent size on a directory with both rtinherit
	 * and extszinherit flags set.  Don't let sysadmins misconfigure
	 * directories.
	 */
	if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
	    (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
		unsigned int	rtextsize_bytes;

		rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
		if (fa->fsx_extsize % rtextsize_bytes)
			return -EINVAL;
	}

	failaddr = xfs_inode_validate_extsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_extsize),
			VFS_I(ip)->i_mode, new_diflags);
	return failaddr != NULL ? -EINVAL : 0;
}

static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint64_t		new_diflags2;
	uint16_t		new_diflags;

	if (!fa->fsx_valid)
		return 0;

	if (fa->fsx_cowextsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);

	failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
			VFS_I(ip)->i_mode, new_diflags, new_diflags2);
	return failaddr != NULL ? -EINVAL : 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	if (!fa->fsx_valid)
		return 0;

	/* Disallow 32bit project ids if 32bit IDs are not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_has_projid32(ip->i_mount))
		return -EINVAL;
	return 0;
}
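
/*
 * Userspace reaches xfs_fileattr_set() below through the generic fileattr
 * ioctls; changing the project ID of a directory tree root, for instance,
 * looks roughly like this (illustrative sketch; project 42 is arbitrary):
 *
 *	struct fsxattr	fsx;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fsx);
 *	fsx.fsx_projid = 42;
 *	fsx.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
 */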
int
xfs_fileattr_set(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct fileattr		*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			error;

	trace_xfs_ioctl_setattr(ip);

	if (d_is_special(dentry))
		return -ENOTTY;

	if (!fa->fsx_valid) {
		if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
				  FS_NOATIME_FL | FS_NODUMP_FL |
				  FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
			return -EOPNOTSUPP;
	}

	error = xfs_ioctl_setattr_check_projid(ip, fa);
	if (error)
		return error;

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on
	 * disk, before we start any other transactions. Trying to do this
	 * later is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
				VFS_I(ip)->i_gid, fa->fsx_projid,
				XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
		if (error)
			return error;
	}

	xfs_ioctl_setattr_prepare_dax(ip, fa);

	tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto error_free_dquots;
	}

	error = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (error)
		goto error_trans_cancel;

	if (!fa->fsx_valid)
		goto skip_xattr;
	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(idmap, VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (ip->i_projid != fa->fsx_projid) {
		if (XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ip->i_projid = fa->fsx_projid;
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
	else
		ip->i_extsize = 0;

	if (xfs_has_v3inodes(mp)) {
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		else
			ip->i_cowextsize = 0;
	}

skip_xattr:
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(pdqp);

	return error;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(pdqp);
	return error;
}

static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}
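
/*
 * Sketch of a GETBMAPX call into the handler below (illustrative): the
 * header record in map[0] carries the query, bmv_count includes that
 * header, and the extent records come back in map[1..bmv_entries]:
 *
 *	struct getbmapx	map[17];
 *
 *	memset(map, 0, sizeof(map));
 *	map[0].bmv_length = -1;		(to end of file)
 *	map[0].bmv_count = 17;		(header plus up to 16 extents)
 *	error = ioctl(fd, XFS_IOC_GETBMAPX, map);
 */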
STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		fallthrough;
	case XFS_IOC_GETBMAP:
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;
	if (bmx.bmv_count >= INT_MAX / recsize)
		return -ENOMEM;

	buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kmem_free(buf);
	return error;
}
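
/*
 * Userspace drives GETFSMAP as a range query: the two fmh_keys bound the
 * mapping records wanted, and each pass feeds the last record returned back
 * in as the new low key.  The uapi header provides fsmap_sizeof() and
 * fsmap_advance() for this.  Illustrative sketch:
 *
 *	struct fsmap_head	*head = calloc(1, fsmap_sizeof(128));
 *
 *	head->fmh_count = 128;
 *	head->fmh_keys[1].fmr_device = UINT_MAX;
 *	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_owner = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_offset = ULLONG_MAX;
 *	while (!ioctl(fd, FS_IOC_GETFSMAP, head) && head->fmh_entries) {
 *		consume(head->fmh_recs, head->fmh_entries);
 *		fsmap_advance(head);	(copies the last rec into keys[0])
 *	}
 */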
STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	struct fsmap		*recs;
	unsigned int		count;
	__u32			last_flags = 0;
	bool			done = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	/*
	 * Use an internal memory buffer so that we don't have to copy fsmap
	 * data to userspace while holding locks. Start by trying to allocate
	 * up to 128k for the buffer, but fall back to a single page if needed.
	 */
	count = min_t(unsigned int, head.fmh_count,
			131072 / sizeof(struct fsmap));
	recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
	if (!recs) {
		count = min_t(unsigned int, head.fmh_count,
				PAGE_SIZE / sizeof(struct fsmap));
		recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
		if (!recs)
			return -ENOMEM;
	}

	xhead.fmh_iflags = head.fmh_iflags;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	head.fmh_entries = 0;
	do {
		struct fsmap __user	*user_recs;
		struct fsmap		*last_rec;

		user_recs = &arg->fmh_recs[head.fmh_entries];
		xhead.fmh_entries = 0;
		xhead.fmh_count = min_t(unsigned int, count,
					head.fmh_count - head.fmh_entries);

		/* Run query, record how many entries we got. */
		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
		switch (error) {
		case 0:
			/*
			 * There are no more records in the result set. Copy
			 * whatever we got to userspace and break out.
			 */
			done = true;
			break;
		case -ECANCELED:
			/*
			 * The internal memory buffer is full. Copy whatever
			 * records we got to userspace and go again if we have
			 * not yet filled the userspace buffer.
			 */
			error = 0;
			break;
		default:
			goto out_free;
		}
		head.fmh_entries += xhead.fmh_entries;
		head.fmh_oflags = xhead.fmh_oflags;

		/*
		 * If the caller wanted a record count or there aren't any
		 * new records to return, we're done.
		 */
		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
			break;

		/* Copy all the records we got out to userspace. */
		if (copy_to_user(user_recs, recs,
				 xhead.fmh_entries * sizeof(struct fsmap))) {
			error = -EFAULT;
			goto out_free;
		}

		/* Remember the last record flags we copied to userspace. */
		last_rec = &recs[xhead.fmh_entries - 1];
		last_flags = last_rec->fmr_flags;

		/* Set up the low key for the next iteration. */
		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	} while (!done && head.fmh_entries < head.fmh_count);

	/*
	 * If there are no more records in the query result set and we're not
	 * in counting mode, mark the last record returned with the LAST flag.
	 */
	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
		struct fsmap __user	*user_rec;

		last_flags |= FMR_OF_LAST;
		user_rec = &arg->fmh_recs[head.fmh_entries - 1];

		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
				 sizeof(last_flags))) {
			error = -EFAULT;
			goto out_free;
		}
	}

	/* copy back header */
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
		error = -EFAULT;
		goto out_free;
	}

out_free:
	kmem_free(recs);
	return error;
}

STATIC int
xfs_ioc_scrub_metadata(
	struct file			*file,
	void				__user *arg)
{
	struct xfs_scrub_metadata	scrub;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;

	error = xfs_scrub_metadata(file, &scrub);
	if (error)
		return error;

	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;

	return 0;
}
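
/*
 * XFS_IOC_SWAPEXT is the legacy defragmentation primitive: the xfs_fsr
 * utility copies a fragmented file into a freshly allocated temporary file
 * and then asks us to swap the data forks of the two inodes.
 */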
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (xfs_is_shutdown(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}

static int
xfs_ioc_getlabel(
	struct xfs_mount	*mp,
	char			__user *user_label)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];

	/* Paranoia */
	BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);

	/* 1 larger than sb_fname, so this ensures a trailing NUL char */
	memset(label, 0, sizeof(label));
	spin_lock(&mp->m_sb_lock);
	strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
	spin_unlock(&mp->m_sb_lock);

	if (copy_to_user(user_label, label, sizeof(label)))
		return -EFAULT;
	return 0;
}

static int
xfs_ioc_setlabel(
	struct file		*filp,
	struct xfs_mount	*mp,
	char			__user *newlabel)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];
	size_t			len;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
	 * smaller, at 12 bytes. We copy one more to be sure we find the
	 * (required) NULL character to test the incoming label length.
	 * NB: The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
		return -EFAULT;
	len = strnlen(label, XFSLABEL_MAX + 1);
	if (len > sizeof(sbp->sb_fname))
		return -EINVAL;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	spin_lock(&mp->m_sb_lock);
	memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
	memcpy(sbp->sb_fname, label, len);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now we do several things to satisfy userspace.
	 * In addition to normal logging of the primary superblock, we also
	 * immediately write these changes to sector zero for the primary, then
	 * update all backup supers (as xfs_db does for a label change), then
	 * invalidate the block device page cache. This is so that any prior
	 * buffered reads from userspace (i.e. from blkid) are invalidated,
	 * and userspace will see the newly-written label.
	 */
	error = xfs_sync_sb_buf(mp);
	if (error)
		goto out;
	/*
	 * growfs also updates backup supers so lock against that.
	 */
	mutex_lock(&mp->m_growlock);
	error = xfs_update_secondary_sbs(mp);
	mutex_unlock(&mp->m_growlock);

	invalidate_bdev(mp->m_ddev_targp->bt_bdev);

out:
	mnt_drop_write_file(filp);
	return error;
}

static inline int
xfs_fs_eofblocks_from_user(
	struct xfs_fs_eofblocks	*src,
	struct xfs_icwalk	*dst)
{
	if (src->eof_version != XFS_EOFBLOCKS_VERSION)
		return -EINVAL;

	if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
		return -EINVAL;

	if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
	    memchr_inv(src->pad64, 0, sizeof(src->pad64)))
		return -EINVAL;

	dst->icw_flags = 0;
	if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
		dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
	if (src->eof_flags & XFS_EOF_FLAGS_UID)
		dst->icw_flags |= XFS_ICWALK_FLAG_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID)
		dst->icw_flags |= XFS_ICWALK_FLAG_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_PRID)
		dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
	if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
		dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;

	dst->icw_prid = src->eof_prid;
	dst->icw_min_file_size = src->eof_min_file_size;

	dst->icw_uid = INVALID_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
		dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
		if (!uid_valid(dst->icw_uid))
			return -EINVAL;
	}

	dst->icw_gid = INVALID_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID) {
		dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
		if (!gid_valid(dst->icw_gid))
			return -EINVAL;
	}
	return 0;
}

/*
 * These long-unused ioctls were removed from the official ioctl API in 5.17,
 * but retain these definitions so that we can log warnings about them.
 */
#define XFS_IOC_ALLOCSP		_IOW ('X', 10, struct xfs_flock64)
#define XFS_IOC_FREESP		_IOW ('X', 11, struct xfs_flock64)
#define XFS_IOC_ALLOCSP64	_IOW ('X', 36, struct xfs_flock64)
#define XFS_IOC_FREESP64	_IOW ('X', 37, struct xfs_flock64)

/*
 * Note: some of the ioctls return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
1879 */ 1880 long 1881 xfs_file_ioctl( 1882 struct file *filp, 1883 unsigned int cmd, 1884 unsigned long p) 1885 { 1886 struct inode *inode = file_inode(filp); 1887 struct xfs_inode *ip = XFS_I(inode); 1888 struct xfs_mount *mp = ip->i_mount; 1889 void __user *arg = (void __user *)p; 1890 int error; 1891 1892 trace_xfs_file_ioctl(ip); 1893 1894 switch (cmd) { 1895 case FITRIM: 1896 return xfs_ioc_trim(mp, arg); 1897 case FS_IOC_GETFSLABEL: 1898 return xfs_ioc_getlabel(mp, arg); 1899 case FS_IOC_SETFSLABEL: 1900 return xfs_ioc_setlabel(filp, mp, arg); 1901 case XFS_IOC_ALLOCSP: 1902 case XFS_IOC_FREESP: 1903 case XFS_IOC_ALLOCSP64: 1904 case XFS_IOC_FREESP64: 1905 xfs_warn_once(mp, 1906 "%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported", 1907 current->comm); 1908 return -ENOTTY; 1909 case XFS_IOC_DIOINFO: { 1910 struct xfs_buftarg *target = xfs_inode_buftarg(ip); 1911 struct dioattr da; 1912 1913 da.d_mem = da.d_miniosz = target->bt_logical_sectorsize; 1914 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); 1915 1916 if (copy_to_user(arg, &da, sizeof(da))) 1917 return -EFAULT; 1918 return 0; 1919 } 1920 1921 case XFS_IOC_FSBULKSTAT_SINGLE: 1922 case XFS_IOC_FSBULKSTAT: 1923 case XFS_IOC_FSINUMBERS: 1924 return xfs_ioc_fsbulkstat(filp, cmd, arg); 1925 1926 case XFS_IOC_BULKSTAT: 1927 return xfs_ioc_bulkstat(filp, cmd, arg); 1928 case XFS_IOC_INUMBERS: 1929 return xfs_ioc_inumbers(mp, cmd, arg); 1930 1931 case XFS_IOC_FSGEOMETRY_V1: 1932 return xfs_ioc_fsgeometry(mp, arg, 3); 1933 case XFS_IOC_FSGEOMETRY_V4: 1934 return xfs_ioc_fsgeometry(mp, arg, 4); 1935 case XFS_IOC_FSGEOMETRY: 1936 return xfs_ioc_fsgeometry(mp, arg, 5); 1937 1938 case XFS_IOC_AG_GEOMETRY: 1939 return xfs_ioc_ag_geometry(mp, arg); 1940 1941 case XFS_IOC_GETVERSION: 1942 return put_user(inode->i_generation, (int __user *)arg); 1943 1944 case XFS_IOC_FSGETXATTRA: 1945 return xfs_ioc_fsgetxattra(ip, arg); 1946 1947 case XFS_IOC_GETBMAP: 1948 case XFS_IOC_GETBMAPA: 1949 case XFS_IOC_GETBMAPX: 1950 return xfs_ioc_getbmap(filp, cmd, arg); 1951 1952 case FS_IOC_GETFSMAP: 1953 return xfs_ioc_getfsmap(ip, arg); 1954 1955 case XFS_IOC_SCRUB_METADATA: 1956 return xfs_ioc_scrub_metadata(filp, arg); 1957 1958 case XFS_IOC_FD_TO_HANDLE: 1959 case XFS_IOC_PATH_TO_HANDLE: 1960 case XFS_IOC_PATH_TO_FSHANDLE: { 1961 xfs_fsop_handlereq_t hreq; 1962 1963 if (copy_from_user(&hreq, arg, sizeof(hreq))) 1964 return -EFAULT; 1965 return xfs_find_handle(cmd, &hreq); 1966 } 1967 case XFS_IOC_OPEN_BY_HANDLE: { 1968 xfs_fsop_handlereq_t hreq; 1969 1970 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 1971 return -EFAULT; 1972 return xfs_open_by_handle(filp, &hreq); 1973 } 1974 1975 case XFS_IOC_READLINK_BY_HANDLE: { 1976 xfs_fsop_handlereq_t hreq; 1977 1978 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 1979 return -EFAULT; 1980 return xfs_readlink_by_handle(filp, &hreq); 1981 } 1982 case XFS_IOC_ATTRLIST_BY_HANDLE: 1983 return xfs_attrlist_by_handle(filp, arg); 1984 1985 case XFS_IOC_ATTRMULTI_BY_HANDLE: 1986 return xfs_attrmulti_by_handle(filp, arg); 1987 1988 case XFS_IOC_SWAPEXT: { 1989 struct xfs_swapext sxp; 1990 1991 if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) 1992 return -EFAULT; 1993 error = mnt_want_write_file(filp); 1994 if (error) 1995 return error; 1996 error = xfs_ioc_swapext(&sxp); 1997 mnt_drop_write_file(filp); 1998 return error; 1999 } 2000 2001 case XFS_IOC_FSCOUNTS: { 2002 xfs_fsop_counts_t out; 2003 2004 xfs_fs_counts(mp, &out); 2005 2006 if (copy_to_user(arg, &out, sizeof(out))) 
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t	inout;
		uint64_t		in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t	out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		struct xfs_growfs_data	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		struct xfs_growfs_log	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t		in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t		in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t	in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks	eofb;
		struct xfs_icwalk	icw;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &icw);
		if (error)
			return error;

		trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);

		sb_start_write(mp->m_super);
		error = xfs_blockgc_free_space(mp, &icw);
		sb_end_write(mp->m_super);
		return error;
	}

	default:
		return -ENOTTY;
	}
}