// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
#include "xfs_xattr.h"
#include "xfs_rtbitmap.h"

#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for an fd opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on an XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
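
/*
 * For reference, a userspace caller might fill out the request like so
 * (a sketch, assuming the xfs_fsop_handlereq definitions exported
 * through xfs_fs.h; error handling elided):
 *
 *	xfs_handle_t handle;
 *	__u32 hlen;
 *	xfs_fsop_handlereq_t hreq = {
 *		.path		= "/mnt/test/file",
 *		.ohandle	= &handle,
 *		.ohandlen	= &hlen,
 *	};
 *
 *	ioctl(mount_fd, XFS_IOC_PATH_TO_HANDLE, &hreq);
 *
 * XFS_IOC_FD_TO_HANDLE has the same shape with .fd set instead of
 * .path; mount_fd is any open fd on an XFS filesystem.
 */
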
/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
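
/*
 * The matching userspace call feeds a handle back in through ihandle
 * and gets a new fd out (a sketch; the caller needs CAP_SYS_ADMIN, and
 * mount_fd must be a directory on the same filesystem, per
 * xfs_handle_to_dentry() above):
 *
 *	xfs_fsop_handlereq_t hreq = {
 *		.ihandle	= &handle,
 *		.ihandlen	= sizeof(handle),
 *		.oflags		= O_RDONLY,
 *	};
 *	int fd = ioctl(mount_fd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 */
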
int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
static void
xfs_ioc_attr_put_listent(
	struct xfs_attr_list_context *context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	int			valuelen)
{
	struct xfs_attrlist	*alist = context->buffer;
	struct xfs_attrlist_ent	*aep;
	int			arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);

	/* decrement by the actual bytes used by the attr */
	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
			namelen + 1, sizeof(uint32_t));
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = context->buffer + context->firstu;
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}

static unsigned int
xfs_attr_filter(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_ROOT)
		return XFS_ATTR_ROOT;
	if (ioc_flags & XFS_IOC_ATTR_SECURE)
		return XFS_ATTR_SECURE;
	return 0;
}

static unsigned int
xfs_attr_flags(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_CREATE)
		return XATTR_CREATE;
	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
		return XATTR_REPLACE;
	return 0;
}
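
/*
 * The buffer that xfs_ioc_attr_put_listent() fills is consumed by
 * userspace roughly like this (a sketch, using the xfs_attrlist
 * definitions from xfs_fs.h): al_offset[] grows up from the header
 * while the entries are packed downward from the end of the buffer.
 * Given the buffer returned by XFS_IOC_ATTRLIST_BY_HANDLE in buf:
 *
 *	struct xfs_attrlist *alist = (struct xfs_attrlist *)buf;
 *	int i;
 *
 *	for (i = 0; i < alist->al_count; i++) {
 *		struct xfs_attrlist_ent *aep = (struct xfs_attrlist_ent *)
 *				(buf + alist->al_offset[i]);
 *
 *		printf("%s (%u bytes)\n", aep->a_name, aep->a_valuelen);
 *	}
 *
 * A nonzero al_more means the list was cut short; reissue the call with
 * the (kernel-updated) cursor to fetch the rest.
 */
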
int
xfs_ioc_attr_list(
	struct xfs_inode	*dp,
	void __user		*ubuf,
	size_t			bufsize,
	int			flags,
	struct xfs_attrlist_cursor __user *ucursor)
{
	struct xfs_attr_list_context context = { };
	struct xfs_attrlist	*alist;
	void			*buffer;
	int			error;

	if (bufsize < sizeof(struct xfs_attrlist) ||
	    bufsize > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;
	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	/*
	 * Validate the cursor.
	 */
	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
		return -EFAULT;
	if (context.cursor.pad1 || context.cursor.pad2)
		return -EINVAL;
	if (!context.cursor.initted &&
	    (context.cursor.hashval || context.cursor.blkno ||
	     context.cursor.offset))
		return -EINVAL;

	buffer = kvzalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Initialize the output buffer.
	 */
	context.dp = dp;
	context.resynch = 1;
	context.attr_filter = xfs_attr_filter(flags);
	context.buffer = buffer;
	context.bufsize = round_down(bufsize, sizeof(uint32_t));
	context.firstu = context.bufsize;
	context.put_listent = xfs_ioc_attr_put_listent;

	alist = context.buffer;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list(&context);
	if (error)
		goto out_free;

	if (copy_to_user(ubuf, buffer, bufsize) ||
	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
		error = -EFAULT;
out_free:
	kmem_free(buffer);
	return error;
}

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	struct xfs_fsop_attrlist_handlereq __user *p)
{
	struct xfs_fsop_attrlist_handlereq al_hreq;
	struct dentry		*dentry;
	int			error = -ENOMEM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
				  al_hreq.buflen, al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}

static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char __user	*ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kmem_free(args.value);
	return error;
}

static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char __user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_change(&args);
	if (!error && (flags & XFS_IOC_ATTR_ROOT))
		xfs_forget_acl(inode, name);
	kfree(args.value);
	return error;
}
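
/*
 * One op of an XFS_IOC_ATTRMULTI_BY_HANDLE request might be built in
 * userspace like this (a sketch, assuming the xfs_attr_multiop layout
 * from xfs_fs.h; the namespace is picked by am_flags, so the name
 * carries no "user." style prefix):
 *
 *	struct xfs_attr_multiop op = {
 *		.am_opcode	= ATTR_OP_SET,
 *		.am_attrname	= "myattr",
 *		.am_attrvalue	= value,
 *		.am_length	= valuelen,
 *	};
 *	xfs_fsop_attrmulti_handlereq_t am_hreq = {
 *		.hreq		= hreq,		// handle of the target inode
 *		.opcount	= 1,
 *		.ops		= &op,
 *	};
 *
 *	ioctl(mount_fd, XFS_IOC_ATTRMULTI_BY_HANDLE, &am_hreq);
 *
 * The per-op result comes back in op.am_error.
 */
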
int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		fallthrough;
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}

/* Return 0 on success or a negative error */
int
xfs_fsbulkstat_one_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_bulkstat *bstat)
{
	struct xfs_bstat	bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}

int
xfs_fsinumbers_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_inumbers *igrp)
{
	struct xfs_inogrp	ig1;

	xfs_inumbers_to_inogrp(&ig1, igrp);
	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}
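
/*
 * Sketch of the v1 interface as seen from userspace (assuming the
 * xfs_fsop_bulkreq layout from xfs_fs.h):
 *
 *	struct xfs_bstat buf[64];
 *	__u64 lastip = 0;
 *	__s32 ocount = 0;
 *	struct xfs_fsop_bulkreq bulkreq = {
 *		.lastip		= &lastip,
 *		.icount		= 64,
 *		.ubuffer	= buf,
 *		.ocount		= &ocount,
 *	};
 *
 *	while (!ioctl(fd, XFS_IOC_FSBULKSTAT, &bulkreq) && ocount > 0)
 *		;	// consume ocount entries; lastip is advanced
 */
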
STATIC int
xfs_ioc_fsbulkstat(
	struct file		*file,
	unsigned int		cmd,
	void __user		*arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode
	 * to be examined by the previous call and return results starting
	 * with the next inode after that.  The new bulk request back end
	 * functions take the inode to start with, so we have to compute the
	 * startino parameter from lastino to maintain correct function.
	 * lastino == 0 is a special case because it has traditionally meant
	 * "first inode in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}

/* Return 0 on success or a negative error */
static int
xfs_bulkstat_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_bulkstat *bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}
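
/*
 * The v5 request is one buffer with an xfs_bulk_ireq header followed by
 * the result array; a userspace loop might look like this (a sketch,
 * definitions from xfs_fs.h):
 *
 *	struct xfs_bulkstat_req *req;
 *
 *	req = calloc(1, sizeof(*req) + 64 * sizeof(struct xfs_bulkstat));
 *	req->hdr.icount = 64;	// hdr.ino == 0: start of filesystem
 *
 *	while (!ioctl(fd, XFS_IOC_BULKSTAT, req) && req->hdr.ocount > 0)
 *		;	// consume entries; hdr.ino is advanced for us
 */
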
/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk
 * request should proceed; -ECANCELED if there's nothing to do; or the
 * usual negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	const struct xfs_bulk_ireq *hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			breq->startino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given
	 * AG.  If @hdr->ino is zero, we start iterating in that AG.  If
	 * @hdr->ino is beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->flags |= XFS_IBULK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	if (hdr->flags & XFS_BULK_IREQ_NREXT64)
		breq->flags |= XFS_IBULK_NREXT64;

	return 0;
}

/*
 * Update the userspace bulk request @hdr to reflect the end state of the
 * internal bulk request @breq.
 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}

/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct file		*file,
	unsigned int		cmd,
	struct xfs_bulkstat_req __user *arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_inumbers_fmt(
	struct xfs_ibulk	*breq,
	const struct xfs_inumbers *igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
}

/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount	*mp,
	unsigned int		cmd,
	struct xfs_inumbers_req __user *arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	struct xfs_mount	*mp,
	void __user		*arg,
	int			struct_version)
{
	struct xfs_fsop_geom	fsgeo;
	size_t			len;

	xfs_fs_geometry(mp, &fsgeo, struct_version);

	if (struct_version <= 3)
		len = sizeof(struct xfs_fsop_geom_v1);
	else if (struct_version == 4)
		len = sizeof(struct xfs_fsop_geom_v4);
	else {
		xfs_fsop_geom_health(mp, &fsgeo);
		len = sizeof(fsgeo);
	}

	if (copy_to_user(arg, &fsgeo, len))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void __user		*arg)
{
	struct xfs_perag	*pag;
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;
	if (ageo.ag_flags)
		return -EINVAL;
	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
		return -EINVAL;

	pag = xfs_perag_get(mp, ageo.ag_number);
	if (!pag)
		return -EINVAL;

	error = xfs_ag_get_geometry(pag, &ageo);
	xfs_perag_put(pag);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}
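
/*
 * For XFS_IOC_AG_GEOMETRY userspace only picks the AG; the kernel fills
 * in the rest (a sketch, assuming the xfs_ag_geometry field names from
 * xfs_fs.h):
 *
 *	struct xfs_ag_geometry ageo = { .ag_number = 0 };
 *
 *	if (!ioctl(fd, XFS_IOC_AG_GEOMETRY, &ageo))
 *		printf("AG %u: %u blocks, %u free\n", ageo.ag_number,
 *				ageo.ag_length, ageo.ag_freeblks);
 */
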
/*
 * Linux extended inode flags interface.
 */

static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	int			whichfork,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	fileattr_fill_xflags(fa, xfs_ip2xflags(ip));

	if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
		fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
	} else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
		/*
		 * Don't let a misaligned extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
			fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
					    FS_XFLAG_EXTSZINHERIT);
			fa->fsx_extsize = 0;
		} else {
			fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
		}
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
	fa->fsx_projid = ip->i_projid;
	if (ifp && !xfs_need_iread_extents(ifp))
		fa->fsx_nextents = xfs_iext_count(ifp);
	else
		fa->fsx_nextents = xfs_ifork_nextents(ifp);
}

STATIC int
xfs_ioc_fsgetxattra(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	struct fileattr		fa;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return copy_fsxattr_to_user(&fa, arg);
}

int
xfs_fileattr_get(
	struct dentry		*dentry,
	struct fileattr		*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));

	if (d_is_special(dentry))
		return -ENOTTY;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return 0;
}

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_diflags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
				   XFS_DIFLAG2_BIGTIME |
				   XFS_DIFLAG2_NREXT64));

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}
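
/*
 * These translations back the generic fileattr path, so the usual way to
 * flip the flags above from userspace is the VFS FS_IOC_FSSETXATTR ioctl
 * (a sketch, struct fsxattr from <linux/fs.h>), e.g. to request a 1 MiB
 * extent size hint:
 *
 *	struct fsxattr fsx;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fsx);
 *	fsx.fsx_xflags |= FS_XFLAG_EXTSIZE;
 *	fsx.fsx_extsize = 1024 * 1024;	// bytes, see checks below
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
 */
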
static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME);
	uint64_t		i_flags2;

	if (rtflag != XFS_IS_REALTIME_INODE(ip)) {
		/* Can't change realtime flag if any extents are allocated. */
		if (ip->i_df.if_nextents || ip->i_delayed_blks)
			return -EINVAL;
	}

	if (rtflag) {
		/* If realtime flag is set then must have realtime device */
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize))
			return -EINVAL;

		/* Clear reflink if we are actually able to set the rt flag. */
		if (xfs_is_reflink_inode(ip))
			ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	}

	/* diflags2 only valid for v3 inodes. */
	i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (i_flags2 && !xfs_has_v3inodes(mp))
		return -EINVAL;

	ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_diflags2 = i_flags2;

	xfs_diflags_to_iflags(ip, false);

	/*
	 * Make the stable writes flag match that of the device the inode
	 * resides on when flipping the RT flag.
	 */
	if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode))
		xfs_update_stable_writes(ip);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

static void
xfs_ioctl_setattr_prepare_dax(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);

	if (S_ISDIR(inode->i_mode))
		return;

	if (xfs_has_dax_always(mp) || xfs_has_dax_never(mp))
		return;

	if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
	     !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
	    (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
	     (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
		d_mark_dontcache(inode);
}

/*
 * Set up the transaction structure for the setattr operation, checking
 * that we have permission to do so.  On success, return a clean
 * transaction and the inode locked exclusively ready for further
 * operation specific checks.  On failure, return an error without
 * modifying or locking the inode.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (xfs_is_readonly(mp))
		goto out_error;
	error = -EIO;
	if (xfs_is_shutdown(mp))
		goto out_error;

	error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
			has_capability_noaudit(current, CAP_FOWNER), &tp);
	if (error)
		goto out_error;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	return tp;

out_error:
	return ERR_PTR(error);
}

/*
 * Validate a proposed extent size hint.  For regular files, the hint can
 * only be changed if no extents are allocated.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint16_t		new_diflags;

	if (!fa->fsx_valid)
		return 0;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
	    XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
		return -EINVAL;

	if (fa->fsx_extsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);

	/*
	 * Inode verifiers do not check that the extent size hint is an
	 * integer multiple of the rt extent size on a directory with both
	 * rtinherit and extszinherit flags set.  Don't let sysadmins
	 * misconfigure directories.
	 */
	if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
	    (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
		unsigned int	rtextsize_bytes;

		rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
		if (fa->fsx_extsize % rtextsize_bytes)
			return -EINVAL;
	}

	failaddr = xfs_inode_validate_extsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_extsize),
			VFS_I(ip)->i_mode, new_diflags);
	return failaddr != NULL ? -EINVAL : 0;
}

static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint64_t		new_diflags2;
	uint16_t		new_diflags;

	if (!fa->fsx_valid)
		return 0;

	if (fa->fsx_cowextsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);

	failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
			VFS_I(ip)->i_mode, new_diflags, new_diflags2);
	return failaddr != NULL ? -EINVAL : 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fileattr		*fa)
{
	if (!fa->fsx_valid)
		return 0;

	/* Disallow 32bit project ids if 32bit IDs are not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_has_projid32(ip->i_mount))
		return -EINVAL;
	return 0;
}
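
/*
 * Project IDs travel through the same fileattr interface; to move a
 * directory tree into project 42, userspace might do (a sketch):
 *
 *	struct fsxattr fsx;
 *
 *	ioctl(dirfd, FS_IOC_FSGETXATTR, &fsx);
 *	fsx.fsx_projid = 42;
 *	fsx.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	ioctl(dirfd, FS_IOC_FSSETXATTR, &fsx);
 */
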
int
xfs_fileattr_set(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct fileattr		*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			error;

	trace_xfs_ioctl_setattr(ip);

	if (d_is_special(dentry))
		return -ENOTTY;

	if (!fa->fsx_valid) {
		if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
				  FS_NOATIME_FL | FS_NODUMP_FL |
				  FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
			return -EOPNOTSUPP;
	}

	error = xfs_ioctl_setattr_check_projid(ip, fa);
	if (error)
		return error;

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on
	 * disk, before we start any other transactions.  Trying to do this
	 * later is messy.  We don't care to take a readlock to look at the
	 * ids in the inode here, because we can't hold it across the
	 * trans_reserve.  If the IDs do change before we take the ilock,
	 * we're covered because the i_*dquot fields will get updated anyway.
	 */
	if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
				VFS_I(ip)->i_gid, fa->fsx_projid,
				XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
		if (error)
			return error;
	}

	xfs_ioctl_setattr_prepare_dax(ip, fa);

	tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto error_free_dquots;
	}

	error = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (error)
		goto error_trans_cancel;

	if (!fa->fsx_valid)
		goto skip_xattr;
	/*
	 * Change file ownership.  Must be the owner or privileged.
	 * CAP_FSETID overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared
	 * upon successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(idmap, VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (ip->i_projid != fa->fsx_projid) {
		if (XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ip->i_projid = fa->fsx_projid;
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode.  If no extent size
	 * flags are set on the inode then unconditionally clear the extent
	 * size hint.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
	else
		ip->i_extsize = 0;

	if (xfs_has_v3inodes(mp)) {
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		else
			ip->i_cowextsize = 0;
	}

skip_xattr:
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(pdqp);

	return error;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(pdqp);
	return error;
}

static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}
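
/*
 * GETBMAP callers pass one header record followed by room for the
 * results; bmv_count includes the header (a sketch):
 *
 *	struct getbmapx map[33];
 *	int i;
 *
 *	memset(map, 0, sizeof(map));
 *	map[0].bmv_length = -1;		// -1: map to the end of file
 *	map[0].bmv_count = 33;		// header plus 32 result slots
 *
 *	if (!ioctl(fd, XFS_IOC_GETBMAPX, map))
 *		for (i = 1; i <= map[0].bmv_entries; i++)
 *			;	// map[i] describes one extent
 */
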
STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void __user		*arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		fallthrough;
	case XFS_IOC_GETBMAP:
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;
	if (bmx.bmv_count >= INT_MAX / recsize)
		return -ENOMEM;

	buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kmem_free(buf);
	return error;
}
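
/*
 * GETFSMAP callers describe a key range instead; zero low key to
 * all-ones high key covers the whole filesystem (a sketch, struct
 * fsmap_head from <linux/fsmap.h>; the fmr_reserved fields must stay
 * zero):
 *
 *	struct fsmap_head *head;
 *
 *	head = calloc(1, sizeof(*head) + 128 * sizeof(struct fsmap));
 *	head->fmh_count = 128;
 *	head->fmh_keys[1].fmr_device = UINT_MAX;
 *	head->fmh_keys[1].fmr_flags = UINT_MAX;
 *	head->fmh_keys[1].fmr_physical = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_owner = ULLONG_MAX;
 *	head->fmh_keys[1].fmr_offset = ULLONG_MAX;
 *
 *	ioctl(fd, FS_IOC_GETFSMAP, head);
 *	// head->fmh_entries records are now in head->fmh_recs[]
 */
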
STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head __user *arg)
{
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	struct fsmap		*recs;
	unsigned int		count;
	__u32			last_flags = 0;
	bool			done = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	/*
	 * Use an internal memory buffer so that we don't have to copy fsmap
	 * data to userspace while holding locks.  Start by trying to allocate
	 * up to 128k for the buffer, but fall back to a single page if needed.
	 */
	count = min_t(unsigned int, head.fmh_count,
			131072 / sizeof(struct fsmap));
	recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
	if (!recs) {
		count = min_t(unsigned int, head.fmh_count,
				PAGE_SIZE / sizeof(struct fsmap));
		recs = kvcalloc(count, sizeof(struct fsmap), GFP_KERNEL);
		if (!recs)
			return -ENOMEM;
	}

	xhead.fmh_iflags = head.fmh_iflags;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	head.fmh_entries = 0;
	do {
		struct fsmap __user	*user_recs;
		struct fsmap		*last_rec;

		user_recs = &arg->fmh_recs[head.fmh_entries];
		xhead.fmh_entries = 0;
		xhead.fmh_count = min_t(unsigned int, count,
					head.fmh_count - head.fmh_entries);

		/* Run query, record how many entries we got. */
		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
		switch (error) {
		case 0:
			/*
			 * There are no more records in the result set.  Copy
			 * whatever we got to userspace and break out.
			 */
			done = true;
			break;
		case -ECANCELED:
			/*
			 * The internal memory buffer is full.  Copy whatever
			 * records we got to userspace and go again if we have
			 * not yet filled the userspace buffer.
			 */
			error = 0;
			break;
		default:
			goto out_free;
		}
		head.fmh_entries += xhead.fmh_entries;
		head.fmh_oflags = xhead.fmh_oflags;

		/*
		 * If the caller wanted a record count or there aren't any
		 * new records to return, we're done.
		 */
		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
			break;

		/* Copy all the records we got out to userspace. */
		if (copy_to_user(user_recs, recs,
				 xhead.fmh_entries * sizeof(struct fsmap))) {
			error = -EFAULT;
			goto out_free;
		}

		/* Remember the last record flags we copied to userspace. */
		last_rec = &recs[xhead.fmh_entries - 1];
		last_flags = last_rec->fmr_flags;

		/* Set up the low key for the next iteration. */
		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	} while (!done && head.fmh_entries < head.fmh_count);

	/*
	 * If there are no more records in the query result set and we're not
	 * in counting mode, mark the last record returned with the LAST flag.
	 */
	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
		struct fsmap __user	*user_rec;

		last_flags |= FMR_OF_LAST;
		user_rec = &arg->fmh_recs[head.fmh_entries - 1];

		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
				 sizeof(last_flags))) {
			error = -EFAULT;
			goto out_free;
		}
	}

	/* copy back header */
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
		error = -EFAULT;
		goto out_free;
	}

out_free:
	kmem_free(recs);
	return error;
}

STATIC int
xfs_ioc_scrub_metadata(
	struct file		*file,
	void __user		*arg)
{
	struct xfs_scrub_metadata scrub;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;

	error = xfs_scrub_metadata(file, &scrub);
	if (error)
		return error;

	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;

	return 0;
}
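
/*
 * A minimal scrub request from userspace might look like this (a
 * sketch): sm_type selects what to check, and XFS_SCRUB_TYPE_PROBE
 * merely verifies that the scrub ioctl plumbing works.  Results come
 * back in sm_flags.
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_PROBE,
 *	};
 *
 *	ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 */
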
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (xfs_is_shutdown(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}

static int
xfs_ioc_getlabel(
	struct xfs_mount	*mp,
	char			__user *user_label)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];

	/* Paranoia */
	BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);

	/* 1 larger than sb_fname, so this ensures a trailing NUL char */
	memset(label, 0, sizeof(label));
	spin_lock(&mp->m_sb_lock);
	strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
	spin_unlock(&mp->m_sb_lock);

	if (copy_to_user(user_label, label, sizeof(label)))
		return -EFAULT;
	return 0;
}
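
/*
 * These back the generic FS_IOC_[GS]ETFSLABEL ioctls, so ordinary
 * userspace (and tools like blkid) can drive them (a sketch):
 *
 *	char label[FSLABEL_MAX];
 *
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *	snprintf(label, sizeof(label), "scratch");
 *	ioctl(fd, FS_IOC_SETFSLABEL, label);
 */
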
static int
xfs_ioc_setlabel(
	struct file		*filp,
	struct xfs_mount	*mp,
	char			__user *newlabel)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];
	size_t			len;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
	 * smaller, at 12 bytes.  We copy one more to be sure we find the
	 * (required) NULL character to test the incoming label length.
	 * NB: The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
		return -EFAULT;
	len = strnlen(label, XFSLABEL_MAX + 1);
	if (len > sizeof(sbp->sb_fname))
		return -EINVAL;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	spin_lock(&mp->m_sb_lock);
	memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
	memcpy(sbp->sb_fname, label, len);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now we do several things to satisfy userspace.
	 * In addition to normal logging of the primary superblock, we also
	 * immediately write these changes to sector zero for the primary,
	 * then update all backup supers (as xfs_db does for a label change),
	 * then invalidate the block device page cache.  This is so that any
	 * prior buffered reads from userspace (i.e. from blkid) are
	 * invalidated, and userspace will see the newly-written label.
	 */
	error = xfs_sync_sb_buf(mp);
	if (error)
		goto out;
	/*
	 * growfs also updates backup supers so lock against that.
	 */
	mutex_lock(&mp->m_growlock);
	error = xfs_update_secondary_sbs(mp);
	mutex_unlock(&mp->m_growlock);

	invalidate_bdev(mp->m_ddev_targp->bt_bdev);

out:
	mnt_drop_write_file(filp);
	return error;
}

static inline int
xfs_fs_eofblocks_from_user(
	struct xfs_fs_eofblocks	*src,
	struct xfs_icwalk	*dst)
{
	if (src->eof_version != XFS_EOFBLOCKS_VERSION)
		return -EINVAL;

	if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
		return -EINVAL;

	if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
	    memchr_inv(src->pad64, 0, sizeof(src->pad64)))
		return -EINVAL;

	dst->icw_flags = 0;
	if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
		dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
	if (src->eof_flags & XFS_EOF_FLAGS_UID)
		dst->icw_flags |= XFS_ICWALK_FLAG_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID)
		dst->icw_flags |= XFS_ICWALK_FLAG_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_PRID)
		dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
	if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
		dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;

	dst->icw_prid = src->eof_prid;
	dst->icw_min_file_size = src->eof_min_file_size;

	dst->icw_uid = INVALID_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
		dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
		if (!uid_valid(dst->icw_uid))
			return -EINVAL;
	}

	dst->icw_gid = INVALID_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID) {
		dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
		if (!gid_valid(dst->icw_gid))
			return -EINVAL;
	}
	return 0;
}
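
/*
 * Sketch of the matching userspace request, e.g. asking for a
 * synchronous trim of post-EOF blocks on all files larger than 1 MiB
 * (assuming the xfs_fs_eofblocks layout from xfs_fs.h):
 *
 *	struct xfs_fs_eofblocks eofb = {
 *		.eof_version		= XFS_EOFBLOCKS_VERSION,
 *		.eof_flags		= XFS_EOF_FLAGS_SYNC |
 *					  XFS_EOF_FLAGS_MINFILESIZE,
 *		.eof_min_file_size	= 1024 * 1024,
 *	};
 *
 *	ioctl(fd, XFS_IOC_FREE_EOFBLOCKS, &eofb);
 */
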
/*
 * These long-unused ioctls were removed from the official ioctl API in
 * 5.17, but retain these definitions so that we can log warnings about
 * them.
 */
#define XFS_IOC_ALLOCSP		_IOW ('X', 10, struct xfs_flock64)
#define XFS_IOC_FREESP		_IOW ('X', 11, struct xfs_flock64)
#define XFS_IOC_ALLOCSP64	_IOW ('X', 36, struct xfs_flock64)
#define XFS_IOC_FREESP64	_IOW ('X', 37, struct xfs_flock64)

/*
 * Note: some of the ioctls return positive numbers as a byte count
 * indicating success, such as readlink_by_handle.  So we don't "sign
 * flip" like most other routines.  This means true errors need to be
 * returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case FS_IOC_GETFSLABEL:
		return xfs_ioc_getlabel(mp, arg);
	case FS_IOC_SETFSLABEL:
		return xfs_ioc_setlabel(filp, mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
		xfs_warn_once(mp,
	"%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported",
				current->comm);
		return -ENOTTY;
	case XFS_IOC_DIOINFO: {
		struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
		struct dioattr		da;

		da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_fsbulkstat(filp, cmd, arg);

	case XFS_IOC_BULKSTAT:
		return xfs_ioc_bulkstat(filp, cmd, arg);
	case XFS_IOC_INUMBERS:
		return xfs_ioc_inumbers(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry(mp, arg, 3);
	case XFS_IOC_FSGEOMETRY_V4:
		return xfs_ioc_fsgeometry(mp, arg, 4);
	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg, 5);

	case XFS_IOC_AG_GEOMETRY:
		return xfs_ioc_ag_geometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattra(ip, arg);

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(filp, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t out;

		xfs_fs_counts(mp, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t inout;
		uint64_t	   in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;

		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		struct xfs_growfs_data in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		struct xfs_growfs_log in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks	eofb;
		struct xfs_icwalk	icw;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &icw);
		if (error)
			return error;

		trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);

		sb_start_write(mp->m_super);
		error = xfs_blockgc_free_space(mp, &icw);
		sb_end_write(mp->m_super);
		return error;
	}

	default:
		return -ENOTTY;
	}
}