/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_vnode.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_dfrag.h"
#include "xfs_vnodeops.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"

#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

#ifdef BROKEN_X86_ALIGNMENT
STATIC int
xfs_compat_flock64_copyin(
	xfs_flock64_t		*bf,
	compat_xfs_flock64_t	__user *arg32)
{
	if (get_user(bf->l_type,	&arg32->l_type) ||
	    get_user(bf->l_whence,	&arg32->l_whence) ||
	    get_user(bf->l_start,	&arg32->l_start) ||
	    get_user(bf->l_len,		&arg32->l_len) ||
	    get_user(bf->l_sysid,	&arg32->l_sysid) ||
	    get_user(bf->l_pid,		&arg32->l_pid) ||
	    copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	  *mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	xfs_fsop_geom_t		  fsgeo;
	int			  error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return -error;
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_growfs_data_copyin(
	struct xfs_growfs_data	 *in,
	compat_xfs_growfs_data_t __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->imaxpct,   &arg32->imaxpct))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	 *in,
	compat_xfs_growfs_rt_t	 __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize,   &arg32->extsize))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_inumbers_fmt_compat(
	void			__user *ubuffer,
	const xfs_inogrp_t	*buffer,
	long			count,
	long			*written)
{
	compat_xfs_inogrp_t	__user *p32 = ubuffer;
	long			i;

	for (i = 0; i < count; i++) {
		if (put_user(buffer[i].xi_startino,   &p32[i].xi_startino) ||
		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
		    put_user(buffer[i].xi_allocmask,  &p32[i].xi_allocmask))
			return -XFS_ERROR(EFAULT);
	}
	*written = count * sizeof(*p32);
	return 0;
}

#else
#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
#endif	/* BROKEN_X86_ALIGNMENT */

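/*
 * The helpers below are needed on every compat (32-bit caller on a 64-bit
 * kernel) build, not only on BROKEN_X86_ALIGNMENT architectures:
 * compat_time_t and the user pointers embedded in the bulkreq/handlereq
 * structures differ in size from their native 64-bit counterparts, so the
 * fields are copied individually.
 */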
STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32,		&bstime32->tv_sec)	||
	    get_user(bstime->tv_nsec,	&bstime32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	bstime->tv_sec = sec32;
	return 0;
}

/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
STATIC int
xfs_ioctl32_bstat_copyin(
	xfs_bstat_t		*bstat,
	compat_xfs_bstat_t	__user *bstat32)
{
	if (get_user(bstat->bs_ino,	&bstat32->bs_ino)	||
	    get_user(bstat->bs_mode,	&bstat32->bs_mode)	||
	    get_user(bstat->bs_nlink,	&bstat32->bs_nlink)	||
	    get_user(bstat->bs_uid,	&bstat32->bs_uid)	||
	    get_user(bstat->bs_gid,	&bstat32->bs_gid)	||
	    get_user(bstat->bs_rdev,	&bstat32->bs_rdev)	||
	    get_user(bstat->bs_blksize,	&bstat32->bs_blksize)	||
	    get_user(bstat->bs_size,	&bstat32->bs_size)	||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks,	&bstat32->bs_blocks)	||
	    get_user(bstat->bs_xflags,	&bstat32->bs_xflags)	||
	    get_user(bstat->bs_extsize,	&bstat32->bs_extsize)	||
	    get_user(bstat->bs_extents,	&bstat32->bs_extents)	||
	    get_user(bstat->bs_gen,	&bstat32->bs_gen)	||
	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask)	||
	    get_user(bstat->bs_dmstate,	&bstat32->bs_dmstate)	||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */

STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32,	 &p32->tv_sec)	||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* Return 0 on success or positive error (to xfs_bulkstat()) */
STATIC int
xfs_bulkstat_one_fmt_compat(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	compat_xfs_bstat_t	__user *p32 = ubuffer;

	if (ubsize < sizeof(*p32))
		return XFS_ERROR(ENOMEM);

	if (put_user(buffer->bs_ino,	  &p32->bs_ino)		||
	    put_user(buffer->bs_mode,	  &p32->bs_mode)	||
	    put_user(buffer->bs_nlink,	  &p32->bs_nlink)	||
	    put_user(buffer->bs_uid,	  &p32->bs_uid)		||
	    put_user(buffer->bs_gid,	  &p32->bs_gid)		||
	    put_user(buffer->bs_rdev,	  &p32->bs_rdev)	||
	    put_user(buffer->bs_blksize,  &p32->bs_blksize)	||
	    put_user(buffer->bs_size,	  &p32->bs_size)	||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks,	  &p32->bs_blocks)	||
	    put_user(buffer->bs_xflags,	  &p32->bs_xflags)	||
	    put_user(buffer->bs_extsize,  &p32->bs_extsize)	||
	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi)	||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*p32);
	return 0;
}

STATIC int
xfs_bulkstat_one_compat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt_compat,
				    ubused, stat);
}

/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_bulkstat(
	xfs_mount_t		  *mp,
	unsigned int		  cmd,
	compat_xfs_fsop_bulkreq_t __user *p32)
{
	u32			addr;
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (get_user(addr, &p32->lastip))
		return -XFS_ERROR(EFAULT);
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -XFS_ERROR(EFAULT);
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -XFS_ERROR(EFAULT);
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS_32) {
		error = xfs_inumbers(mp, &inlast, &count,
				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t), NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		error = xfs_bulkstat(mp, &inlast, &count,
			xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
			bulkreq.ubuffer, &done);
	} else
		error = XFS_ERROR(EINVAL);
	if (error)
		return -error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
				 sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
	}

	return 0;
}

STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}

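/*
 * Compat variants of the *_by_handle ioctls: the handle request embedded
 * in each argument carries 32-bit user pointers, so it is translated via
 * xfs_compat_handlereq_to_dentry() and compat_ptr() before calling into
 * the common XFS attribute/DMAPI code.
 */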
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -ENOMEM;
	kbuf = kmem_zalloc(al_hreq.buflen, KM_SLEEP | KM_MAYFAIL);
	if (!kbuf) {
		kbuf = kmem_zalloc_large(al_hreq.buflen);
		if (!kbuf)
			goto out_dput;
	}

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
			       al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

out_kfree:
	if (is_vmalloc_addr(kbuf))
		kmem_free_large(kbuf);
	else
		kmem_free(kbuf);
out_dput:
	dput(dentry);
	return error;
}

STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	unsigned char				*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user((char *)attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write_file(parfilp);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					dentry->d_inode, attr_name,
					ops[i].am_flags);
			mnt_drop_write_file(parfilp);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = XFS_ERROR(EFAULT);

	kfree(attr_name);
out_kfree_ops:
	kfree(ops);
out_dput:
	dput(dentry);
	return -error;
}

STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
		error = -XFS_ERROR(EPERM);
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);
		goto out;
	}

	error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

out:
	dput(dentry);
	return error;
}

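/*
 * Main entry point for 32-bit ioctls issued on a 64-bit kernel: commands
 * whose argument structures have the same layout in both ABIs are passed
 * straight through to xfs_file_ioctl(); everything else is translated
 * field by field with the helpers above before calling the native code.
 */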
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			ioflags = 0;
	int			error;

	if (filp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	trace_xfs_file_compat_ioctl(ip);

	switch (cmd) {
	/* No size or alignment issues on any arch */
	case XFS_IOC_DIOINFO:
	case XFS_IOC_FSGEOMETRY:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
	case XFS_IOC_FSSETDM:
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
	case XFS_IOC_FSCOUNTS:
	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
	case XFS_IOC_FSGROWFSLOG:
	case XFS_IOC_GOINGDOWN:
	case XFS_IOC_ERROR_INJECTION:
	case XFS_IOC_ERROR_CLEARALL:
		return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
	/* These are handled fine if no alignment issues */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_FSGEOMETRY_V1:
	case XFS_IOC_FSGROWFSDATA:
	case XFS_IOC_FSGROWFSRT:
	case XFS_IOC_ZERO_RANGE:
		return xfs_file_ioctl(filp, cmd, p);
#else
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32:
	case XFS_IOC_ZERO_RANGE_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return -error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return -error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT_32: {
		struct xfs_swapext	  sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -XFS_ERROR(EFAULT);
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_swapext(&sxp);
		mnt_drop_write_file(filp);
		return -error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(filp, arg);
	default:
		return -XFS_ERROR(ENOIOCTLCMD);
	}
}