1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_rtalloc.h"
15 #include "xfs_iwalk.h"
16 #include "xfs_itable.h"
17 #include "xfs_error.h"
18 #include "xfs_da_format.h"
19 #include "xfs_da_btree.h"
20 #include "xfs_attr.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_fsops.h"
24 #include "xfs_discard.h"
25 #include "xfs_quota.h"
26 #include "xfs_trace.h"
27 #include "xfs_icache.h"
28 #include "xfs_trans.h"
29 #include "xfs_btree.h"
30 #include <linux/fsmap.h>
31 #include "xfs_fsmap.h"
32 #include "scrub/xfs_scrub.h"
33 #include "xfs_sb.h"
34 #include "xfs_ag.h"
35 #include "xfs_health.h"
36 #include "xfs_reflink.h"
37 #include "xfs_ioctl.h"
38 #include "xfs_xattr.h"
39 #include "xfs_rtbitmap.h"
40 #include "xfs_file.h"
41 #include "xfs_exchrange.h"
42 #include "xfs_handle.h"
43 #include "xfs_rtgroup.h"
44 #include "xfs_healthmon.h"
45 #include "xfs_verify_media.h"
46
47 #include <linux/mount.h>
48 #include <linux/fileattr.h>
49
50 /* Return 0 on success or positive error */
51 int
xfs_fsbulkstat_one_fmt(struct xfs_ibulk * breq,const struct xfs_bulkstat * bstat)52 xfs_fsbulkstat_one_fmt(
53 struct xfs_ibulk *breq,
54 const struct xfs_bulkstat *bstat)
55 {
56 struct xfs_bstat bs1;
57
58 xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
59 if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
60 return -EFAULT;
61 return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
62 }
63
64 int
xfs_fsinumbers_fmt(struct xfs_ibulk * breq,const struct xfs_inumbers * igrp)65 xfs_fsinumbers_fmt(
66 struct xfs_ibulk *breq,
67 const struct xfs_inumbers *igrp)
68 {
69 struct xfs_inogrp ig1;
70
71 xfs_inumbers_to_inogrp(&ig1, igrp);
72 if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
73 return -EFAULT;
74 return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
75 }
76
/*
 * Implement the v1 XFS_IOC_FSBULKSTAT{,_SINGLE} and XFS_IOC_FSINUMBERS
 * ioctls on top of the v5 bulk request back end.  The v1 ABI passes the
 * iteration cursor indirectly through bulkreq.lastip, so we read it here,
 * translate it for the new back end, and write the updated cursor back
 * on success.
 */
STATIC int
xfs_ioc_fsbulkstat(
	struct file		*file,
	unsigned int		cmd,
	void __user		*arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* The request header and the indirect cursor both come from userspace. */
	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	/* Both output pointers are optional in the v1 ABI. */
	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}
155
156 /* Return 0 on success or positive error */
157 static int
xfs_bulkstat_fmt(struct xfs_ibulk * breq,const struct xfs_bulkstat * bstat)158 xfs_bulkstat_fmt(
159 struct xfs_ibulk *breq,
160 const struct xfs_bulkstat *bstat)
161 {
162 if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
163 return -EFAULT;
164 return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
165 }
166
167 /*
168 * Check the incoming bulk request @hdr from userspace and initialize the
169 * internal @breq bulk request appropriately. Returns 0 if the bulk request
170 * should proceed; -ECANCELED if there's nothing to do; or the usual
171 * negative error code.
172 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	const struct xfs_bulk_ireq *hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	/* Reject empty requests, unknown flag bits, and nonzero padding. */
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			breq->startino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG. If @hdr->ino is
	 * beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->iwalk_flags |= XFS_IWALK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	if (hdr->flags & XFS_BULK_IREQ_NREXT64)
		breq->flags |= XFS_IBULK_NREXT64;

	/* Caller wants to see metadata directories in bulkstat output. */
	if (hdr->flags & XFS_BULK_IREQ_METADIR)
		breq->flags |= XFS_IBULK_METADIR;

	return 0;
}
245
246 /*
247 * Update the userspace bulk request @hdr to reflect the end state of the
248 * internal bulk request @breq.
249 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	/* Hand the final iteration cursor and output count back to the caller. */
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}
258
259 /* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct file		*file,
	unsigned int		cmd,
	struct xfs_bulkstat_req __user *arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(file))->i_mount;
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.idmap		= file_mnt_idmap(file),
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	/*
	 * -ECANCELED from setup means "nothing to report"; that is a
	 * successful (empty) result, so we still write back the header.
	 */
	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
300
301 STATIC int
xfs_inumbers_fmt(struct xfs_ibulk * breq,const struct xfs_inumbers * igrp)302 xfs_inumbers_fmt(
303 struct xfs_ibulk *breq,
304 const struct xfs_inumbers *igrp)
305 {
306 if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
307 return -EFAULT;
308 return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
309 }
310
311 /* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount	*mp,
	unsigned int		cmd,
	struct xfs_inumbers_req __user *arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	/* The metadata-directory flag only applies to bulkstat output. */
	if (hdr.flags & XFS_BULK_IREQ_METADIR)
		return -EINVAL;

	/*
	 * -ECANCELED from setup means "nothing to report"; that is a
	 * successful (empty) result, so we still write back the header.
	 */
	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
353
354 STATIC int
xfs_ioc_fsgeometry(struct xfs_mount * mp,void __user * arg,int struct_version)355 xfs_ioc_fsgeometry(
356 struct xfs_mount *mp,
357 void __user *arg,
358 int struct_version)
359 {
360 struct xfs_fsop_geom fsgeo;
361 size_t len;
362
363 xfs_fs_geometry(mp, &fsgeo, struct_version);
364
365 if (struct_version <= 3)
366 len = sizeof(struct xfs_fsop_geom_v1);
367 else if (struct_version == 4)
368 len = sizeof(struct xfs_fsop_geom_v4);
369 else {
370 xfs_fsop_geom_health(mp, &fsgeo);
371 len = sizeof(fsgeo);
372 }
373
374 if (copy_to_user(arg, &fsgeo, len))
375 return -EFAULT;
376 return 0;
377 }
378
STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void __user		*arg)
{
	struct xfs_perag	*pag;
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;
	/* Flags and reserved space must be zero on input. */
	if (ageo.ag_flags)
		return -EINVAL;
	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
		return -EINVAL;

	/* NULL presumably means ag_number is out of range -- treat as EINVAL. */
	pag = xfs_perag_get(mp, ageo.ag_number);
	if (!pag)
		return -EINVAL;

	error = xfs_ag_get_geometry(pag, &ageo);
	xfs_perag_put(pag);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}
408
STATIC int
xfs_ioc_rtgroup_geometry(
	struct xfs_mount	*mp,
	void __user		*arg)
{
	struct xfs_rtgroup	*rtg;
	struct xfs_rtgroup_geometry rgeo;
	int			error;

	if (copy_from_user(&rgeo, arg, sizeof(rgeo)))
		return -EFAULT;
	/* Flags and reserved space must be zero on input. */
	if (rgeo.rg_flags)
		return -EINVAL;
	if (memchr_inv(&rgeo.rg_reserved, 0, sizeof(rgeo.rg_reserved)))
		return -EINVAL;
	/* The ioctl only makes sense on filesystems with realtime groups. */
	if (!xfs_has_rtgroups(mp))
		return -EINVAL;

	/* NULL presumably means rg_number is out of range -- treat as EINVAL. */
	rtg = xfs_rtgroup_get(mp, rgeo.rg_number);
	if (!rtg)
		return -EINVAL;

	error = xfs_rtgroup_get_geometry(rtg, &rgeo);
	xfs_rtgroup_put(rtg);
	if (error)
		return error;

	if (copy_to_user(arg, &rgeo, sizeof(rgeo)))
		return -EFAULT;
	return 0;
}
440
441 /*
442 * Linux extended inode flags interface.
443 */
444
/*
 * Fill out an fsxattr structure from the given fork of an inode.  Both
 * visible callers in this file hold XFS_ILOCK_SHARED around the call.
 */
static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	int			whichfork,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	fileattr_fill_xflags(fa, xfs_ip2xflags(ip));

	if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
		fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
	} else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
		/*
		 * Don't let a misaligned extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
			fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
					    FS_XFLAG_EXTSZINHERIT);
			fa->fsx_extsize = 0;
		} else {
			fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
		}
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		/*
		 * Don't let a misaligned CoW extent size hint on a directory
		 * escape to userspace if it won't pass the setattr checks
		 * later.
		 */
		if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    ip->i_cowextsize % mp->m_sb.sb_rextsize > 0) {
			fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
			fa->fsx_cowextsize = 0;
		} else {
			fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
		}
	}

	fa->fsx_projid = ip->i_projid;
	/* Only count extents precisely if they're already in memory. */
	if (ifp && !xfs_need_iread_extents(ifp))
		fa->fsx_nextents = xfs_iext_count(ifp);
	else
		fa->fsx_nextents = xfs_ifork_nextents(ifp);
}
495
STATIC int
xfs_ioc_fsgetxattra(
	xfs_inode_t		*ip,
	void __user		*arg)
{
	struct file_kattr	fa;

	/* Snapshot the attribute-fork fsxattr data under the shared ilock. */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_ATTR_FORK, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return copy_fsxattr_to_user(&fa, arg);
}
509
/* VFS fileattr_get hook: report data-fork attributes for FS_IOC_FSGETXATTR. */
int
xfs_fileattr_get(
	struct dentry		*dentry,
	struct file_kattr	*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, XFS_DATA_FORK, fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return 0;
}
523
/*
 * Validate and apply the requested extended flags to the inode inside the
 * caller's transaction, then log the inode core.  Returns 0 or a negative
 * error code; on error nothing has been changed.
 */
static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME);
	uint64_t		i_flags2;

	if (rtflag != XFS_IS_REALTIME_INODE(ip)) {
		/* Can't change realtime flag if any extents are allocated. */
		if (xfs_inode_has_filedata(ip))
			return -EINVAL;

		/*
		 * If S_DAX is enabled on this file, we can only switch the
		 * device if both support fsdax.  We can't update S_DAX because
		 * there might be other threads walking down the access paths.
		 */
		if (IS_DAX(VFS_I(ip)) &&
		    (mp->m_ddev_targp->bt_daxdev == NULL ||
		     (mp->m_rtdev_targp &&
		      mp->m_rtdev_targp->bt_daxdev == NULL)))
			return -EINVAL;
	}

	if (rtflag) {
		/* If realtime flag is set then must have realtime device */
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    xfs_extlen_to_rtxmod(mp, ip->i_extsize))
			return -EINVAL;
	}

	/* diflags2 only valid for v3 inodes. */
	i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (i_flags2 && !xfs_has_v3inodes(mp))
		return -EINVAL;

	ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_diflags2 = i_flags2;

	xfs_diflags_to_iflags(ip, false);

	/*
	 * Make the stable writes flag match that of the device the inode
	 * resides on when flipping the RT flag.
	 */
	if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode))
		xfs_update_stable_writes(ip);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}
580
581 static void
xfs_ioctl_setattr_prepare_dax(struct xfs_inode * ip,struct file_kattr * fa)582 xfs_ioctl_setattr_prepare_dax(
583 struct xfs_inode *ip,
584 struct file_kattr *fa)
585 {
586 struct xfs_mount *mp = ip->i_mount;
587 struct inode *inode = VFS_I(ip);
588
589 if (S_ISDIR(inode->i_mode))
590 return;
591
592 if (xfs_has_dax_always(mp) || xfs_has_dax_never(mp))
593 return;
594
595 if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
596 !(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
597 (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
598 (ip->i_diflags2 & XFS_DIFLAG2_DAX)))
599 d_mark_dontcache(inode);
600 }
601
602 /*
603 * Set up the transaction structure for the setattr operation, checking that we
604 * have permission to do so. On success, return a clean transaction and the
605 * inode locked exclusively ready for further operation specific checks. On
606 * failure, return an error without modifying or locking the inode.
607 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	/* No attribute changes on a read-only or shut-down filesystem. */
	if (xfs_is_readonly(mp))
		goto out_error;
	error = -EIO;
	if (xfs_is_shutdown(mp))
		goto out_error;

	/* CAP_FOWNER lets the quota checks inside skip the ownership test. */
	error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
			has_capability_noaudit(current, CAP_FOWNER), &tp);
	if (error)
		goto out_error;

	/* Honor the mount-wide synchronous-write option. */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	return tp;

out_error:
	return ERR_PTR(error);
}
636
637 /*
638 * Validate a proposed extent size hint. For regular files, the hint can only
639 * be changed if no extents are allocated.
640 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint16_t		new_diflags;

	/* Nothing to validate for flag-only (FS_IOC_SETFLAGS) requests. */
	if (!fa->fsx_valid)
		return 0;

	/* Hint on a regular file can only change while it has no extents. */
	if (S_ISREG(VFS_I(ip)->i_mode) && xfs_inode_has_filedata(ip) &&
	    XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
		return -EINVAL;

	/* The hint must be a whole number of filesystem blocks. */
	if (fa->fsx_extsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);

	/*
	 * Inode verifiers do not check that the extent size hint is an integer
	 * multiple of the rt extent size on a directory with both rtinherit
	 * and extszinherit flags set.  Don't let sysadmins misconfigure
	 * directories.
	 */
	if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
	    (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
		unsigned int	rtextsize_bytes;

		rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
		if (fa->fsx_extsize % rtextsize_bytes)
			return -EINVAL;
	}

	failaddr = xfs_inode_validate_extsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_extsize),
			VFS_I(ip)->i_mode, new_diflags);
	return failaddr != NULL ? -EINVAL : 0;
}
682
/* Validate a proposed CoW extent size hint against the new flag state. */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct file_kattr	*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_failaddr_t		failaddr;
	uint64_t		new_diflags2;
	uint16_t		new_diflags;

	/* Nothing to validate for flag-only (FS_IOC_SETFLAGS) requests. */
	if (!fa->fsx_valid)
		return 0;

	/* The hint must be a whole number of filesystem blocks. */
	if (fa->fsx_cowextsize & mp->m_blockmask)
		return -EINVAL;

	new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
	new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);

	failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
			XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
			VFS_I(ip)->i_mode, new_diflags, new_diflags2);
	return failaddr != NULL ? -EINVAL : 0;
}
707
708 static int
xfs_ioctl_setattr_check_projid(struct xfs_inode * ip,struct file_kattr * fa)709 xfs_ioctl_setattr_check_projid(
710 struct xfs_inode *ip,
711 struct file_kattr *fa)
712 {
713 if (!fa->fsx_valid)
714 return 0;
715
716 /* Disallow 32bit project ids if 32bit IDs are not enabled. */
717 if (fa->fsx_projid > (uint16_t)-1 &&
718 !xfs_has_projid32(ip->i_mount))
719 return -EINVAL;
720 return 0;
721 }
722
/*
 * VFS fileattr_set hook: apply flag, project-ID, and extent size hint
 * changes from FS_IOC_SETFLAGS/FS_IOC_FSSETXATTR.  Returns 0 or a
 * negative error code.
 */
int
xfs_fileattr_set(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct file_kattr	*fa)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			error;

	trace_xfs_ioctl_setattr(ip);

	/* Flag-only requests may use only the flags we support. */
	if (!fa->fsx_valid) {
		if (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL |
				  FS_NOATIME_FL | FS_NODUMP_FL |
				  FS_SYNC_FL | FS_DAX_FL | FS_PROJINHERIT_FL))
			return -EOPNOTSUPP;
	}

	error = xfs_ioctl_setattr_check_projid(ip, fa);
	if (error)
		return error;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions.  Trying to do this later
	 * is messy.  We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (fa->fsx_valid && XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
				VFS_I(ip)->i_gid, fa->fsx_projid,
				XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
		if (error)
			return error;
	}

	xfs_ioctl_setattr_prepare_dax(ip, fa);

	tp = xfs_ioctl_setattr_get_trans(ip, pdqp);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto error_free_dquots;
	}

	error = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (error)
		goto error_trans_cancel;

	error = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (error)
		goto error_trans_cancel;

	/* Flag-only requests do not carry projid or extent size hints. */
	if (!fa->fsx_valid)
		goto skip_xattr;
	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */

	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(idmap, VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (ip->i_projid != fa->fsx_projid) {
		if (XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ip->i_projid = fa->fsx_projid;
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode.  If no extent size
	 * flags are set on the inode then unconditionally clear the extent
	 * size hint.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
	else
		ip->i_extsize = 0;

	if (xfs_has_v3inodes(mp)) {
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
		else
			ip->i_cowextsize = 0;
	}

skip_xattr:
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(pdqp);

	return error;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(pdqp);
	return error;
}
842
/*
 * Copy one bmap record out to userspace, writing only the fields that fit
 * in the caller's record size (struct getbmap is a prefix of getbmapx).
 * Returns false if any put_user() faults.
 */
static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	/* The v1 getbmap record ends here. */
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}
864
/*
 * Implement the GETBMAP/GETBMAPA/GETBMAPX ioctls: gather the file's block
 * mapping into a kernel buffer, then format it back to userspace record
 * by record.
 */
STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void __user		*arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		fallthrough;
	case XFS_IOC_GETBMAP:
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	/* Need room for at least the header record plus one mapping. */
	if (bmx.bmv_count < 2)
		return -EINVAL;
	/* Bound the allocation below to something sane. */
	if (bmx.bmv_count >= INT_MAX / recsize)
		return -ENOMEM;

	buf = kvzalloc_objs(*buf, bmx.bmv_count);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	/* Write the header, then each record, advancing through the buffer. */
	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kvfree(buf);
	return error;
}
923
/*
 * Validate both file descriptors named in an XFS_IOC_SWAPEXT request and
 * hand off to xfs_swap_extents().  The CLASS(fd, ...) declarations drop
 * the fd references automatically on every return path.
 */
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t     *ip, *tip;

	/* Pull information for the target fd */
	CLASS(fd, f)((int)sxp->sx_fdtarget);
	if (fd_empty(f))
		return -EINVAL;

	/* Both files must be open read/write and not append-only. */
	if (!(fd_file(f)->f_mode & FMODE_WRITE) ||
	    !(fd_file(f)->f_mode & FMODE_READ) ||
	    (fd_file(f)->f_flags & O_APPEND))
		return -EBADF;

	CLASS(fd, tmp)((int)sxp->sx_fdtmp);
	if (fd_empty(tmp))
		return -EINVAL;

	if (!(fd_file(tmp)->f_mode & FMODE_WRITE) ||
	    !(fd_file(tmp)->f_mode & FMODE_READ) ||
	    (fd_file(tmp)->f_flags & O_APPEND))
		return -EBADF;

	if (IS_SWAPFILE(file_inode(fd_file(f))) ||
	    IS_SWAPFILE(file_inode(fd_file(tmp))))
		return -EINVAL;

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (fd_file(f)->f_op != &xfs_file_operations ||
	    fd_file(tmp)->f_op != &xfs_file_operations)
		return -EINVAL;

	ip = XFS_I(file_inode(fd_file(f)));
	tip = XFS_I(file_inode(fd_file(tmp)));

	/* Same filesystem, but not the same inode. */
	if (ip->i_mount != tip->i_mount)
		return -EINVAL;

	if (ip->i_ino == tip->i_ino)
		return -EINVAL;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	return xfs_swap_extents(ip, tip, sxp);
}
976
/* Copy the filesystem label out to userspace, NUL-terminated. */
static int
xfs_ioc_getlabel(
	struct xfs_mount	*mp,
	char __user		*user_label)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];

	/* Paranoia */
	BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);

	/* 1 larger than sb_fname, so this ensures a trailing NUL char */
	spin_lock(&mp->m_sb_lock);
	memtostr_pad(label, sbp->sb_fname);
	spin_unlock(&mp->m_sb_lock);

	if (copy_to_user(user_label, label, sizeof(label)))
		return -EFAULT;
	return 0;
}
997
/*
 * Set the filesystem label from userspace: update the in-core superblock
 * under m_sb_lock, persist the primary and all secondary superblocks, and
 * invalidate the block device page cache so old label bytes can't be read
 * back by tools like blkid.
 */
static int
xfs_ioc_setlabel(
	struct file		*filp,
	struct xfs_mount	*mp,
	char __user		*newlabel)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];
	size_t			len;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
	 * smaller, at 12 bytes.  We copy one more to be sure we find the
	 * (required) NULL character to test the incoming label length.
	 * NB: The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
		return -EFAULT;
	len = strnlen(label, XFSLABEL_MAX + 1);
	if (len > sizeof(sbp->sb_fname))
		return -EINVAL;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	spin_lock(&mp->m_sb_lock);
	memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
	memcpy(sbp->sb_fname, label, len);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now we do several things to satisfy userspace.
	 * In addition to normal logging of the primary superblock, we also
	 * immediately write these changes to sector zero for the primary, then
	 * update all backup supers (as xfs_db does for a label change), then
	 * invalidate the block device page cache.  This is so that any prior
	 * buffered reads from userspace (i.e. from blkid) are invalidated,
	 * and userspace will see the newly-written label.
	 */
	error = xfs_sync_sb_buf(mp, true);
	if (error)
		goto out;
	/*
	 * growfs also updates backup supers so lock against that.
	 */
	mutex_lock(&mp->m_growlock);
	error = xfs_update_secondary_sbs(mp);
	mutex_unlock(&mp->m_growlock);

	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
	if (xfs_has_rtsb(mp) && mp->m_rtdev_targp)
		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);

out:
	mnt_drop_write_file(filp);
	return error;
}
1059
/*
 * Translate a userspace xfs_fs_eofblocks request into the internal icwalk
 * control structure, validating the version, flags, padding, and any
 * uid/gid filters.  Returns 0 or -EINVAL.
 */
static inline int
xfs_fs_eofblocks_from_user(
	struct xfs_fs_eofblocks	*src,
	struct xfs_icwalk	*dst)
{
	if (src->eof_version != XFS_EOFBLOCKS_VERSION)
		return -EINVAL;

	if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
		return -EINVAL;

	/* Padding must be zero so these bytes can be reused later. */
	if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
	    memchr_inv(src->pad64, 0, sizeof(src->pad64)))
		return -EINVAL;

	/* Map each userspace filter flag onto its icwalk equivalent. */
	dst->icw_flags = 0;
	if (src->eof_flags & XFS_EOF_FLAGS_SYNC)
		dst->icw_flags |= XFS_ICWALK_FLAG_SYNC;
	if (src->eof_flags & XFS_EOF_FLAGS_UID)
		dst->icw_flags |= XFS_ICWALK_FLAG_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID)
		dst->icw_flags |= XFS_ICWALK_FLAG_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_PRID)
		dst->icw_flags |= XFS_ICWALK_FLAG_PRID;
	if (src->eof_flags & XFS_EOF_FLAGS_MINFILESIZE)
		dst->icw_flags |= XFS_ICWALK_FLAG_MINFILESIZE;

	dst->icw_prid = src->eof_prid;
	dst->icw_min_file_size = src->eof_min_file_size;

	/* uid/gid filters are mapped into the caller's user namespace. */
	dst->icw_uid = INVALID_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
		dst->icw_uid = make_kuid(current_user_ns(), src->eof_uid);
		if (!uid_valid(dst->icw_uid))
			return -EINVAL;
	}

	dst->icw_gid = INVALID_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID) {
		dst->icw_gid = make_kgid(current_user_ns(), src->eof_gid);
		if (!gid_valid(dst->icw_gid))
			return -EINVAL;
	}
	return 0;
}
1105
/*
 * Handle XFS_IOC_GET_RESBLKS and XFS_IOC_SET_RESBLKS: the SET variant
 * adjusts the block reservation first, then both variants report the
 * current reservation state back to userspace.
 */
static int
xfs_ioctl_getset_resblocks(
	struct file		*filp,
	unsigned int		cmd,
	void __user		*arg)
{
	struct xfs_mount	*mp = XFS_I(file_inode(filp))->i_mount;
	struct xfs_fsop_resblks	fsop = { };
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (cmd == XFS_IOC_SET_RESBLKS) {
		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&fsop, arg, sizeof(fsop)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_reserve_blocks(mp, XC_FREE_BLOCKS, fsop.resblks);
		mnt_drop_write_file(filp);
		if (error)
			return error;
	}

	/* Snapshot the (possibly just-updated) reservation under m_sb_lock. */
	spin_lock(&mp->m_sb_lock);
	fsop.resblks = mp->m_free[XC_FREE_BLOCKS].res_total;
	fsop.resblks_avail = mp->m_free[XC_FREE_BLOCKS].res_avail;
	spin_unlock(&mp->m_sb_lock);

	if (copy_to_user(arg, &fsop, sizeof(fsop)))
		return -EFAULT;
	return 0;
}
1144
1145 static int
xfs_ioctl_fs_counts(struct xfs_mount * mp,struct xfs_fsop_counts __user * uarg)1146 xfs_ioctl_fs_counts(
1147 struct xfs_mount *mp,
1148 struct xfs_fsop_counts __user *uarg)
1149 {
1150 struct xfs_fsop_counts out = {
1151 .allocino = percpu_counter_read_positive(&mp->m_icount),
1152 .freeino = percpu_counter_read_positive(&mp->m_ifree),
1153 .freedata = xfs_estimate_freecounter(mp, XC_FREE_BLOCKS) -
1154 xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS),
1155 .freertx = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS),
1156 };
1157
1158 if (copy_to_user(uarg, &out, sizeof(out)))
1159 return -EFAULT;
1160 return 0;
1161 }
1162
1163 /*
1164 * These long-unused ioctls were removed from the official ioctl API in 5.17,
1165 * but retain these definitions so that we can log warnings about them.
1166 */
1167 #define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64)
1168 #define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64)
1169 #define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64)
1170 #define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64)
1171
1172 /*
1173 * Note: some of the ioctl's return positive numbers as a
1174 * byte count indicating success, such as readlink_by_handle.
1175 * So we don't "sign flip" like most other routines. This means
1176 * true errors need to be returned as a negative value.
1177 */
/*
 * Top-level dispatcher for XFS-specific file ioctls.  @p is the raw
 * userspace argument; most cases cast it to a __user pointer and hand
 * off to a per-command helper.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case FS_IOC_GETFSLABEL:
		return xfs_ioc_getlabel(mp, arg);
	case FS_IOC_SETFSLABEL:
		return xfs_ioc_setlabel(filp, mp, arg);
	/* Removed from the ioctl API in 5.17; warn once and reject. */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
		xfs_warn_once(mp,
	"%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported",
				current->comm);
		return -ENOTTY;
	case XFS_IOC_DIOINFO: {
		struct kstat		st;
		struct dioattr		da;

		/* Pull direct I/O alignment information from the VFS. */
		error = vfs_getattr(&filp->f_path, &st, STATX_DIOALIGN, 0);
		if (error)
			return error;

		/*
		 * Some userspace directly feeds the return value to
		 * posix_memalign, which fails for values that are smaller than
		 * the pointer size. Round up the value to not break userspace.
		 */
		da.d_mem = roundup(st.dio_mem_align, sizeof(void *));
		da.d_miniosz = st.dio_offset_align;
		/*
		 * NOTE(review): the mask math below assumes d_miniosz is a
		 * power of two — confirm against STATX_DIOALIGN semantics.
		 */
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_fsbulkstat(filp, cmd, arg);

	case XFS_IOC_BULKSTAT:
		return xfs_ioc_bulkstat(filp, cmd, arg);
	case XFS_IOC_INUMBERS:
		return xfs_ioc_inumbers(mp, cmd, arg);

	/* The last argument selects the geometry structure version emitted. */
	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry(mp, arg, 3);
	case XFS_IOC_FSGEOMETRY_V4:
		return xfs_ioc_fsgeometry(mp, arg, 4);
	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg, 5);

	case XFS_IOC_AG_GEOMETRY:
		return xfs_ioc_ag_geometry(mp, arg);
	case XFS_IOC_RTGROUP_GEOMETRY:
		return xfs_ioc_rtgroup_geometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattra(ip, arg);
	case XFS_IOC_GETPARENTS:
		return xfs_ioc_getparents(filp, arg);
	case XFS_IOC_GETPARENTS_BY_HANDLE:
		return xfs_ioc_getparents_by_handle(filp, arg);
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUBV_METADATA:
		return xfs_ioc_scrubv_metadata(filp, arg);
	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(filp, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		/* May return a positive byte count on success. */
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		/* Modifies the fs: hold a write reference on the mount. */
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS:
		return xfs_ioctl_fs_counts(mp, arg);

	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
		return xfs_ioctl_getset_resblocks(filp, cmd, arg);

	/* The three growfs variants all need a write reference held. */
	case XFS_IOC_FSGROWFSDATA: {
		struct xfs_growfs_data	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		struct xfs_growfs_log	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t		in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t		in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t	in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks	eofb;
		struct xfs_icwalk	icw;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (xfs_is_readonly(mp))
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		/* Translate the UAPI request into an icwalk control struct. */
		error = xfs_fs_eofblocks_from_user(&eofb, &icw);
		if (error)
			return error;

		trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);

		/* Scoped guard: released automatically when we return. */
		guard(super_write)(mp->m_super);
		return xfs_blockgc_free_space(mp, &icw);
	}

	case XFS_IOC_EXCHANGE_RANGE:
		return xfs_ioc_exchange_range(filp, arg);
	case XFS_IOC_START_COMMIT:
		return xfs_ioc_start_commit(filp, arg);
	case XFS_IOC_COMMIT_RANGE:
		return xfs_ioc_commit_range(filp, arg);

	case XFS_IOC_HEALTH_MONITOR:
		return xfs_ioc_health_monitor(filp, arg);
	case XFS_IOC_VERIFY_MEDIA:
		return xfs_ioc_verify_media(filp, arg);

	default:
		return -ENOTTY;
	}
}
1433