// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2022-2024 Oracle.
 * All rights reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_parent.h"
#include "xfs_handle.h"
#include "xfs_health.h"
#include "xfs_icache.h"
#include "xfs_export.h"
#include "xfs_xattr.h"
#include "xfs_acl.h"

#include <linux/namei.h>

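/*
 * Note on the helpers below: a file handle's fid_len counts only the part of
 * struct xfs_fid that follows the fid_len field itself (fid_pad, fid_gen and
 * fid_ino), while a filesystem handle is just the fsid with a zeroed fid.
 */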
static inline size_t
xfs_filehandle_fid_len(void)
{
	struct xfs_handle	*handle = NULL;

	return sizeof(struct xfs_fid) - sizeof(handle->ha_fid.fid_len);
}

static inline size_t
xfs_filehandle_init(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	uint32_t		gen,
	struct xfs_handle	*handle)
{
	memcpy(&handle->ha_fsid, mp->m_fixedfsid, sizeof(struct xfs_fsid));

	handle->ha_fid.fid_len = xfs_filehandle_fid_len();
	handle->ha_fid.fid_pad = 0;
	handle->ha_fid.fid_gen = gen;
	handle->ha_fid.fid_ino = ino;

	return sizeof(struct xfs_handle);
}

static inline size_t
xfs_fshandle_init(
	struct xfs_mount	*mp,
	struct xfs_handle	*handle)
{
	memcpy(&handle->ha_fsid, mp->m_fixedfsid, sizeof(struct xfs_fsid));
	memset(&handle->ha_fid, 0, sizeof(handle->ha_fid));

	return sizeof(struct xfs_fsid);
}

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *	returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *	returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *	returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = EMPTY_FD;
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!fd_file(f))
			return -EBADF;
		inode = file_inode(fd_file(f));
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on an XFS
	 * filesystem, and only for regular files, directories or symbolic
	 * links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE)
		hsize = xfs_fshandle_init(ip->i_mount, &handle);
	else
		hsize = xfs_filehandle_init(ip->i_mount, ip->i_ino,
				inode->i_generation, &handle);

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
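
/*
 * Illustrative userspace sketch (not compiled here): obtaining a file handle
 * with XFS_IOC_PATH_TO_HANDLE.  It assumes the xfsprogs uapi headers and a
 * hypothetical file on an XFS mount; the ioctl may be issued on any open fd
 * on that filesystem, and error handling is minimal.  Most callers use the
 * libhandle wrappers (path_to_handle(), path_to_fshandle()) from
 * <xfs/handle.h>, which issue these ioctls internally.
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	struct xfs_handle	handle;
 *	__u32			hlen = sizeof(handle);
 *	struct xfs_fsop_handlereq hreq = {
 *		.path     = "/mnt/scratch/file",
 *		.ohandle  = &handle,
 *		.ohandlen = &hlen,
 *	};
 *	int fsfd = open("/mnt/scratch", O_RDONLY);
 *
 *	if (fsfd < 0 || ioctl(fsfd, XFS_IOC_PATH_TO_HANDLE, &hreq) < 0)
 *		err(1, "XFS_IOC_PATH_TO_HANDLE");
 */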

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/* Convert handle already copied to kernel space into a dentry. */
static struct dentry *
xfs_khandle_to_dentry(
	struct file		*file,
	struct xfs_handle	*handle)
{
	struct xfs_fid64	fid = {
		.ino		= handle->ha_fid.fid_ino,
		.gen		= handle->ha_fid.fid_gen,
	};

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(file)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (handle->ha_fid.fid_len != xfs_filehandle_fid_len())
		return ERR_PTR(-EINVAL);

	return exportfs_decode_fh(file->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

/* Convert handle already copied to kernel space into an xfs_inode. */
static struct xfs_inode *
xfs_khandle_to_inode(
	struct file		*file,
	struct xfs_handle	*handle)
{
	struct xfs_inode	*ip = XFS_I(file_inode(file));
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode;

	if (!S_ISDIR(VFS_I(ip)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (handle->ha_fid.fid_len != xfs_filehandle_fid_len())
		return ERR_PTR(-EINVAL);

	inode = xfs_nfs_get_inode(mp->m_super, handle->ha_fid.fid_ino,
			handle->ha_fid.fid_gen);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return XFS_I(inode);
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);

	return xfs_khandle_to_dentry(parfilp, &handle);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

out_dput:
	dput(dentry);
	return error;
}
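
/*
 * Illustrative userspace sketch (not compiled here): reopening a file from a
 * previously fetched handle (see the sketch after xfs_find_handle).  The
 * caller needs CAP_SYS_ADMIN, the ioctl must be issued on an open directory
 * on the same filesystem, and the new file descriptor is the ioctl return
 * value.  libhandle's open_by_handle() wraps this; the directory path below
 * is hypothetical.
 *
 *	struct xfs_fsop_handlereq hreq = {
 *		.ihandle  = &handle,
 *		.ihandlen = sizeof(handle),
 *		.oflags   = O_RDONLY,
 *	};
 *	int dirfd = open("/mnt/scratch", O_RDONLY | O_DIRECTORY);
 *	int fd = ioctl(dirfd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 *
 *	if (fd < 0)
 *		err(1, "XFS_IOC_OPEN_BY_HANDLE");
 */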

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

out_dput:
	dput(dentry);
	return error;
}

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
static void
xfs_ioc_attr_put_listent(
	struct xfs_attr_list_context *context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	void			*value,
	int			valuelen)
{
	struct xfs_attrlist	*alist = context->buffer;
	struct xfs_attrlist_ent	*aep;
	int			arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);

	/* decrement by the actual bytes used by the attr */
	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
			namelen + 1, sizeof(uint32_t));
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = context->buffer + context->firstu;
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}
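
/*
 * Layout note for the buffer built above: the struct xfs_attrlist header
 * (al_count, al_more, al_offset[]) sits at offset zero and its al_offset[]
 * array grows upwards, while the variable-size xfs_attrlist_ent records
 * (a_valuelen plus a NUL-terminated a_name) are packed downwards from the
 * end of the buffer, with context->firstu tracking the low watermark.  The
 * listing stops and al_more is set once the two regions would collide.
 */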

static unsigned int
xfs_attr_filter(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_ROOT)
		return XFS_ATTR_ROOT;
	if (ioc_flags & XFS_IOC_ATTR_SECURE)
		return XFS_ATTR_SECURE;
	return 0;
}

static inline enum xfs_attr_update
xfs_xattr_flags(
	u32			ioc_flags,
	void			*value)
{
	if (!value)
		return XFS_ATTRUPDATE_REMOVE;
	if (ioc_flags & XFS_IOC_ATTR_CREATE)
		return XFS_ATTRUPDATE_CREATE;
	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
		return XFS_ATTRUPDATE_REPLACE;
	return XFS_ATTRUPDATE_UPSERT;
}

int
xfs_ioc_attr_list(
	struct xfs_inode	*dp,
	void __user		*ubuf,
	size_t			bufsize,
	int			flags,
	struct xfs_attrlist_cursor __user *ucursor)
{
	struct xfs_attr_list_context context = { };
	struct xfs_attrlist	*alist;
	void			*buffer;
	int			error;

	if (bufsize < sizeof(struct xfs_attrlist) ||
	    bufsize > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject all flags other than the namespace selectors.
	 */
	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;
	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	/*
	 * Validate the cursor.
	 */
	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
		return -EFAULT;
	if (context.cursor.pad1 || context.cursor.pad2)
		return -EINVAL;
	if (!context.cursor.initted &&
	    (context.cursor.hashval || context.cursor.blkno ||
	     context.cursor.offset))
		return -EINVAL;

	buffer = kvzalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Initialize the output buffer.
	 */
	context.dp = dp;
	context.resynch = 1;
	context.attr_filter = xfs_attr_filter(flags);
	context.buffer = buffer;
	context.bufsize = round_down(bufsize, sizeof(uint32_t));
	context.firstu = context.bufsize;
	context.put_listent = xfs_ioc_attr_put_listent;

	alist = context.buffer;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list(&context);
	if (error)
		goto out_free;

	if (copy_to_user(ubuf, buffer, bufsize) ||
	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
		error = -EFAULT;
out_free:
	kvfree(buffer);
	return error;
}

int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	struct xfs_fsop_attrlist_handlereq __user *p)
{
	struct xfs_fsop_attrlist_handlereq al_hreq;
	struct dentry		*dentry;
	int			error = -ENOMEM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
				  al_hreq.buflen, al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}
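
/*
 * Illustrative userspace sketch (not compiled here, headers as in the
 * earlier sketches): listing attribute names by handle and walking the
 * returned buffer.  dirfd is an open directory on the same filesystem and
 * the 64k buffer size is arbitrary.  The kernel updates the cursor at
 * al_hreq.pos in place, so the ioctl can simply be re-issued with the same
 * structure while the buffer's al_more flag is set.  libhandle's
 * attr_list_by_handle() wraps this.
 *
 *	char buf[65536];
 *	struct xfs_fsop_attrlist_handlereq al_hreq = {
 *		.hreq.ihandle  = &handle,
 *		.hreq.ihandlen = sizeof(handle),
 *		.buffer        = buf,
 *		.buflen        = sizeof(buf),
 *	};
 *	struct xfs_attrlist *alist = (struct xfs_attrlist *)buf;
 *	int i;
 *
 *	if (ioctl(dirfd, XFS_IOC_ATTRLIST_BY_HANDLE, &al_hreq) < 0)
 *		err(1, "XFS_IOC_ATTRLIST_BY_HANDLE");
 *	for (i = 0; i < alist->al_count; i++) {
 *		struct xfs_attrlist_ent *ent =
 *			(void *)(buf + alist->al_offset[i]);
 *		printf("%s (%u bytes)\n", ent->a_name, ent->a_valuelen);
 *	}
 */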

static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char		__user *ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kvfree(args.value);
	return error;
}

static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char	__user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_change(&args, xfs_xattr_flags(flags, args.value));
	if (!error && (flags & XFS_IOC_ATTR_ROOT))
		xfs_forget_acl(inode, name);
	kfree(args.value);
	return error;
}

int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		fallthrough;
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}

int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
out_dput:
	dput(dentry);
	return error;
}
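
/*
 * Illustrative userspace sketch (not compiled here): batching attribute
 * operations through XFS_IOC_ATTRMULTI_BY_HANDLE; libhandle's
 * attr_multi_by_handle() wraps this.  The attribute names below are
 * hypothetical and are looked up in the user namespace unless am_flags
 * selects XFS_IOC_ATTR_ROOT or XFS_IOC_ATTR_SECURE.  Per-op results come
 * back in am_error, and a successful ATTR_OP_GET updates am_length to the
 * actual value length.
 *
 *	char value[256];
 *	struct xfs_attr_multiop ops[2] = {
 *		{ .am_opcode = ATTR_OP_GET, .am_attrname = "origin",
 *		  .am_attrvalue = value, .am_length = sizeof(value) },
 *		{ .am_opcode = ATTR_OP_REMOVE, .am_attrname = "stale" },
 *	};
 *	struct xfs_fsop_attrmulti_handlereq am_hreq = {
 *		.hreq.ihandle  = &handle,
 *		.hreq.ihandlen = sizeof(handle),
 *		.opcount       = 2,
 *		.ops           = ops,
 *	};
 *
 *	if (ioctl(dirfd, XFS_IOC_ATTRMULTI_BY_HANDLE, &am_hreq) < 0)
 *		err(1, "XFS_IOC_ATTRMULTI_BY_HANDLE");
 */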

struct xfs_getparents_ctx {
	struct xfs_attr_list_context	context;
	struct xfs_getparents_by_handle	gph;

	/* File to target */
	struct xfs_inode		*ip;

	/* Internal buffer where we format records */
	void				*krecords;

	/* Last record filled out */
	struct xfs_getparents_rec	*lastrec;

	unsigned int			count;
};

static inline unsigned int
xfs_getparents_rec_sizeof(
	unsigned int		namelen)
{
	return round_up(sizeof(struct xfs_getparents_rec) + namelen + 1,
			sizeof(uint64_t));
}
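
/*
 * For example, a parent pointer whose dirent name is "foo" (namelen 3)
 * consumes sizeof(struct xfs_getparents_rec) + 4 bytes (name plus NUL),
 * rounded up to the next multiple of eight so that consecutive records in
 * the output buffer stay naturally aligned.
 */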

static void
xfs_getparents_put_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	void				*value,
	int				valuelen)
{
	struct xfs_getparents_ctx	*gpx =
		container_of(context, struct xfs_getparents_ctx, context);
	struct xfs_inode		*ip = context->dp;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_getparents_rec	*gpr = gpx->krecords + context->firstu;
	unsigned short			reclen =
		xfs_getparents_rec_sizeof(namelen);
	xfs_ino_t			ino;
	uint32_t			gen;
	int				error;

	if (!(flags & XFS_ATTR_PARENT))
		return;

	error = xfs_parent_from_attr(mp, flags, name, namelen, value, valuelen,
			&ino, &gen);
	if (error) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_PARENT);
		context->seen_enough = -EFSCORRUPTED;
		return;
	}

	/*
	 * We found a parent pointer, but we've filled up the buffer.  Signal
	 * to the caller that we did /not/ reach the end of the parent pointer
	 * recordset.
	 */
	if (context->firstu > context->bufsize - reclen) {
		context->seen_enough = 1;
		return;
	}

	/* Format the parent pointer directly into the caller buffer. */
	gpr->gpr_reclen = reclen;
	xfs_filehandle_init(mp, ino, gen, &gpr->gpr_parent);
	memcpy(gpr->gpr_name, name, namelen);
	gpr->gpr_name[namelen] = 0;

	trace_xfs_getparents_put_listent(ip, gp, context, gpr);

	context->firstu += reclen;
	gpx->count++;
	gpx->lastrec = gpr;
}

/* Expand the last record to fill the rest of the caller's buffer. */
static inline void
xfs_getparents_expand_lastrec(
	struct xfs_getparents_ctx	*gpx)
{
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_getparents_rec	*gpr = gpx->lastrec;

	if (!gpx->lastrec)
		gpr = gpx->krecords;

	gpr->gpr_reclen = gp->gp_bufsize - ((void *)gpr - gpx->krecords);

	trace_xfs_getparents_expand_lastrec(gpx->ip, gp, &gpx->context, gpr);
}

/* Retrieve the parent pointers for a given inode. */
STATIC int
xfs_getparents(
	struct xfs_getparents_ctx	*gpx)
{
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_inode		*ip = gpx->ip;
	struct xfs_mount		*mp = ip->i_mount;
	size_t				bufsize;
	int				error;

	/* Check size of buffer requested by user */
	if (gp->gp_bufsize > XFS_XATTR_LIST_MAX)
		return -ENOMEM;
	if (gp->gp_bufsize < xfs_getparents_rec_sizeof(1))
		return -EINVAL;

	if (gp->gp_iflags & ~XFS_GETPARENTS_IFLAGS_ALL)
		return -EINVAL;
	if (gp->gp_reserved)
		return -EINVAL;

	bufsize = round_down(gp->gp_bufsize, sizeof(uint64_t));
	gpx->krecords = kvzalloc(bufsize, GFP_KERNEL);
	if (!gpx->krecords) {
		bufsize = min(bufsize, PAGE_SIZE);
		gpx->krecords = kvzalloc(bufsize, GFP_KERNEL);
		if (!gpx->krecords)
			return -ENOMEM;
	}

	gpx->context.dp = ip;
	gpx->context.resynch = 1;
	gpx->context.put_listent = xfs_getparents_put_listent;
	gpx->context.bufsize = bufsize;
	/* firstu is used to track the bytes filled in the buffer */
	gpx->context.firstu = 0;

	/* Copy the cursor provided by caller */
	memcpy(&gpx->context.cursor, &gp->gp_cursor,
			sizeof(struct xfs_attrlist_cursor));
	gpx->count = 0;
	gp->gp_oflags = 0;

	trace_xfs_getparents_begin(ip, gp, &gpx->context.cursor);

	error = xfs_attr_list(&gpx->context);
	if (error)
		goto out_free_buf;
	if (gpx->context.seen_enough < 0) {
		error = gpx->context.seen_enough;
		goto out_free_buf;
	}
	xfs_getparents_expand_lastrec(gpx);

	/* Update the caller with the current cursor position */
	memcpy(&gp->gp_cursor, &gpx->context.cursor,
			sizeof(struct xfs_attrlist_cursor));

	/* Is this the root directory? */
	if (ip->i_ino == mp->m_sb.sb_rootino)
		gp->gp_oflags |= XFS_GETPARENTS_OFLAG_ROOT;

	if (gpx->context.seen_enough == 0) {
		/*
		 * If we did not run out of buffer space, then we reached the
		 * end of the pptr recordset, so set the DONE flag.
		 */
		gp->gp_oflags |= XFS_GETPARENTS_OFLAG_DONE;
	} else if (gpx->count == 0) {
		/*
		 * If we ran out of buffer space before copying any parent
		 * pointers at all, the caller's buffer was too short.  Tell
		 * userspace that, erm, the message is too long.
		 */
		error = -EMSGSIZE;
		goto out_free_buf;
	}

	trace_xfs_getparents_end(ip, gp, &gpx->context.cursor);

	ASSERT(gpx->context.firstu <= gpx->gph.gph_request.gp_bufsize);

	/* Copy the records to userspace. */
	if (copy_to_user(u64_to_user_ptr(gpx->gph.gph_request.gp_buffer),
			gpx->krecords, gpx->context.firstu))
		error = -EFAULT;

out_free_buf:
	kvfree(gpx->krecords);
	gpx->krecords = NULL;
	return error;
}

/* Retrieve the parents of this file and pass them back to userspace. */
int
xfs_ioc_getparents(
	struct file			*file,
	struct xfs_getparents __user	*ureq)
{
	struct xfs_getparents_ctx	gpx = {
		.ip			= XFS_I(file_inode(file)),
	};
	struct xfs_getparents		*kreq = &gpx.gph.gph_request;
	struct xfs_mount		*mp = gpx.ip->i_mount;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!xfs_has_parent(mp))
		return -EOPNOTSUPP;
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;

	error = xfs_getparents(&gpx);
	if (error)
		return error;

	if (copy_to_user(ureq, kreq, sizeof(*kreq)))
		return -EFAULT;

	return 0;
}
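
/*
 * Illustrative userspace sketch (not compiled here): iterating all parent
 * pointers of an open file fd with XFS_IOC_GETPARENTS.  The buffer size is
 * arbitrary; records are walked via gpr_reclen, and the loop repeats with
 * the kernel-updated gp_cursor until the DONE flag is set.  The buffer is
 * zeroed each round so that a call which copies out no records (empty
 * gpr_name, zero gpr_reclen) terminates the inner walk cleanly.
 *
 *	char buf[65536];
 *	struct xfs_getparents gp = {
 *		.gp_buffer  = (uintptr_t)buf,
 *		.gp_bufsize = sizeof(buf),
 *	};
 *
 *	do {
 *		struct xfs_getparents_rec *gpr = (void *)buf;
 *
 *		memset(buf, 0, sizeof(buf));
 *		if (ioctl(fd, XFS_IOC_GETPARENTS, &gp) < 0)
 *			err(1, "XFS_IOC_GETPARENTS");
 *		while ((char *)gpr < buf + sizeof(buf) && gpr->gpr_reclen) {
 *			if (gpr->gpr_name[0])
 *				printf("parent ino %llu: %s\n",
 *				    (unsigned long long)gpr->gpr_parent.ha_fid.fid_ino,
 *				    gpr->gpr_name);
 *			gpr = (void *)((char *)gpr + gpr->gpr_reclen);
 *		}
 *	} while (!(gp.gp_oflags & XFS_GETPARENTS_OFLAG_DONE));
 */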

/* Retrieve the parents of this file handle and pass them back to userspace. */
int
xfs_ioc_getparents_by_handle(
	struct file			*file,
	struct xfs_getparents_by_handle __user *ureq)
{
	struct xfs_getparents_ctx	gpx = { };
	struct xfs_inode		*ip = XFS_I(file_inode(file));
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_getparents_by_handle	*kreq = &gpx.gph;
	struct xfs_handle		*handle = &kreq->gph_handle;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!xfs_has_parent(mp))
		return -EOPNOTSUPP;
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;

	/*
	 * We don't use exportfs_decode_fh because it does too much work here.
	 * If the handle refers to a directory, the exportfs code will walk
	 * upwards through the directory tree to connect the dentries to the
	 * root directory dentry.  For GETPARENTS we don't care about that
	 * because we're not actually going to open a file descriptor; we only
	 * want to open an inode and read its parent pointers.
	 *
	 * Note that xfs_scrub uses GETPARENTS to log that it will try to fix a
	 * corrupted file's metadata.  For this usecase we would really rather
	 * userspace single-step the path reconstruction to avoid loops or
	 * other strange things if the directory tree is corrupt.
	 */
	gpx.ip = xfs_khandle_to_inode(file, handle);
	if (IS_ERR(gpx.ip))
		return PTR_ERR(gpx.ip);

	error = xfs_getparents(&gpx);
	if (error)
		goto out_rele;

	if (copy_to_user(ureq, kreq, sizeof(*kreq)))
		error = -EFAULT;

out_rele:
	xfs_irele(gpx.ip);
	return error;
}