1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * File operations used by nfsd. Some of these have been ripped from
4 * other parts of the kernel because they weren't exported, others
5 * are partial duplicates with added or changed functionality.
6 *
7 * Note that several functions dget() the dentry upon which they want
8 * to act, most notably those that create directory entries. Response
9 * dentry's are dput()'d if necessary in the release callback.
10 * So if you notice code paths that apparently fail to dput() the
11 * dentry, don't worry--they have been taken care of.
12 *
13 * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
14 * Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
15 */
16
17 #include <linux/fs.h>
18 #include <linux/file.h>
19 #include <linux/splice.h>
20 #include <linux/falloc.h>
21 #include <linux/fcntl.h>
22 #include <linux/namei.h>
23 #include <linux/delay.h>
24 #include <linux/fsnotify.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/xattr.h>
27 #include <linux/jhash.h>
28 #include <linux/pagemap.h>
29 #include <linux/slab.h>
30 #include <linux/uaccess.h>
31 #include <linux/exportfs.h>
32 #include <linux/writeback.h>
33 #include <linux/security.h>
34 #include <linux/sunrpc/xdr.h>
35
36 #include "xdr3.h"
37
38 #ifdef CONFIG_NFSD_V4
39 #include "acl.h"
40 #include "idmap.h"
41 #include "xdr4.h"
42 #endif /* CONFIG_NFSD_V4 */
43
44 #include "nfsd.h"
45 #include "vfs.h"
46 #include "filecache.h"
47 #include "trace.h"
48
49 #define NFSDDBG_FACILITY NFSDDBG_FILEOP
50
51 bool nfsd_disable_splice_read __read_mostly;
52 u64 nfsd_io_cache_read __read_mostly = NFSD_IO_BUFFERED;
53 u64 nfsd_io_cache_write __read_mostly = NFSD_IO_BUFFERED;
54
55 /**
56 * nfserrno - Map Linux errnos to NFS errnos
57 * @errno: POSIX(-ish) error code to be mapped
58 *
59 * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
60 * it's an error we don't expect, log it once and return nfserr_io.
61 */
62 __be32
nfserrno(int errno)63 nfserrno (int errno)
64 {
65 static struct {
66 __be32 nfserr;
67 int syserr;
68 } nfs_errtbl[] = {
69 { nfs_ok, 0 },
70 { nfserr_perm, -EPERM },
71 { nfserr_noent, -ENOENT },
72 { nfserr_io, -EIO },
73 { nfserr_nxio, -ENXIO },
74 { nfserr_fbig, -E2BIG },
75 { nfserr_stale, -EBADF },
76 { nfserr_acces, -EACCES },
77 { nfserr_exist, -EEXIST },
78 { nfserr_xdev, -EXDEV },
79 { nfserr_nodev, -ENODEV },
80 { nfserr_notdir, -ENOTDIR },
81 { nfserr_isdir, -EISDIR },
82 { nfserr_inval, -EINVAL },
83 { nfserr_fbig, -EFBIG },
84 { nfserr_nospc, -ENOSPC },
85 { nfserr_rofs, -EROFS },
86 { nfserr_mlink, -EMLINK },
87 { nfserr_nametoolong, -ENAMETOOLONG },
88 { nfserr_notempty, -ENOTEMPTY },
89 { nfserr_dquot, -EDQUOT },
90 { nfserr_stale, -ESTALE },
91 { nfserr_jukebox, -ETIMEDOUT },
92 { nfserr_jukebox, -ERESTARTSYS },
93 { nfserr_jukebox, -EAGAIN },
94 { nfserr_jukebox, -EWOULDBLOCK },
95 { nfserr_jukebox, -ENOMEM },
96 { nfserr_io, -ETXTBSY },
97 { nfserr_notsupp, -EOPNOTSUPP },
98 { nfserr_toosmall, -ETOOSMALL },
99 { nfserr_serverfault, -ESERVERFAULT },
100 { nfserr_serverfault, -ENFILE },
101 { nfserr_io, -EREMOTEIO },
102 { nfserr_stale, -EOPENSTALE },
103 { nfserr_io, -EUCLEAN },
104 { nfserr_perm, -ENOKEY },
105 { nfserr_no_grace, -ENOGRACE},
106 { nfserr_io, -EBADMSG },
107 };
108 int i;
109
110 for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
111 if (nfs_errtbl[i].syserr == errno)
112 return nfs_errtbl[i].nfserr;
113 }
114 WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
115 return nfserr_io;
116 }
117
118 /*
119 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
120 * a mount point.
121 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
122 * or nfs_ok having possibly changed *dpp and *expp
123 */
int
nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
		        struct svc_export **expp)
{
	struct svc_export *exp = *expp, *exp2 = NULL;
	struct dentry *dentry = *dpp;
	/* Take our own mnt/dentry references; follow_down() may swap
	 * the path contents, consuming and replacing these. */
	struct path path = {.mnt = mntget(exp->ex_path.mnt),
			    .dentry = dget(dentry)};
	unsigned int follow_flags = 0;
	int err = 0;

	/* Only trigger automounts when the export permits crossing mounts. */
	if (exp->ex_flags & NFSEXP_CROSSMOUNT)
		follow_flags = LOOKUP_AUTOMOUNT;

	err = follow_down(&path, follow_flags);
	if (err < 0)
		goto out;
	/* If follow_down() did not move us anywhere, the dentry may still
	 * look managed; nfsd_mountpoint() == 2 means it is only a
	 * mountpoint in some other namespace, so there is nothing to do. */
	if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
	    nfsd_mountpoint(dentry, exp) == 2) {
		/* This is only a mountpoint in some other namespace */
		path_put(&path);
		goto out;
	}

	exp2 = rqst_exp_get_by_name(rqstp, &path);
	if (IS_ERR(exp2)) {
		err = PTR_ERR(exp2);
		/*
		 * We normally allow NFS clients to continue
		 * "underneath" a mountpoint that is not exported.
		 * The exception is V4ROOT, where no traversal is ever
		 * allowed without an explicit export of the new
		 * directory.
		 */
		if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
			err = 0;
		path_put(&path);
		goto out;
	}
	if (nfsd_v4client(rqstp) ||
	    (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
		/* successfully crossed mount point */
		/*
		 * This is subtle: path.dentry is *not* on path.mnt
		 * at this point.  The only reason we are safe is that
		 * original mnt is pinned down by exp, so we should
		 * put path *before* putting exp
		 */
		*dpp = path.dentry;
		path.dentry = dentry;
		*expp = exp2;
		exp2 = exp;
	}
	path_put(&path);
	exp_put(exp2);
out:
	return err;
}
182
follow_to_parent(struct path * path)183 static void follow_to_parent(struct path *path)
184 {
185 struct dentry *dp;
186
187 while (path->dentry == path->mnt->mnt_root && follow_up(path))
188 ;
189 dp = dget_parent(path->dentry);
190 dput(path->dentry);
191 path->dentry = dp;
192 }
193
nfsd_lookup_parent(struct svc_rqst * rqstp,struct dentry * dparent,struct svc_export ** exp,struct dentry ** dentryp)194 static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
195 {
196 struct svc_export *exp2;
197 struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
198 .dentry = dget(dparent)};
199
200 follow_to_parent(&path);
201
202 exp2 = rqst_exp_parent(rqstp, &path);
203 if (PTR_ERR(exp2) == -ENOENT) {
204 *dentryp = dget(dparent);
205 } else if (IS_ERR(exp2)) {
206 path_put(&path);
207 return PTR_ERR(exp2);
208 } else {
209 *dentryp = dget(path.dentry);
210 exp_put(*exp);
211 *exp = exp2;
212 }
213 path_put(&path);
214 return 0;
215 }
216
217 /*
218 * For nfsd purposes, we treat V4ROOT exports as though there was an
219 * export at *every* directory.
220 * We return:
221 * '1' if this dentry *must* be an export point,
222 * '2' if it might be, if there is really a mount here, and
223 * '0' if there is no chance of an export point here.
224 */
nfsd_mountpoint(struct dentry * dentry,struct svc_export * exp)225 int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
226 {
227 if (!d_inode(dentry))
228 return 0;
229 if (exp->ex_flags & NFSEXP_V4ROOT)
230 return 1;
231 if (nfsd4_is_junction(dentry))
232 return 1;
233 if (d_managed(dentry))
234 /*
235 * Might only be a mountpoint in a different namespace,
236 * but we need to check.
237 */
238 return 2;
239 return 0;
240 }
241
/*
 * Resolve @name (length @len) in the directory given by @fhp without
 * following symlinks, handling "." and ".." and mountpoint crossing.
 * On success *dentry_ret and *exp_ret each hold a new reference the
 * caller must release; on failure both are left untouched.
 */
__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   const char *name, unsigned int len,
		   struct svc_export **exp_ret, struct dentry **dentry_ret)
{
	struct svc_export *exp;
	struct dentry *dparent;
	struct dentry *dentry;
	int host_err;

	trace_nfsd_vfs_lookup(rqstp, fhp, name, len);

	dparent = fhp->fh_dentry;
	exp = exp_get(fhp->fh_export);

	/* Lookup the name, but don't follow links */
	if (isdotent(name, len)) {
		if (len==1)
			/* "." is the directory itself */
			dentry = dget(dparent);
		else if (dparent != exp->ex_path.dentry)
			/* ".." within the export */
			dentry = dget_parent(dparent);
		else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
			dentry = dget(dparent); /* .. == . just like at / */
		else {
			/* checking mountpoint crossing is very different when stepping up */
			host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
			if (host_err)
				goto out_nfserr;
		}
	} else {
		dentry = lookup_one_unlocked(&nop_mnt_idmap,
					     &QSTR_LEN(name, len), dparent);
		host_err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_nfserr;
		/* The result may be covered by a mount: possibly cross it,
		 * which can replace both dentry and exp. */
		if (nfsd_mountpoint(dentry, exp)) {
			host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
			if (host_err) {
				dput(dentry);
				goto out_nfserr;
			}
		}
	}
	*dentry_ret = dentry;
	*exp_ret = exp;
	return 0;

out_nfserr:
	exp_put(exp);
	return nfserrno(host_err);
}
293
294 /**
295 * nfsd_lookup - look up a single path component for nfsd
296 *
297 * @rqstp: the request context
298 * @fhp: the file handle of the directory
299 * @name: the component name, or %NULL to look up parent
300 * @len: length of name to examine
301 * @resfh: pointer to pre-initialised filehandle to hold result.
302 *
303 * Look up one component of a pathname.
304 * N.B. After this call _both_ fhp and resfh need an fh_put
305 *
306 * If the lookup would cross a mountpoint, and the mounted filesystem
307 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
308 * accepted as it stands and the mounted directory is
309 * returned. Otherwise the covered directory is returned.
310 * NOTE: this mountpoint crossing is not supported properly by all
311 * clients and is explicitly disallowed for NFSv3
312 *
313 */
314 __be32
nfsd_lookup(struct svc_rqst * rqstp,struct svc_fh * fhp,const char * name,unsigned int len,struct svc_fh * resfh)315 nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
316 unsigned int len, struct svc_fh *resfh)
317 {
318 struct svc_export *exp;
319 struct dentry *dentry;
320 __be32 err;
321
322 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
323 if (err)
324 return err;
325 err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
326 if (err)
327 return err;
328 err = check_nfsd_access(exp, rqstp, false);
329 if (err)
330 goto out;
331 /*
332 * Note: we compose the file handle now, but as the
333 * dentry may be negative, it may need to be updated.
334 */
335 err = fh_compose(resfh, exp, dentry, fhp);
336 if (!err && d_really_is_negative(dentry))
337 err = nfserr_noent;
338 out:
339 dput(dentry);
340 exp_put(exp);
341 return err;
342 }
343
344 static void
commit_reset_write_verifier(struct nfsd_net * nn,struct svc_rqst * rqstp,int err)345 commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
346 int err)
347 {
348 switch (err) {
349 case -EAGAIN:
350 case -ESTALE:
351 /*
352 * Neither of these are the result of a problem with
353 * durable storage, so avoid a write verifier reset.
354 */
355 break;
356 default:
357 nfsd_reset_write_verifier(nn);
358 trace_nfsd_writeverf_reset(nn, rqstp, err);
359 }
360 }
361
362 /*
363 * Commit metadata changes to stable storage.
364 */
365 static int
commit_inode_metadata(struct inode * inode)366 commit_inode_metadata(struct inode *inode)
367 {
368 const struct export_operations *export_ops = inode->i_sb->s_export_op;
369
370 if (export_ops->commit_metadata)
371 return export_ops->commit_metadata(inode);
372 return sync_inode_metadata(inode, 1);
373 }
374
375 static int
commit_metadata(struct svc_fh * fhp)376 commit_metadata(struct svc_fh *fhp)
377 {
378 struct inode *inode = d_inode(fhp->fh_dentry);
379
380 if (!EX_ISSYNC(fhp->fh_export))
381 return 0;
382 return commit_inode_metadata(inode);
383 }
384
385 /*
386 * Go over the attributes and take care of the small differences between
387 * NFS semantics and what Linux expects.
388 */
389 static void
nfsd_sanitize_attrs(struct inode * inode,struct iattr * iap)390 nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
391 {
392 /* Ignore mode updates on symlinks */
393 if (S_ISLNK(inode->i_mode))
394 iap->ia_valid &= ~ATTR_MODE;
395
396 /* sanitize the mode change */
397 if (iap->ia_valid & ATTR_MODE) {
398 iap->ia_mode &= S_IALLUGO;
399 iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
400 }
401
402 /* Revoke setuid/setgid on chown */
403 if (!S_ISDIR(inode->i_mode) &&
404 ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
405 iap->ia_valid |= ATTR_KILL_PRIV;
406 if (iap->ia_valid & ATTR_MODE) {
407 /* we're setting mode too, just clear the s*id bits */
408 iap->ia_mode &= ~S_ISUID;
409 if (iap->ia_mode & S_IXGRP)
410 iap->ia_mode &= ~S_ISGID;
411 } else {
412 /* set ATTR_KILL_* bits and let VFS handle it */
413 iap->ia_valid |= ATTR_KILL_SUID;
414 iap->ia_valid |=
415 setattr_should_drop_sgid(&nop_mnt_idmap, inode);
416 }
417 }
418 }
419
420 static __be32
nfsd_get_write_access(struct svc_rqst * rqstp,struct svc_fh * fhp,struct iattr * iap)421 nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
422 struct iattr *iap)
423 {
424 struct inode *inode = d_inode(fhp->fh_dentry);
425
426 if (iap->ia_size < inode->i_size) {
427 __be32 err;
428
429 err = nfsd_permission(&rqstp->rq_cred,
430 fhp->fh_export, fhp->fh_dentry,
431 NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
432 if (err)
433 return err;
434 }
435 return nfserrno(get_write_access(inode));
436 }
437
__nfsd_setattr(struct dentry * dentry,struct iattr * iap)438 static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
439 {
440 int host_err;
441
442 if (iap->ia_valid & ATTR_SIZE) {
443 /*
444 * RFC5661, Section 18.30.4:
445 * Changing the size of a file with SETATTR indirectly
446 * changes the time_modify and change attributes.
447 *
448 * (and similar for the older RFCs)
449 */
450 struct iattr size_attr = {
451 .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
452 .ia_size = iap->ia_size,
453 };
454
455 if (iap->ia_size < 0)
456 return -EFBIG;
457
458 host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL);
459 if (host_err)
460 return host_err;
461 iap->ia_valid &= ~ATTR_SIZE;
462
463 /*
464 * Avoid the additional setattr call below if the only other
465 * attribute that the client sends is the mtime, as we update
466 * it as part of the size change above.
467 */
468 if ((iap->ia_valid & ~ATTR_MTIME) == 0)
469 return 0;
470 }
471
472 if ((iap->ia_valid & ~ATTR_DELEG) == 0)
473 return 0;
474
475 /*
476 * If ATTR_DELEG is set, then this is an update from a client that
477 * holds a delegation. If this is an update for only the atime, the
478 * ctime should not be changed. If the update contains the mtime
479 * too, then ATTR_CTIME should already be set.
480 */
481 if (!(iap->ia_valid & ATTR_DELEG))
482 iap->ia_valid |= ATTR_CTIME;
483
484 return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
485 }
486
487 /**
488 * nfsd_setattr - Set various file attributes.
489 * @rqstp: controlling RPC transaction
490 * @fhp: filehandle of target
491 * @attr: attributes to set
492 * @guardtime: do not act if ctime.tv_sec does not match this timestamp
493 *
494 * This call may adjust the contents of @attr (in particular, this
495 * call may change the bits in the na_iattr.ia_valid field).
496 *
497 * Returns nfs_ok on success, otherwise an NFS status code is
498 * returned. Caller must release @fhp by calling fh_put in either
499 * case.
500 */
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
	     struct nfsd_attrs *attr, const struct timespec64 *guardtime)
{
	struct dentry *dentry;
	struct inode *inode;
	struct iattr *iap = attr->na_iattr;
	int accmode = NFSD_MAY_SATTR;
	umode_t ftype = 0;
	__be32 err;
	int host_err = 0;
	bool get_write_count;
	bool size_change = (iap->ia_valid & ATTR_SIZE);
	int retries;

	trace_nfsd_vfs_setattr(rqstp, fhp, iap, guardtime);

	/* A size change is a write and is only valid on regular files. */
	if (iap->ia_valid & ATTR_SIZE) {
		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
		ftype = S_IFREG;
	}

	/*
	 * If utimes(2) and friends are called with times not NULL, we should
	 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
	 * will return EACCES, when the caller's effective UID does not match
	 * the owner of the file, and the caller is not privileged. In this
	 * situation, we should return EPERM(notify_change will return this).
	 */
	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
		accmode |= NFSD_MAY_OWNER_OVERRIDE;
		if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
			accmode |= NFSD_MAY_WRITE;
	}

	/* Callers that do fh_verify should do the fh_want_write: */
	get_write_count = !fhp->fh_dentry;

	/* Get inode */
	err = fh_verify(rqstp, fhp, ftype, accmode);
	if (err)
		return err;
	if (get_write_count) {
		host_err = fh_want_write(fhp);
		if (host_err)
			goto out;
	}

	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);

	nfsd_sanitize_attrs(inode, iap);

	/*
	 * The size case is special, it changes the file in addition to the
	 * attributes, and file systems don't expect it to be mixed with
	 * "random" attribute changes.  We thus split out the size change
	 * into a separate call to ->setattr, and do the rest as a separate
	 * setattr call.
	 */
	if (size_change) {
		err = nfsd_get_write_access(rqstp, fhp, iap);
		if (err)
			return err;
	}

	inode_lock(inode);
	err = fh_fill_pre_attrs(fhp);
	if (err)
		goto out_unlock;

	if (guardtime) {
		/* Guard check: compare only the low 32 bits of the
		 * seconds (note the explicit (u32) casts), presumably
		 * matching what fits in the protocol's guard field. */
		struct timespec64 ctime = inode_get_ctime(inode);
		if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec ||
		    guardtime->tv_nsec != ctime.tv_nsec) {
			err = nfserr_notsync;
			goto out_fill_attrs;
		}
	}

	/* Retry at most once if a delegation has to be recalled first. */
	for (retries = 1;;) {
		struct iattr attrs;

		/*
		 * notify_change() can alter its iattr argument, making
		 * @iap unsuitable for submission multiple times. Make a
		 * copy for every loop iteration.
		 */
		attrs = *iap;
		host_err = __nfsd_setattr(dentry, &attrs);
		if (host_err != -EAGAIN || !retries--)
			break;
		if (!nfsd_wait_for_delegreturn(rqstp, inode))
			break;
	}
	/* Apply any security label and POSIX ACLs, recording their
	 * individual errors in @attr rather than failing the whole op. */
	if (attr->na_seclabel && attr->na_seclabel->len)
		attr->na_labelerr = security_inode_setsecctx(dentry,
			attr->na_seclabel->data, attr->na_seclabel->len);
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl)
		attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
					       dentry, ACL_TYPE_ACCESS,
					       attr->na_pacl);
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) &&
	    !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode))
		attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_DEFAULT,
						attr->na_dpacl);
out_fill_attrs:
	/*
	 * RFC 1813 Section 3.3.2 does not mandate that an NFS server
	 * returns wcc_data for SETATTR. Some client implementations
	 * depend on receiving wcc_data, however, to sort out partial
	 * updates (eg., the client requested that size and mode be
	 * modified, but the server changed only the file mode).
	 */
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(inode);
	if (size_change)
		put_write_access(inode);
out:
	if (!host_err)
		host_err = commit_metadata(fhp);
	return err != 0 ? err : nfserrno(host_err);
}
626
627 #if defined(CONFIG_NFSD_V4)
628 /*
629 * NFS junction information is stored in an extended attribute.
630 */
631 #define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs"
632
633 /**
634 * nfsd4_is_junction - Test if an object could be an NFS junction
635 *
636 * @dentry: object to test
637 *
638 * Returns 1 if "dentry" appears to contain NFS junction information.
639 * Otherwise 0 is returned.
640 */
nfsd4_is_junction(struct dentry * dentry)641 int nfsd4_is_junction(struct dentry *dentry)
642 {
643 struct inode *inode = d_inode(dentry);
644
645 if (inode == NULL)
646 return 0;
647 if (inode->i_mode & S_IXUGO)
648 return 0;
649 if (!(inode->i_mode & S_ISVTX))
650 return 0;
651 if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME,
652 NULL, 0) <= 0)
653 return 0;
654 return 1;
655 }
656
nfsd4_get_cstate(struct svc_rqst * rqstp)657 static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
658 {
659 return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
660 }
661
nfsd4_clone_file_range(struct svc_rqst * rqstp,struct nfsd_file * nf_src,u64 src_pos,struct nfsd_file * nf_dst,u64 dst_pos,u64 count,bool sync)662 __be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
663 struct nfsd_file *nf_src, u64 src_pos,
664 struct nfsd_file *nf_dst, u64 dst_pos,
665 u64 count, bool sync)
666 {
667 struct file *src = nf_src->nf_file;
668 struct file *dst = nf_dst->nf_file;
669 errseq_t since;
670 loff_t cloned;
671 __be32 ret = 0;
672
673 since = READ_ONCE(dst->f_wb_err);
674 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
675 if (cloned < 0) {
676 ret = nfserrno(cloned);
677 goto out_err;
678 }
679 if (count && cloned != count) {
680 ret = nfserrno(-EINVAL);
681 goto out_err;
682 }
683 if (sync) {
684 loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
685 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
686
687 if (!status)
688 status = filemap_check_wb_err(dst->f_mapping, since);
689 if (!status)
690 status = commit_inode_metadata(file_inode(src));
691 if (status < 0) {
692 struct nfsd_net *nn = net_generic(nf_dst->nf_net,
693 nfsd_net_id);
694
695 trace_nfsd_clone_file_range_err(rqstp,
696 &nfsd4_get_cstate(rqstp)->save_fh,
697 src_pos,
698 &nfsd4_get_cstate(rqstp)->current_fh,
699 dst_pos,
700 count, status);
701 commit_reset_write_verifier(nn, rqstp, status);
702 ret = nfserrno(status);
703 }
704 }
705 out_err:
706 return ret;
707 }
708
nfsd_copy_file_range(struct file * src,u64 src_pos,struct file * dst,u64 dst_pos,u64 count)709 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
710 u64 dst_pos, u64 count)
711 {
712 ssize_t ret;
713
714 /*
715 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
716 * thread and client rpc slot. The choice of 4MB is somewhat
717 * arbitrary. We might instead base this on r/wsize, or make it
718 * tunable, or use a time instead of a byte limit, or implement
719 * asynchronous copy. In theory a client could also recognize a
720 * limit like this and pipeline multiple COPY requests.
721 */
722 count = min_t(u64, count, 1 << 22);
723 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
724
725 if (ret == -EOPNOTSUPP || ret == -EXDEV)
726 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
727 COPY_FILE_SPLICE);
728 return ret;
729 }
730
nfsd4_vfs_fallocate(struct svc_rqst * rqstp,struct svc_fh * fhp,struct file * file,loff_t offset,loff_t len,int flags)731 __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
732 struct file *file, loff_t offset, loff_t len,
733 int flags)
734 {
735 int error;
736
737 if (!S_ISREG(file_inode(file)->i_mode))
738 return nfserr_inval;
739
740 error = vfs_fallocate(file, flags, offset, len);
741 if (!error)
742 error = commit_metadata(fhp);
743
744 return nfserrno(error);
745 }
746 #endif /* defined(CONFIG_NFSD_V4) */
747
748 /*
749 * Check server access rights to a file system object
750 */
struct accessmap {
	u32		access;	/* NFS3/NFS4 ACCESS request bit */
	int		how;	/* corresponding NFSD_MAY_* permission mask */
};

/* ACCESS bit to permission mapping for regular files. */
static struct accessmap	nfs3_regaccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_TRUNC	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE			},

#ifdef CONFIG_NFSD_V4
    {	NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
    {	NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
    {	NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

    {	0,			0				}
};

/* ACCESS bit to permission mapping for directories. */
static struct accessmap	nfs3_diraccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_LOOKUP,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_EXEC|NFSD_MAY_WRITE	},
    {	NFS3_ACCESS_DELETE,	NFSD_MAY_REMOVE			},

#ifdef CONFIG_NFSD_V4
    {	NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
    {	NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
    {	NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

    {	0,			0				}
};

/* ACCESS bit to permission mapping for all other object types. */
static struct accessmap	nfs3_anyaccess[] = {
	/* Some clients - Solaris 2.6 at least, make an access call
	 * to the server to check for access for things like /dev/null
	 * (which really, the server doesn't care about).  So
	 * We provide simple access checking for them, looking
	 * mainly at mode bits, and we make sure to ignore read-only
	 * filesystem checks
	 */
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},

    {	0,			0				}
};
801
802 __be32
nfsd_access(struct svc_rqst * rqstp,struct svc_fh * fhp,u32 * access,u32 * supported)803 nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
804 {
805 struct accessmap *map;
806 struct svc_export *export;
807 struct dentry *dentry;
808 u32 query, result = 0, sresult = 0;
809 __be32 error;
810
811 error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
812 if (error)
813 goto out;
814
815 export = fhp->fh_export;
816 dentry = fhp->fh_dentry;
817
818 if (d_is_reg(dentry))
819 map = nfs3_regaccess;
820 else if (d_is_dir(dentry))
821 map = nfs3_diraccess;
822 else
823 map = nfs3_anyaccess;
824
825
826 query = *access;
827 for (; map->access; map++) {
828 if (map->access & query) {
829 __be32 err2;
830
831 sresult |= map->access;
832
833 err2 = nfsd_permission(&rqstp->rq_cred, export,
834 dentry, map->how);
835 switch (err2) {
836 case nfs_ok:
837 result |= map->access;
838 break;
839
840 /* the following error codes just mean the access was not allowed,
841 * rather than an error occurred */
842 case nfserr_rofs:
843 case nfserr_acces:
844 case nfserr_perm:
845 /* simply don't "or" in the access bit. */
846 break;
847 default:
848 error = err2;
849 goto out;
850 }
851 }
852 }
853 *access = result;
854 if (supported)
855 *supported = sresult;
856
857 out:
858 return error;
859 }
860
nfsd_open_break_lease(struct inode * inode,int access)861 int nfsd_open_break_lease(struct inode *inode, int access)
862 {
863 unsigned int mode;
864
865 if (access & NFSD_MAY_NOT_BREAK_LEASE)
866 return 0;
867 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
868 return break_lease(inode, mode | O_NONBLOCK);
869 }
870
871 /*
872 * Open an existing file or directory.
873 * The may_flags argument indicates the type of open (read/write/lock)
874 * and additional flags.
875 * N.B. After this call fhp needs an fh_put
876 */
static int
__nfsd_open(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
	struct path path;
	struct inode *inode;
	struct file *file;
	int flags = O_RDONLY|O_LARGEFILE;
	int host_err = -EPERM;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;
	inode = d_inode(path.dentry);

	/* Refuse to open append-only files for writing. */
	if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
		goto out;

	/* No file operations means the object is not openable. */
	if (!inode->i_fop)
		goto out;

	host_err = nfsd_open_break_lease(inode, may_flags);
	if (host_err) /* NOMEM or WOULDBLOCK */
		goto out;

	/* Translate the NFSD_MAY_* access bits into an open mode. */
	if (may_flags & NFSD_MAY_WRITE) {
		if (may_flags & NFSD_MAY_READ)
			flags = O_RDWR|O_LARGEFILE;
		else
			flags = O_WRONLY|O_LARGEFILE;
	}

	file = dentry_open(&path, flags, current_cred());
	if (IS_ERR(file)) {
		host_err = PTR_ERR(file);
		goto out;
	}

	/* Give the security module a chance to veto the open. */
	host_err = security_file_post_open(file, may_flags);
	if (host_err) {
		fput(file);
		goto out;
	}

	*filp = file;
out:
	return host_err;
}
923
__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
		int may_flags, struct file **filp)
{
	__be32 err;
	int host_err;
	bool retried = false;

	/*
	 * If we get here, then the client has already done an "open",
	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
	 * in case a chmod has now revoked permission.
	 *
	 * Arguably we should also allow the owner override for
	 * directories, but we never have and it doesn't seem to have
	 * caused anyone a problem.  If we were to change this, note
	 * also that our filldir callbacks would need a variant of
	 * lookup_one_positive_unlocked() that doesn't check permissions.
	 */
	if (type == S_IFREG)
		may_flags |= NFSD_MAY_OWNER_OVERRIDE;
retry:
	err = fh_verify(rqstp, fhp, type, may_flags);
	if (!err) {
		host_err = __nfsd_open(fhp, type, may_flags, filp);
		/* -EOPENSTALE: drop the cached handle state and retry
		 * the verify+open sequence exactly once. */
		if (host_err == -EOPENSTALE && !retried) {
			retried = true;
			fh_put(fhp);
			goto retry;
		}
		err = nfserrno(host_err);
	}
	return err;
}
958
959 /**
960 * nfsd_open_verified - Open a regular file for the filecache
961 * @fhp: NFS filehandle of the file to open
962 * @type: S_IFMT inode type allowed (0 means any type is allowed)
963 * @may_flags: internal permission flags
964 * @filp: OUT: open "struct file *"
965 *
966 * Returns zero on success, or a negative errno value.
967 */
int
nfsd_open_verified(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
	/* The filehandle is expected to have been verified by the caller
	 * (the filecache), so open it directly without fh_verify(). */
	return __nfsd_open(fhp, type, may_flags, filp);
}
973
974 /*
975 * Grab and keep cached pages associated with a file in the svc_rqst
976 * so that they can be passed to the network sendmsg routines
977 * directly. They will be released after the sending has completed.
978 *
979 * Return values: Number of bytes consumed, or -EIO if there are no
980 * remaining pages in rqstp->rq_pages.
981 */
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		  struct splice_desc *sd)
{
	struct svc_rqst *rqstp = sd->u.data;
	struct page *page = buf->page;	// may be a compound one
	unsigned offset = buf->offset;
	struct page *last_page;

	/* Last constituent page this buffer's payload touches. */
	last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
	for (page += offset / PAGE_SIZE; page <= last_page; page++) {
		/*
		 * Skip page replacement when extending the contents of the
		 * current page.  But note that we may get two zero_pages in a
		 * row from shmem.
		 */
		if (page == *(rqstp->rq_next_page - 1) &&
		    offset_in_page(rqstp->rq_res.page_base +
				   rqstp->rq_res.page_len))
			continue;
		if (unlikely(!svc_rqst_replace_page(rqstp, page)))
			return -EIO;
	}
	if (rqstp->rq_res.page_len == 0)	// first call
		rqstp->rq_res.page_base = offset % PAGE_SIZE;
	rqstp->rq_res.page_len += sd->len;
	return sd->len;
}
1010
nfsd_direct_splice_actor(struct pipe_inode_info * pipe,struct splice_desc * sd)1011 static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
1012 struct splice_desc *sd)
1013 {
1014 return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
1015 }
1016
nfsd_eof_on_read(struct file * file,loff_t offset,ssize_t len,size_t expected)1017 static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
1018 size_t expected)
1019 {
1020 if (expected != 0 && len == 0)
1021 return 1;
1022 if (offset+len >= i_size_read(file_inode(file)))
1023 return 1;
1024 return 0;
1025 }
1026
nfsd_finish_read(struct svc_rqst * rqstp,struct svc_fh * fhp,struct file * file,loff_t offset,unsigned long * count,u32 * eof,ssize_t host_err)1027 static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1028 struct file *file, loff_t offset,
1029 unsigned long *count, u32 *eof, ssize_t host_err)
1030 {
1031 if (host_err >= 0) {
1032 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1033
1034 nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
1035 *eof = nfsd_eof_on_read(file, offset, host_err, *count);
1036 *count = host_err;
1037 fsnotify_access(file);
1038 trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
1039 return 0;
1040 } else {
1041 trace_nfsd_read_err(rqstp, fhp, offset, host_err);
1042 return nfserrno(host_err);
1043 }
1044 }
1045
1046 /**
1047 * nfsd_splice_read - Perform a VFS read using a splice pipe
1048 * @rqstp: RPC transaction context
1049 * @fhp: file handle of file to be read
1050 * @file: opened struct file of file to be read
1051 * @offset: starting byte offset
1052 * @count: IN: requested number of bytes; OUT: number of bytes read
1053 * @eof: OUT: set non-zero if operation reached the end of the file
1054 *
1055 * Returns nfs_ok on success, otherwise an nfserr stat value is
1056 * returned.
1057 */
nfsd_splice_read(struct svc_rqst * rqstp,struct svc_fh * fhp,struct file * file,loff_t offset,unsigned long * count,u32 * eof)1058 __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1059 struct file *file, loff_t offset, unsigned long *count,
1060 u32 *eof)
1061 {
1062 struct splice_desc sd = {
1063 .len = 0,
1064 .total_len = *count,
1065 .pos = offset,
1066 .u.data = rqstp,
1067 };
1068 ssize_t host_err;
1069
1070 trace_nfsd_read_splice(rqstp, fhp, offset, *count);
1071 host_err = rw_verify_area(READ, file, &offset, *count);
1072 if (!host_err)
1073 host_err = splice_direct_to_actor(file, &sd,
1074 nfsd_direct_splice_actor);
1075 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1076 }
1077
1078 /*
1079 * The byte range of the client's READ request is expanded on both ends
1080 * until it meets the underlying file system's direct I/O alignment
1081 * requirements. After the internal read is complete, the byte range of
1082 * the NFS READ payload is reduced to the byte range that was originally
1083 * requested.
1084 *
1085 * Note that a direct read can be done only when the xdr_buf containing
1086 * the NFS READ reply does not already have contents in its .pages array.
1087 * This is due to potentially restrictive alignment requirements on the
1088 * read buffer. When .page_len and @base are zero, the .pages array is
1089 * guaranteed to be page-aligned.
1090 */
static noinline_for_stack __be32
nfsd_direct_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		 struct nfsd_file *nf, loff_t offset, unsigned long *count,
		 u32 *eof)
{
	u64 dio_start, dio_end;
	unsigned long v, total;
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t host_err;
	size_t len;

	init_sync_kiocb(&kiocb, nf->nf_file);
	kiocb.ki_flags |= IOCB_DIRECT;

	/* Read a properly-aligned region of bytes into rq_bvec */
	dio_start = round_down(offset, nf->nf_dio_read_offset_align);
	dio_end = round_up((u64)offset + *count, nf->nf_dio_read_offset_align);

	kiocb.ki_pos = dio_start;

	/*
	 * Populate rq_bvec with whole pages covering the expanded
	 * [dio_start, dio_end) range. Whatever does not fit into the
	 * available pages remains in @total and is excluded from the
	 * iterator length below.
	 */
	v = 0;
	total = dio_end - dio_start;
	while (total && v < rqstp->rq_maxpages &&
	       rqstp->rq_next_page < rqstp->rq_page_end) {
		len = min_t(size_t, total, PAGE_SIZE);
		bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
			      len, 0);

		total -= len;
		++rqstp->rq_next_page;
		++v;
	}

	trace_nfsd_read_direct(rqstp, fhp, offset, *count - total);
	iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
		      dio_end - dio_start - total);

	host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
	if (host_err >= 0) {
		unsigned int pad = offset - dio_start;

		/* The returned payload starts after the pad */
		rqstp->rq_res.page_base = pad;

		/*
		 * Compute the count of bytes to be returned: trim the
		 * leading pad and clamp to the client's original request.
		 */
		if (host_err > pad + *count)
			host_err = *count;
		else if (host_err > pad)
			host_err -= pad;
		else
			host_err = 0;
	} else if (unlikely(host_err == -EINVAL)) {
		struct inode *inode = d_inode(fhp->fh_dentry);

		/*
		 * The file system rejected the aligned read; log it and
		 * report a server fault rather than leaking -EINVAL.
		 */
		pr_info_ratelimited("nfsd: Direct I/O alignment failure on %s/%ld\n",
				    inode->i_sb->s_id, inode->i_ino);
		host_err = -ESERVERFAULT;
	}

	return nfsd_finish_read(rqstp, fhp, nf->nf_file, offset, count,
				eof, host_err);
}
1154
1155 /**
1156 * nfsd_iter_read - Perform a VFS read using an iterator
1157 * @rqstp: RPC transaction context
1158 * @fhp: file handle of file to be read
1159 * @nf: opened struct nfsd_file of file to be read
1160 * @offset: starting byte offset
1161 * @count: IN: requested number of bytes; OUT: number of bytes read
1162 * @base: offset in first page of read buffer
1163 * @eof: OUT: set non-zero if operation reached the end of the file
1164 *
1165 * Some filesystems or situations cannot use nfsd_splice_read. This
1166 * function is the slightly less-performant fallback for those cases.
1167 *
1168 * Returns nfs_ok on success, otherwise an nfserr stat value is
1169 * returned.
1170 */
nfsd_iter_read(struct svc_rqst * rqstp,struct svc_fh * fhp,struct nfsd_file * nf,loff_t offset,unsigned long * count,unsigned int base,u32 * eof)1171 __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1172 struct nfsd_file *nf, loff_t offset, unsigned long *count,
1173 unsigned int base, u32 *eof)
1174 {
1175 struct file *file = nf->nf_file;
1176 unsigned long v, total;
1177 struct iov_iter iter;
1178 struct kiocb kiocb;
1179 ssize_t host_err;
1180 size_t len;
1181
1182 init_sync_kiocb(&kiocb, file);
1183
1184 switch (nfsd_io_cache_read) {
1185 case NFSD_IO_BUFFERED:
1186 break;
1187 case NFSD_IO_DIRECT:
1188 /* When dio_read_offset_align is zero, dio is not supported */
1189 if (nf->nf_dio_read_offset_align && !rqstp->rq_res.page_len)
1190 return nfsd_direct_read(rqstp, fhp, nf, offset,
1191 count, eof);
1192 fallthrough;
1193 case NFSD_IO_DONTCACHE:
1194 if (file->f_op->fop_flags & FOP_DONTCACHE)
1195 kiocb.ki_flags = IOCB_DONTCACHE;
1196 break;
1197 }
1198
1199 kiocb.ki_pos = offset;
1200
1201 v = 0;
1202 total = *count;
1203 while (total && v < rqstp->rq_maxpages &&
1204 rqstp->rq_next_page < rqstp->rq_page_end) {
1205 len = min_t(size_t, total, PAGE_SIZE - base);
1206 bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
1207 len, base);
1208
1209 total -= len;
1210 ++rqstp->rq_next_page;
1211 ++v;
1212 base = 0;
1213 }
1214
1215 trace_nfsd_read_vector(rqstp, fhp, offset, *count - total);
1216 iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count - total);
1217 host_err = vfs_iocb_iter_read(file, &kiocb, &iter);
1218 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1219 }
1220
1221 /*
1222 * Gathered writes: If another process is currently writing to the file,
1223 * there's a high chance this is another nfsd (triggered by a bulk write
1224 * from a client's biod). Rather than syncing the file with each write
1225 * request, we sleep for 10 msec.
1226 *
1227 * I don't know if this roughly approximates C. Juszak's idea of
1228 * gathered writes, but it's a nice and simple solution (IMHO), and it
1229 * seems to work:-)
1230 *
1231 * Note: we do this only in the NFSv2 case, since v3 and higher have a
1232 * better tool (separate unstable writes and commits) for solving this
1233 * problem.
1234 */
wait_for_concurrent_writes(struct file * file)1235 static int wait_for_concurrent_writes(struct file *file)
1236 {
1237 struct inode *inode = file_inode(file);
1238 static ino_t last_ino;
1239 static dev_t last_dev;
1240 int err = 0;
1241
1242 if (atomic_read(&inode->i_writecount) > 1
1243 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
1244 dprintk("nfsd: write defer %d\n", task_pid_nr(current));
1245 msleep(10);
1246 dprintk("nfsd: write resume %d\n", task_pid_nr(current));
1247 }
1248
1249 if (inode_state_read_once(inode) & I_DIRTY) {
1250 dprintk("nfsd: write sync %d\n", task_pid_nr(current));
1251 err = vfs_fsync(file, 0);
1252 }
1253 last_ino = inode->i_ino;
1254 last_dev = inode->i_sb->s_dev;
1255 return err;
1256 }
1257
/*
 * One slice of a WRITE payload: @iter covers the slice's bytes, and
 * @flags carries the kiocb flags used to submit it (IOCB_DIRECT is set
 * only on the aligned middle segment; see nfsd_write_dio_iters_init()).
 */
struct nfsd_write_dio_seg {
	struct iov_iter iter;
	int flags;
};
1262
1263 static unsigned long
iov_iter_bvec_offset(const struct iov_iter * iter)1264 iov_iter_bvec_offset(const struct iov_iter *iter)
1265 {
1266 return (unsigned long)(iter->bvec->bv_offset + iter->iov_offset);
1267 }
1268
/*
 * Initialize one write segment: wrap @bvec (covering @total payload
 * bytes) in an iterator, narrow it to the [@start, @start + @len)
 * slice, and inherit the caller's kiocb flags.
 */
static void
nfsd_write_dio_seg_init(struct nfsd_write_dio_seg *segment,
			struct bio_vec *bvec, unsigned int nvecs,
			unsigned long total, size_t start, size_t len,
			struct kiocb *iocb)
{
	iov_iter_bvec(&segment->iter, ITER_SOURCE, bvec, nvecs, total);
	if (start)
		iov_iter_advance(&segment->iter, start);
	iov_iter_truncate(&segment->iter, len);
	segment->flags = iocb->ki_flags;
}
1281
/*
 * Split a WRITE payload into up to three segments so the offset-aligned
 * middle portion can be submitted with O_DIRECT:
 *
 *   [ prefix (buffered) | middle (direct) | suffix (buffered) ]
 *
 * Returns the number of segments written into @segments (1..3). If no
 * direct I/O is possible, the whole payload is packed into a single
 * buffered segment.
 */
static unsigned int
nfsd_write_dio_iters_init(struct nfsd_file *nf, struct bio_vec *bvec,
			  unsigned int nvecs, struct kiocb *iocb,
			  unsigned long total,
			  struct nfsd_write_dio_seg segments[3])
{
	u32 offset_align = nf->nf_dio_offset_align;
	loff_t prefix_end, orig_end, middle_end;
	u32 mem_align = nf->nf_dio_mem_align;
	size_t prefix, middle, suffix;
	loff_t offset = iocb->ki_pos;
	unsigned int nsegs = 0;

	/*
	 * Check if direct I/O is feasible for this write request.
	 * If alignments are not available, the write is too small,
	 * or no alignment can be found, fall back to buffered I/O.
	 */
	if (unlikely(!mem_align || !offset_align) ||
	    unlikely(total < max(offset_align, mem_align)))
		goto no_dio;

	/* Carve [offset, offset + total) at offset_align boundaries */
	prefix_end = round_up(offset, offset_align);
	orig_end = offset + total;
	middle_end = round_down(orig_end, offset_align);

	prefix = prefix_end - offset;
	middle = middle_end - prefix_end;
	suffix = orig_end - middle_end;

	/* No aligned middle region at all — nothing to send direct */
	if (!middle)
		goto no_dio;

	if (prefix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec,
					nvecs, total, 0, prefix, iocb);

	nfsd_write_dio_seg_init(&segments[nsegs], bvec, nvecs,
				total, prefix, middle, iocb);

	/*
	 * Check if the bvec iterator is aligned for direct I/O.
	 *
	 * bvecs generated from RPC receive buffers are contiguous: After
	 * the first bvec, all subsequent bvecs start at bv_offset zero
	 * (page-aligned). Therefore, only the first bvec is checked.
	 */
	if (iov_iter_bvec_offset(&segments[nsegs].iter) & (mem_align - 1))
		goto no_dio;
	segments[nsegs].flags |= IOCB_DIRECT;
	nsegs++;

	if (suffix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec, nvecs, total,
					prefix + middle, suffix, iocb);

	return nsegs;

no_dio:
	/* No DIO alignment possible - pack into single non-DIO segment. */
	nfsd_write_dio_seg_init(&segments[0], bvec, nvecs, total, 0,
				total, iocb);
	return 1;
}
1346
1347 static noinline_for_stack int
nfsd_direct_write(struct svc_rqst * rqstp,struct svc_fh * fhp,struct nfsd_file * nf,unsigned int nvecs,unsigned long * cnt,struct kiocb * kiocb)1348 nfsd_direct_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
1349 struct nfsd_file *nf, unsigned int nvecs,
1350 unsigned long *cnt, struct kiocb *kiocb)
1351 {
1352 struct nfsd_write_dio_seg segments[3];
1353 struct file *file = nf->nf_file;
1354 unsigned int nsegs, i;
1355 ssize_t host_err;
1356
1357 nsegs = nfsd_write_dio_iters_init(nf, rqstp->rq_bvec, nvecs,
1358 kiocb, *cnt, segments);
1359
1360 *cnt = 0;
1361 for (i = 0; i < nsegs; i++) {
1362 kiocb->ki_flags = segments[i].flags;
1363 if (kiocb->ki_flags & IOCB_DIRECT)
1364 trace_nfsd_write_direct(rqstp, fhp, kiocb->ki_pos,
1365 segments[i].iter.count);
1366 else {
1367 trace_nfsd_write_vector(rqstp, fhp, kiocb->ki_pos,
1368 segments[i].iter.count);
1369 /*
1370 * Mark the I/O buffer as evict-able to reduce
1371 * memory contention.
1372 */
1373 if (nf->nf_file->f_op->fop_flags & FOP_DONTCACHE)
1374 kiocb->ki_flags |= IOCB_DONTCACHE;
1375 }
1376
1377 host_err = vfs_iocb_iter_write(file, kiocb, &segments[i].iter);
1378 if (host_err < 0)
1379 return host_err;
1380 *cnt += host_err;
1381 if (host_err < segments[i].iter.count)
1382 break; /* partial write */
1383 }
1384
1385 return 0;
1386 }
1387
1388 /**
1389 * nfsd_vfs_write - write data to an already-open file
1390 * @rqstp: RPC execution context
1391 * @fhp: File handle of file to write into
1392 * @nf: An open file matching @fhp
1393 * @offset: Byte offset of start
1394 * @payload: xdr_buf containing the write payload
1395 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
1396 * @stable: An NFS stable_how value
1397 * @verf: NFS WRITE verifier
1398 *
1399 * Upon return, caller must invoke fh_put on @fhp.
1400 *
1401 * Return values:
1402 * An nfsstat value in network byte order.
1403 */
1404 __be32
nfsd_vfs_write(struct svc_rqst * rqstp,struct svc_fh * fhp,struct nfsd_file * nf,loff_t offset,const struct xdr_buf * payload,unsigned long * cnt,int stable,__be32 * verf)1405 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
1406 struct nfsd_file *nf, loff_t offset,
1407 const struct xdr_buf *payload, unsigned long *cnt,
1408 int stable, __be32 *verf)
1409 {
1410 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1411 struct file *file = nf->nf_file;
1412 struct super_block *sb = file_inode(file)->i_sb;
1413 struct kiocb kiocb;
1414 struct svc_export *exp;
1415 struct iov_iter iter;
1416 errseq_t since;
1417 __be32 nfserr;
1418 int host_err;
1419 unsigned long exp_op_flags = 0;
1420 unsigned int pflags = current->flags;
1421 bool restore_flags = false;
1422 unsigned int nvecs;
1423
1424 trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);
1425
1426 if (sb->s_export_op)
1427 exp_op_flags = sb->s_export_op->flags;
1428
1429 if (test_bit(RQ_LOCAL, &rqstp->rq_flags) &&
1430 !(exp_op_flags & EXPORT_OP_REMOTE_FS)) {
1431 /*
1432 * We want throttling in balance_dirty_pages()
1433 * and shrink_inactive_list() to only consider
1434 * the backingdev we are writing to, so that nfs to
1435 * localhost doesn't cause nfsd to lock up due to all
1436 * the client's dirty pages or its congested queue.
1437 */
1438 current->flags |= PF_LOCAL_THROTTLE;
1439 restore_flags = true;
1440 }
1441
1442 exp = fhp->fh_export;
1443
1444 if (!EX_ISSYNC(exp))
1445 stable = NFS_UNSTABLE;
1446 init_sync_kiocb(&kiocb, file);
1447 kiocb.ki_pos = offset;
1448 if (likely(!fhp->fh_use_wgather)) {
1449 switch (stable) {
1450 case NFS_FILE_SYNC:
1451 /* persist data and timestamps */
1452 kiocb.ki_flags |= IOCB_DSYNC | IOCB_SYNC;
1453 break;
1454 case NFS_DATA_SYNC:
1455 /* persist data only */
1456 kiocb.ki_flags |= IOCB_DSYNC;
1457 break;
1458 }
1459 }
1460
1461 nvecs = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, payload);
1462
1463 since = READ_ONCE(file->f_wb_err);
1464 if (verf)
1465 nfsd_copy_write_verifier(verf, nn);
1466
1467 switch (nfsd_io_cache_write) {
1468 case NFSD_IO_DIRECT:
1469 host_err = nfsd_direct_write(rqstp, fhp, nf, nvecs,
1470 cnt, &kiocb);
1471 break;
1472 case NFSD_IO_DONTCACHE:
1473 if (file->f_op->fop_flags & FOP_DONTCACHE)
1474 kiocb.ki_flags |= IOCB_DONTCACHE;
1475 fallthrough;
1476 case NFSD_IO_BUFFERED:
1477 iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt);
1478 host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
1479 if (host_err < 0)
1480 break;
1481 *cnt = host_err;
1482 break;
1483 }
1484 if (host_err < 0) {
1485 commit_reset_write_verifier(nn, rqstp, host_err);
1486 goto out_nfserr;
1487 }
1488 nfsd_stats_io_write_add(nn, exp, *cnt);
1489 fsnotify_modify(file);
1490 host_err = filemap_check_wb_err(file->f_mapping, since);
1491 if (host_err < 0)
1492 goto out_nfserr;
1493
1494 if (stable && fhp->fh_use_wgather) {
1495 host_err = wait_for_concurrent_writes(file);
1496 if (host_err < 0)
1497 commit_reset_write_verifier(nn, rqstp, host_err);
1498 }
1499
1500 out_nfserr:
1501 if (host_err >= 0) {
1502 trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt);
1503 nfserr = nfs_ok;
1504 } else {
1505 trace_nfsd_write_err(rqstp, fhp, offset, host_err);
1506 nfserr = nfserrno(host_err);
1507 }
1508 if (restore_flags)
1509 current_restore_flags(pflags, PF_LOCAL_THROTTLE);
1510 return nfserr;
1511 }
1512
1513 /**
1514 * nfsd_read_splice_ok - check if spliced reading is supported
1515 * @rqstp: RPC transaction context
1516 *
1517 * Return values:
1518 * %true: nfsd_splice_read() may be used
1519 * %false: nfsd_splice_read() must not be used
1520 *
1521 * NFS READ normally uses splice to send data in-place. However the
1522 * data in cache can change after the reply's MIC is computed but
1523 * before the RPC reply is sent. To prevent the client from
1524 * rejecting the server-computed MIC in this somewhat rare case, do
1525 * not use splice with the GSS integrity and privacy services.
1526 */
nfsd_read_splice_ok(struct svc_rqst * rqstp)1527 bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
1528 {
1529 if (nfsd_disable_splice_read)
1530 return false;
1531 switch (svc_auth_flavor(rqstp)) {
1532 case RPC_AUTH_GSS_KRB5I:
1533 case RPC_AUTH_GSS_KRB5P:
1534 return false;
1535 }
1536 return true;
1537 }
1538
1539 /**
1540 * nfsd_read - Read data from a file
1541 * @rqstp: RPC transaction context
1542 * @fhp: file handle of file to be read
1543 * @offset: starting byte offset
1544 * @count: IN: requested number of bytes; OUT: number of bytes read
1545 * @eof: OUT: set non-zero if operation reached the end of the file
1546 *
1547 * The caller must verify that there is enough space in @rqstp.rq_res
1548 * to perform this operation.
1549 *
1550 * N.B. After this call fhp needs an fh_put
1551 *
1552 * Returns nfs_ok on success, otherwise an nfserr stat value is
1553 * returned.
1554 */
nfsd_read(struct svc_rqst * rqstp,struct svc_fh * fhp,loff_t offset,unsigned long * count,u32 * eof)1555 __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1556 loff_t offset, unsigned long *count, u32 *eof)
1557 {
1558 struct nfsd_file *nf;
1559 struct file *file;
1560 __be32 err;
1561
1562 trace_nfsd_read_start(rqstp, fhp, offset, *count);
1563 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
1564 if (err)
1565 return err;
1566
1567 file = nf->nf_file;
1568 if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
1569 err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
1570 else
1571 err = nfsd_iter_read(rqstp, fhp, nf, offset, count, 0, eof);
1572
1573 nfsd_file_put(nf);
1574 trace_nfsd_read_done(rqstp, fhp, offset, *count);
1575 return err;
1576 }
1577
1578 /**
1579 * nfsd_write - open a file and write data to it
1580 * @rqstp: RPC execution context
1581 * @fhp: File handle of file to write into; nfsd_write() may modify it
1582 * @offset: Byte offset of start
1583 * @payload: xdr_buf containing the write payload
1584 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
1585 * @stable: An NFS stable_how value
1586 * @verf: NFS WRITE verifier
1587 *
1588 * Upon return, caller must invoke fh_put on @fhp.
1589 *
1590 * Return values:
1591 * An nfsstat value in network byte order.
1592 */
1593 __be32
nfsd_write(struct svc_rqst * rqstp,struct svc_fh * fhp,loff_t offset,const struct xdr_buf * payload,unsigned long * cnt,int stable,__be32 * verf)1594 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
1595 const struct xdr_buf *payload, unsigned long *cnt, int stable,
1596 __be32 *verf)
1597 {
1598 struct nfsd_file *nf;
1599 __be32 err;
1600
1601 trace_nfsd_write_start(rqstp, fhp, offset, *cnt);
1602
1603 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
1604 if (err)
1605 goto out;
1606
1607 err = nfsd_vfs_write(rqstp, fhp, nf, offset, payload, cnt,
1608 stable, verf);
1609 nfsd_file_put(nf);
1610 out:
1611 trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
1612 return err;
1613 }
1614
1615 /**
1616 * nfsd_commit - Commit pending writes to stable storage
1617 * @rqstp: RPC request being processed
1618 * @fhp: NFS filehandle
1619 * @nf: target file
1620 * @offset: raw offset from beginning of file
1621 * @count: raw count of bytes to sync
1622 * @verf: filled in with the server's current write verifier
1623 *
1624 * Note: we guarantee that data that lies within the range specified
1625 * by the 'offset' and 'count' parameters will be synced. The server
1626 * is permitted to sync data that lies outside this range at the
1627 * same time.
1628 *
1629 * Unfortunately we cannot lock the file to make sure we return full WCC
1630 * data to the client, as locking happens lower down in the filesystem.
1631 *
1632 * Return values:
1633 * An nfsstat value in network byte order.
1634 */
1635 __be32
nfsd_commit(struct svc_rqst * rqstp,struct svc_fh * fhp,struct nfsd_file * nf,u64 offset,u32 count,__be32 * verf)1636 nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
1637 u64 offset, u32 count, __be32 *verf)
1638 {
1639 __be32 err = nfs_ok;
1640 u64 maxbytes;
1641 loff_t start, end;
1642 struct nfsd_net *nn;
1643
1644 trace_nfsd_commit_start(rqstp, fhp, offset, count);
1645
1646 /*
1647 * Convert the client-provided (offset, count) range to a
1648 * (start, end) range. If the client-provided range falls
1649 * outside the maximum file size of the underlying FS,
1650 * clamp the sync range appropriately.
1651 */
1652 start = 0;
1653 end = LLONG_MAX;
1654 maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
1655 if (offset < maxbytes) {
1656 start = offset;
1657 if (count && (offset + count - 1 < maxbytes))
1658 end = offset + count - 1;
1659 }
1660
1661 nn = net_generic(nf->nf_net, nfsd_net_id);
1662 if (EX_ISSYNC(fhp->fh_export)) {
1663 errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
1664 int err2;
1665
1666 err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
1667 switch (err2) {
1668 case 0:
1669 nfsd_copy_write_verifier(verf, nn);
1670 err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
1671 since);
1672 err = nfserrno(err2);
1673 break;
1674 case -EINVAL:
1675 err = nfserr_notsupp;
1676 break;
1677 default:
1678 commit_reset_write_verifier(nn, rqstp, err2);
1679 err = nfserrno(err2);
1680 }
1681 } else
1682 nfsd_copy_write_verifier(verf, nn);
1683
1684 trace_nfsd_commit_done(rqstp, fhp, offset, count);
1685 return err;
1686 }
1687
1688 /**
1689 * nfsd_create_setattr - Set a created file's attributes
1690 * @rqstp: RPC transaction being executed
1691 * @fhp: NFS filehandle of parent directory
1692 * @resfhp: NFS filehandle of new object
1693 * @attrs: requested attributes of new object
1694 *
1695 * Returns nfs_ok on success, or an nfsstat in network byte order.
1696 */
1697 __be32
nfsd_create_setattr(struct svc_rqst * rqstp,struct svc_fh * fhp,struct svc_fh * resfhp,struct nfsd_attrs * attrs)1698 nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
1699 struct svc_fh *resfhp, struct nfsd_attrs *attrs)
1700 {
1701 struct iattr *iap = attrs->na_iattr;
1702 __be32 status;
1703
1704 /*
1705 * Mode has already been set by file creation.
1706 */
1707 iap->ia_valid &= ~ATTR_MODE;
1708
1709 /*
1710 * Setting uid/gid works only for root. Irix appears to
1711 * send along the gid on create when it tries to implement
1712 * setgid directories via NFS:
1713 */
1714 if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
1715 iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
1716
1717 /*
1718 * Callers expect new file metadata to be committed even
1719 * if the attributes have not changed.
1720 */
1721 if (nfsd_attrs_valid(attrs))
1722 status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
1723 else
1724 status = nfserrno(commit_metadata(resfhp));
1725
1726 /*
1727 * Transactional filesystems had a chance to commit changes
1728 * for both parent and child simultaneously making the
1729 * following commit_metadata a noop in many cases.
1730 */
1731 if (!status)
1732 status = nfserrno(commit_metadata(fhp));
1733
1734 /*
1735 * Update the new filehandle to pick up the new attributes.
1736 */
1737 if (!status)
1738 status = fh_update(resfhp);
1739
1740 return status;
1741 }
1742
/* The HP-UX client sometimes creates a file in mode 000 and also asks to
 * set its size to 0. On some file systems, truncating to 0 fails the
 * permission check, which requires WRITE permission that a mode-000 file
 * does not grant. Since a freshly created file already has size 0, we
 * simply drop the redundant resize request.
 *
 * Call this only after vfs_create() has been called.
 */
1751 static void
nfsd_check_ignore_resizing(struct iattr * iap)1752 nfsd_check_ignore_resizing(struct iattr *iap)
1753 {
1754 if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
1755 iap->ia_valid &= ~ATTR_SIZE;
1756 }
1757
/* The parent directory should already be locked - we will unlock */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   struct nfsd_attrs *attrs,
		   int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry *dentry, *dchild;
	struct inode *dirp;
	struct iattr *iap = attrs->na_iattr;
	__be32 err;
	int host_err = 0;

	dentry = fhp->fh_dentry;
	dirp = d_inode(dentry);

	/* Take a local reference; released via end_creating() below */
	dchild = dget(resfhp->fh_dentry);
	err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
			      NFSD_MAY_CREATE);
	if (err)
		goto out;

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	/* Combine the requested permission bits with the object type */
	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;

	/* Without POSIX ACLs, the server applies the process umask */
	if (!IS_POSIXACL(dirp))
		iap->ia_mode &= ~current_umask();

	err = 0;
	switch (type) {
	case S_IFREG:
		host_err = vfs_create(&nop_mnt_idmap, dchild, iap->ia_mode, NULL);
		if (!host_err)
			nfsd_check_ignore_resizing(iap);
		break;
	case S_IFDIR:
		/* vfs_mkdir() may return a different dentry than it was given */
		dchild = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode, NULL);
		if (IS_ERR(dchild)) {
			host_err = PTR_ERR(dchild);
		} else if (d_is_negative(dchild)) {
			err = nfserr_serverfault;
			goto out;
		} else if (unlikely(dchild != resfhp->fh_dentry)) {
			/* Swap the response filehandle over to the new dentry */
			dput(resfhp->fh_dentry);
			resfhp->fh_dentry = dget(dchild);
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
				     iap->ia_mode, rdev, NULL);
		break;
	default:
		printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
		       type);
		host_err = -EINVAL;
	}
	if (host_err < 0)
		goto out_nfserr;

	err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);

out:
	if (!err)
		fh_fill_post_attrs(fhp);
	/* Unlocks the parent and drops our dchild reference */
	end_creating(dchild);
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
1832
1833 /*
1834 * Create a filesystem object (regular, directory, special).
1835 * Note that the parent directory is left locked.
1836 *
1837 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
1838 */
1839 __be32
nfsd_create(struct svc_rqst * rqstp,struct svc_fh * fhp,char * fname,int flen,struct nfsd_attrs * attrs,int type,dev_t rdev,struct svc_fh * resfhp)1840 nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1841 char *fname, int flen, struct nfsd_attrs *attrs,
1842 int type, dev_t rdev, struct svc_fh *resfhp)
1843 {
1844 struct dentry *dentry, *dchild = NULL;
1845 __be32 err;
1846 int host_err;
1847
1848 trace_nfsd_vfs_create(rqstp, fhp, type, fname, flen);
1849
1850 if (isdotent(fname, flen))
1851 return nfserr_exist;
1852
1853 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
1854 if (err)
1855 return err;
1856
1857 dentry = fhp->fh_dentry;
1858
1859 host_err = fh_want_write(fhp);
1860 if (host_err)
1861 return nfserrno(host_err);
1862
1863 dchild = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
1864 host_err = PTR_ERR(dchild);
1865 if (IS_ERR(dchild))
1866 return nfserrno(host_err);
1867
1868 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
1869 if (err)
1870 goto out_unlock;
1871 err = fh_fill_pre_attrs(fhp);
1872 if (err != nfs_ok)
1873 goto out_unlock;
1874 err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
1875 /* nfsd_create_locked() unlocked the parent */
1876 dput(dchild);
1877 return err;
1878
1879 out_unlock:
1880 end_creating(dchild);
1881 return err;
1882 }
1883
1884 /*
1885 * Read a symlink. On entry, *lenp must contain the maximum path length that
1886 * fits into the buffer. On return, it contains the true length.
1887 * N.B. After this call fhp needs an fh_put
1888 */
1889 __be32
nfsd_readlink(struct svc_rqst * rqstp,struct svc_fh * fhp,char * buf,int * lenp)1890 nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
1891 {
1892 __be32 err;
1893 const char *link;
1894 struct path path;
1895 DEFINE_DELAYED_CALL(done);
1896 int len;
1897
1898 err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
1899 if (unlikely(err))
1900 return err;
1901
1902 path.mnt = fhp->fh_export->ex_path.mnt;
1903 path.dentry = fhp->fh_dentry;
1904
1905 if (unlikely(!d_is_symlink(path.dentry)))
1906 return nfserr_inval;
1907
1908 touch_atime(&path);
1909
1910 link = vfs_get_link(path.dentry, &done);
1911 if (IS_ERR(link))
1912 return nfserrno(PTR_ERR(link));
1913
1914 len = strlen(link);
1915 if (len < *lenp)
1916 *lenp = len;
1917 memcpy(buf, link, *lenp);
1918 do_delayed_call(&done);
1919 return 0;
1920 }
1921
1922 /**
1923 * nfsd_symlink - Create a symlink and look up its inode
1924 * @rqstp: RPC transaction being executed
1925 * @fhp: NFS filehandle of parent directory
1926 * @fname: filename of the new symlink
1927 * @flen: length of @fname
1928 * @path: content of the new symlink (NUL-terminated)
1929 * @attrs: requested attributes of new object
1930 * @resfhp: NFS filehandle of new object
1931 *
1932 * N.B. After this call _both_ fhp and resfhp need an fh_put
1933 *
1934 * Returns nfs_ok on success, or an nfsstat in network byte order.
1935 */
1936 __be32
nfsd_symlink(struct svc_rqst * rqstp,struct svc_fh * fhp,char * fname,int flen,char * path,struct nfsd_attrs * attrs,struct svc_fh * resfhp)1937 nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
1938 char *fname, int flen,
1939 char *path, struct nfsd_attrs *attrs,
1940 struct svc_fh *resfhp)
1941 {
1942 struct dentry *dentry, *dnew;
1943 __be32 err, cerr;
1944 int host_err;
1945
1946 trace_nfsd_vfs_symlink(rqstp, fhp, fname, flen, path);
1947
1948 err = nfserr_noent;
1949 if (!flen || path[0] == '\0')
1950 goto out;
1951 err = nfserr_exist;
1952 if (isdotent(fname, flen))
1953 goto out;
1954
1955 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
1956 if (err)
1957 goto out;
1958
1959 host_err = fh_want_write(fhp);
1960 if (host_err) {
1961 err = nfserrno(host_err);
1962 goto out;
1963 }
1964
1965 dentry = fhp->fh_dentry;
1966 dnew = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
1967 if (IS_ERR(dnew)) {
1968 err = nfserrno(PTR_ERR(dnew));
1969 goto out_drop_write;
1970 }
1971 err = fh_fill_pre_attrs(fhp);
1972 if (err != nfs_ok)
1973 goto out_unlock;
1974 host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path, NULL);
1975 err = nfserrno(host_err);
1976 cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
1977 if (!err)
1978 nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
1979 fh_fill_post_attrs(fhp);
1980 out_unlock:
1981 end_creating(dnew);
1982 if (!err)
1983 err = nfserrno(commit_metadata(fhp));
1984 if (!err)
1985 err = cerr;
1986 out_drop_write:
1987 fh_drop_write(fhp);
1988 out:
1989 return err;
1990 }
1991
1992 /**
1993 * nfsd_link - create a link
1994 * @rqstp: RPC transaction context
1995 * @ffhp: the file handle of the directory where the new link is to be created
1996 * @name: the filename of the new link
1997 * @len: the length of @name in octets
1998 * @tfhp: the file handle of an existing file object
1999 *
2000 * After this call _both_ ffhp and tfhp need an fh_put.
2001 *
2002 * Returns a generic NFS status code in network byte-order.
2003 */
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
	  char *name, int len, struct svc_fh *tfhp)
{
	struct dentry *ddir, *dnew, *dold;
	struct inode *dirp;
	int type;
	__be32 err;
	int host_err;

	trace_nfsd_vfs_link(rqstp, ffhp, tfhp, name, len);

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
	if (err)
		goto out;
	/* Hard links to directories are not permitted */
	err = nfserr_isdir;
	if (d_is_dir(tfhp->fh_dentry))
		goto out;
	err = nfserr_perm;
	if (!len)
		goto out;
	err = nfserr_exist;
	if (isdotent(name, len))
		goto out;

	err = nfs_ok;
	/* Remember the object type for the -EBUSY mapping below */
	type = d_inode(tfhp->fh_dentry)->i_mode & S_IFMT;
	host_err = fh_want_write(tfhp);
	if (host_err)
		goto out;

	ddir = ffhp->fh_dentry;
	dirp = d_inode(ddir);
	/* Look up and lock the new name in the target directory */
	dnew = start_creating(&nop_mnt_idmap, ddir, &QSTR_LEN(name, len));

	if (IS_ERR(dnew)) {
		host_err = PTR_ERR(dnew);
		goto out_drop_write;
	}

	dold = tfhp->fh_dentry;

	/* The link source may have been unlinked in the meantime */
	err = nfserr_noent;
	if (d_really_is_negative(dold))
		goto out_unlock;
	err = fh_fill_pre_attrs(ffhp);
	if (err != nfs_ok)
		goto out_unlock;
	host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
	fh_fill_post_attrs(ffhp);
out_unlock:
	end_creating(dnew);
	if (!host_err) {
		host_err = commit_metadata(ffhp);
		if (!host_err)
			host_err = commit_metadata(tfhp);
	}

out_drop_write:
	fh_drop_write(tfhp);
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.9.4 para 1-2: NFSv4 LINK
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	}
out:
	/* An NFS status set above wins; otherwise map the host errno */
	return err != nfs_ok ? err : nfserrno(host_err);
}
2080
2081 static void
nfsd_close_cached_files(struct dentry * dentry)2082 nfsd_close_cached_files(struct dentry *dentry)
2083 {
2084 struct inode *inode = d_inode(dentry);
2085
2086 if (inode && S_ISREG(inode->i_mode))
2087 nfsd_file_close_inode_sync(inode);
2088 }
2089
2090 static bool
nfsd_has_cached_files(struct dentry * dentry)2091 nfsd_has_cached_files(struct dentry *dentry)
2092 {
2093 bool ret = false;
2094 struct inode *inode = d_inode(dentry);
2095
2096 if (inode && S_ISREG(inode->i_mode))
2097 ret = nfsd_file_is_cached(inode);
2098 return ret;
2099 }
2100
2101 /**
2102 * nfsd_rename - rename a directory entry
2103 * @rqstp: RPC transaction context
2104 * @ffhp: the file handle of parent directory containing the entry to be renamed
2105 * @fname: the filename of directory entry to be renamed
2106 * @flen: the length of @fname in octets
2107 * @tfhp: the file handle of parent directory to contain the renamed entry
2108 * @tname: the filename of the new entry
2109 * @tlen: the length of @tlen in octets
2110 *
2111 * After this call _both_ ffhp and tfhp need an fh_put.
2112 *
2113 * Returns a generic NFS status code in network byte-order.
2114 */
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
	    struct svc_fh *tfhp, char *tname, int tlen)
{
	struct dentry *fdentry, *tdentry;
	int type = S_IFDIR;	/* default until the source object is looked up */
	struct renamedata rd = {};
	__be32 err;
	int host_err;
	struct dentry *close_cached;

	trace_nfsd_vfs_rename(rqstp, ffhp, tfhp, fname, flen, tname, tlen);

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	fdentry = ffhp->fh_dentry;

	tdentry = tfhp->fh_dentry;

	err = nfserr_perm;
	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
		goto out;

	/* Renames across exports (or across filesystems) are refused */
	err = nfserr_xdev;
	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
		goto out;
	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
		goto out;

retry:
	close_cached = NULL;
	host_err = fh_want_write(ffhp);
	if (host_err) {
		err = nfserrno(host_err);
		goto out;
	}

	rd.mnt_idmap = &nop_mnt_idmap;
	rd.old_parent = fdentry;
	rd.new_parent = tdentry;

	/* Looks up both names and takes the locks needed for vfs_rename() */
	host_err = start_renaming(&rd, 0, &QSTR_LEN(fname, flen),
				  &QSTR_LEN(tname, tlen));

	if (host_err) {
		err = nfserrno(host_err);
		goto out_want_write;
	}
	err = fh_fill_pre_attrs(ffhp);
	if (err != nfs_ok)
		goto out_unlock;
	err = fh_fill_pre_attrs(tfhp);
	if (err != nfs_ok)
		goto out_unlock;

	type = d_inode(rd.old_dentry)->i_mode & S_IFMT;

	/* If the rename target exists, its type decides the -EBUSY mapping */
	if (d_inode(rd.new_dentry))
		type = d_inode(rd.new_dentry)->i_mode & S_IFMT;

	if ((rd.new_dentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
	    nfsd_has_cached_files(rd.new_dentry)) {
		/* Defer: close cached files after dropping locks, then retry */
		close_cached = dget(rd.new_dentry);
		goto out_unlock;
	} else {
		int retries;

		/* Retry once after waiting for an outstanding delegation */
		for (retries = 1;;) {
			host_err = vfs_rename(&rd);
			if (host_err != -EAGAIN || !retries--)
				break;
			if (!nfsd_wait_for_delegreturn(rqstp, d_inode(rd.old_dentry)))
				break;
		}
		if (!host_err) {
			host_err = commit_metadata(tfhp);
			if (!host_err)
				host_err = commit_metadata(ffhp);
		}
	}
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.26.4 para 1-3: NFSv4 RENAME
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	} else {
		err = nfserrno(host_err);
	}

	/* Post-op attrs are skipped on the retry path; the retry fills them */
	if (!close_cached) {
		fh_fill_post_attrs(ffhp);
		fh_fill_post_attrs(tfhp);
	}
out_unlock:
	end_renaming(&rd);
out_want_write:
	fh_drop_write(ffhp);

	/*
	 * If the target dentry has cached open files, then we need to
	 * try to close them prior to doing the rename. Final fput
	 * shouldn't be done with locks held however, so we delay it
	 * until this point and then reattempt the whole shebang.
	 */
	if (close_cached) {
		nfsd_close_cached_files(close_cached);
		dput(close_cached);
		goto retry;
	}
out:
	return err;
}
2236
2237 /**
2238 * nfsd_unlink - remove a directory entry
2239 * @rqstp: RPC transaction context
2240 * @fhp: the file handle of the parent directory to be modified
2241 * @type: enforced file type of the object to be removed
2242 * @fname: the name of directory entry to be removed
2243 * @flen: length of @fname in octets
2244 *
2245 * After this call fhp needs an fh_put.
2246 *
2247 * Returns a generic NFS status code in network byte-order.
2248 */
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
	    char *fname, int flen)
{
	struct dentry *dentry, *rdentry;
	struct inode *dirp;
	struct inode *rinode = NULL;
	__be32 err;
	int host_err;

	trace_nfsd_vfs_unlink(rqstp, fhp, fname, flen);

	/* "." and ".." (and an empty name) can never be removed */
	err = nfserr_acces;
	if (!flen || isdotent(fname, flen))
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_nfserr;

	dentry = fhp->fh_dentry;
	dirp = d_inode(dentry);

	/* Look up the victim and lock the parent directory */
	rdentry = start_removing(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));

	host_err = PTR_ERR(rdentry);
	if (IS_ERR(rdentry))
		goto out_drop_write;

	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;

	rinode = d_inode(rdentry);
	/* Prevent truncation until after locks dropped */
	ihold(rinode);

	/* If the caller did not constrain the type, use the actual one */
	if (!type)
		type = d_inode(rdentry)->i_mode & S_IFMT;

	if (type != S_IFDIR) {
		int retries;

		if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
			nfsd_close_cached_files(rdentry);

		/* Retry once after waiting for an outstanding delegation */
		for (retries = 1;;) {
			host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL);
			if (host_err != -EAGAIN || !retries--)
				break;
			if (!nfsd_wait_for_delegreturn(rqstp, rinode))
				break;
		}
	} else {
		host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry, NULL);
	}
	fh_fill_post_attrs(fhp);

out_unlock:
	end_removing(rdentry);
	if (!err && !host_err)
		host_err = commit_metadata(fhp);
	iput(rinode);    /* truncate the inode here */

out_drop_write:
	fh_drop_write(fhp);
out_nfserr:
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	}
out:
	/* An NFS status set above wins; otherwise map the host errno */
	return err != nfs_ok ? err : nfserrno(host_err);
}
2332
2333 /*
2334 * We do this buffering because we must not call back into the file
2335 * system's ->lookup() method from the filldir callback. That may well
2336 * deadlock a number of file systems.
2337 *
2338 * This is based heavily on the implementation of same in XFS.
2339 */
/*
 * One directory entry as buffered by nfsd_buffered_filldir(). Records
 * are packed back-to-back into a page-sized buffer, each padded out to
 * u64 alignment (see the ALIGN() arithmetic in the filldir callback).
 */
struct buffered_dirent {
	u64 ino;		/* inode number of the entry */
	loff_t offset;		/* directory cookie from the filesystem */
	int namlen;		/* length of name[]; not NUL-terminated */
	unsigned int d_type;	/* DT_* type as reported by the filesystem */
	char name[];		/* entry name, namlen octets */
};
2347
/* Per-call state shared between iterate_dir() and the filldir callback */
struct readdir_data {
	struct dir_context ctx;	/* embedded; recovered via container_of() */
	char *dirent;		/* one page holding buffered_dirent records */
	size_t used;		/* bytes of dirent[] filled so far */
	int full;		/* set when the next record would not fit */
};
2354
nfsd_buffered_filldir(struct dir_context * ctx,const char * name,int namlen,loff_t offset,u64 ino,unsigned int d_type)2355 static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
2356 int namlen, loff_t offset, u64 ino,
2357 unsigned int d_type)
2358 {
2359 struct readdir_data *buf =
2360 container_of(ctx, struct readdir_data, ctx);
2361 struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
2362 unsigned int reclen;
2363
2364 reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
2365 if (buf->used + reclen > PAGE_SIZE) {
2366 buf->full = 1;
2367 return false;
2368 }
2369
2370 de->namlen = namlen;
2371 de->offset = offset;
2372 de->ino = ino;
2373 de->d_type = d_type;
2374 memcpy(de->name, name, namlen);
2375 buf->used += reclen;
2376
2377 return true;
2378 }
2379
/*
 * Read the directory one page-sized batch at a time: fill the buffer
 * via iterate_dir(), then feed each buffered entry to @func (the
 * per-version XDR encoder) outside of the filesystem's own locking.
 * @offsetp is advanced so a subsequent call resumes where this one
 * stopped.
 */
static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
				    nfsd_filldir_t func, struct readdir_cd *cdp,
				    loff_t *offsetp)
{
	struct buffered_dirent *de;
	int host_err;
	int size;
	loff_t offset;
	struct readdir_data buf = {
		.ctx.actor = nfsd_buffered_filldir,
		.dirent = (void *)__get_free_page(GFP_KERNEL)
	};

	if (!buf.dirent)
		return nfserrno(-ENOMEM);

	offset = *offsetp;

	while (1) {
		unsigned int reclen;

		cdp->err = nfserr_eof; /* will be cleared on successful read */
		buf.used = 0;
		buf.full = 0;

		host_err = iterate_dir(file, &buf.ctx);
		/* A full buffer just means "drain and go again", not failure */
		if (buf.full)
			host_err = 0;

		if (host_err < 0)
			break;

		size = buf.used;

		/* Nothing buffered: end of directory */
		if (!size)
			break;

		de = (struct buffered_dirent *)buf.dirent;
		while (size > 0) {
			offset = de->offset;

			/* @func returns non-zero when the reply is full */
			if (func(cdp, de->name, de->namlen, de->offset,
				 de->ino, de->d_type))
				break;

			if (cdp->err != nfs_ok)
				break;

			trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);

			/* Step to the next u64-aligned record */
			reclen = ALIGN(sizeof(*de) + de->namlen,
				       sizeof(u64));
			size -= reclen;
			de = (struct buffered_dirent *)((char *)de + reclen);
		}
		if (size > 0) /* We bailed out early */
			break;

		/* Note the directory position for the next batch */
		offset = vfs_llseek(file, 0, SEEK_CUR);
	}

	free_page((unsigned long)(buf.dirent));

	if (host_err)
		return nfserrno(host_err);

	*offsetp = offset;
	return cdp->err;
}
2449
2450 /**
2451 * nfsd_readdir - Read entries from a directory
2452 * @rqstp: RPC transaction context
2453 * @fhp: NFS file handle of directory to be read
2454 * @offsetp: OUT: seek offset of final entry that was read
2455 * @cdp: OUT: an eof error value
2456 * @func: entry filler actor
2457 *
2458 * This implementation ignores the NFSv3/4 verifier cookie.
2459 *
2460 * NB: normal system calls hold file->f_pos_lock when calling
2461 * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
2462 * Because the struct file acquired here is not visible to other
2463 * threads, it's internal state does not need mutex protection.
2464 *
2465 * Returns nfs_ok on success, otherwise an nfsstat code is
2466 * returned.
2467 */
__be32
nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
	     struct readdir_cd *cdp, nfsd_filldir_t func)
{
	__be32 err;
	struct file *file;
	loff_t offset = *offsetp;
	int may_flags = NFSD_MAY_READ;

	err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
	if (err)
		goto out;

	/* Tell the filesystem which directory-cookie width the FH uses */
	if (fhp->fh_64bit_cookies)
		file->f_mode |= FMODE_64BITHASH;
	else
		file->f_mode |= FMODE_32BITHASH;

	/* Resume at the client-supplied cookie */
	offset = vfs_llseek(file, offset, SEEK_SET);
	if (offset < 0) {
		err = nfserrno((int)offset);
		goto out_close;
	}

	err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp);

	/* EOF/TOOSMALL are reported via cdp->err, not the RPC status */
	if (err == nfserr_eof || err == nfserr_toosmall)
		err = nfs_ok; /* can still be found in ->err */
out_close:
	nfsd_filp_close(file);
out:
	return err;
}
2501
/**
 * nfsd_filp_close - close a file synchronously
 * @fp: the file to close
 *
 * nfsd_filp_close() is similar in behaviour to filp_close().
 * The difference is that if this is the final close on the
 * file, then that finalisation happens immediately, rather than
 * being handed over to a work_queue, as is the case for
 * filp_close().
 * When a user-space process closes a file (even when using
 * filp_close()) the finalisation happens before returning to
 * userspace, so it is effectively synchronous.  When a kernel thread
 * uses filp_close(), on the other hand, the handling is completely
 * asynchronous.  This means that any cost imposed by that finalisation
 * is not imposed on the nfsd thread, and nfsd could potentially
 * close files more quickly than the work queue finalises the close,
 * which would lead to unbounded growth in the queue.
 *
 * In some contexts it is not safe to synchronously wait for
 * close finalisation (see comment for __fput_sync()), but nfsd
 * does not match those contexts.  In particular it does not, at the
 * time that this function is called, hold any locks, and no finalisation
 * of any file, socket, or device driver would have any cause to wait
 * for nfsd to make progress.
 */
void nfsd_filp_close(struct file *fp)
{
	/* Hold an extra reference so the final fput happens below, not
	 * inside filp_close(), letting __fput_sync() finalise inline. */
	get_file(fp);
	filp_close(fp, NULL);
	__fput_sync(fp);
}
2533
2534 /*
2535 * Get file system stats
2536 * N.B. After this call fhp needs an fh_put
2537 */
2538 __be32
nfsd_statfs(struct svc_rqst * rqstp,struct svc_fh * fhp,struct kstatfs * stat,int access)2539 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2540 {
2541 __be32 err;
2542
2543 trace_nfsd_vfs_statfs(rqstp, fhp);
2544
2545 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2546 if (!err) {
2547 struct path path = {
2548 .mnt = fhp->fh_export->ex_path.mnt,
2549 .dentry = fhp->fh_dentry,
2550 };
2551 if (vfs_statfs(&path, stat))
2552 err = nfserr_io;
2553 }
2554 return err;
2555 }
2556
exp_rdonly(struct svc_cred * cred,struct svc_export * exp)2557 static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
2558 {
2559 return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
2560 }
2561
2562 #ifdef CONFIG_NFSD_V4
2563 /*
2564 * Helper function to translate error numbers. In the case of xattr operations,
2565 * some error codes need to be translated outside of the standard translations.
2566 *
2567 * ENODATA needs to be translated to nfserr_noxattr.
2568 * E2BIG to nfserr_xattr2big.
2569 *
2570 * Additionally, vfs_listxattr can return -ERANGE. This means that the
2571 * file has too many extended attributes to retrieve inside an
2572 * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation:
2573 * filesystems will allow the adding of extended attributes until they hit
2574 * their own internal limit. This limit may be larger than XATTR_LIST_MAX.
2575 * So, at that point, the attributes are present and valid, but can't
2576 * be retrieved using listxattr, since the upper level xattr code enforces
2577 * the XATTR_LIST_MAX limit.
2578 *
2579 * This bug means that we need to deal with listxattr returning -ERANGE. The
2580 * best mapping is to return TOOSMALL.
2581 */
2582 static __be32
nfsd_xattr_errno(int err)2583 nfsd_xattr_errno(int err)
2584 {
2585 switch (err) {
2586 case -ENODATA:
2587 return nfserr_noxattr;
2588 case -E2BIG:
2589 return nfserr_xattr2big;
2590 case -ERANGE:
2591 return nfserr_toosmall;
2592 }
2593 return nfserrno(err);
2594 }
2595
2596 /*
2597 * Retrieve the specified user extended attribute. To avoid always
2598 * having to allocate the maximum size (since we are not getting
2599 * a maximum size from the RPC), do a probe + alloc. Hold a reader
2600 * lock on i_rwsem to prevent the extended attribute from changing
2601 * size while we're doing this.
2602 */
__be32
nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
	      void **bufp, int *lenp)
{
	ssize_t len;
	__be32 err;
	char *buf;
	struct inode *inode;
	struct dentry *dentry;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
	if (err)
		return err;

	err = nfs_ok;
	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);

	/* Shared lock keeps the attribute size stable across both calls */
	inode_lock_shared(inode);

	/* Probe: a NULL buffer asks only for the value's size */
	len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0);

	/*
	 * Zero-length attribute, just return.
	 */
	if (len == 0) {
		*bufp = NULL;
		*lenp = 0;
		goto out;
	}

	if (len < 0) {
		err = nfsd_xattr_errno(len);
		goto out;
	}

	/* *lenp carries the caller's buffer limit on entry */
	if (len > *lenp) {
		err = nfserr_toosmall;
		goto out;
	}

	buf = kvmalloc(len, GFP_KERNEL);
	if (buf == NULL) {
		err = nfserr_jukebox;
		goto out;
	}

	/* Second call reads the value into the sized buffer */
	len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len);
	if (len <= 0) {
		kvfree(buf);
		buf = NULL;
		err = nfsd_xattr_errno(len);
	}

	/* On success, ownership of @buf transfers to the caller */
	*lenp = len;
	*bufp = buf;

out:
	inode_unlock_shared(inode);

	return err;
}
2665
2666 /*
2667 * Retrieve the xattr names. Since we can't know how many are
2668 * user extended attributes, we must get all attributes here,
2669 * and have the XDR encode filter out the "user." ones.
2670 *
2671 * While this could always just allocate an XATTR_LIST_MAX
2672 * buffer, that's a waste, so do a probe + allocate. To
2673 * avoid any changes between the probe and allocate, wrap
2674 * this in inode_lock.
2675 */
__be32
nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
	       int *lenp)
{
	ssize_t len;
	__be32 err;
	char *buf;
	struct inode *inode;
	struct dentry *dentry;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
	if (err)
		return err;

	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);
	*lenp = 0;

	/* Shared lock keeps the list size stable between probe and read */
	inode_lock_shared(inode);

	/* Probe: a NULL buffer asks only for the required size */
	len = vfs_listxattr(dentry, NULL, 0);
	if (len <= 0) {
		err = nfsd_xattr_errno(len);
		goto out;
	}

	if (len > XATTR_LIST_MAX) {
		err = nfserr_xattr2big;
		goto out;
	}

	buf = kvmalloc(len, GFP_KERNEL);
	if (buf == NULL) {
		err = nfserr_jukebox;
		goto out;
	}

	/* Second call fills the buffer with the NUL-separated name list */
	len = vfs_listxattr(dentry, buf, len);
	if (len <= 0) {
		kvfree(buf);
		err = nfsd_xattr_errno(len);
		goto out;
	}

	/* On success, ownership of @buf transfers to the caller */
	*lenp = len;
	*bufp = buf;

	err = nfs_ok;
out:
	inode_unlock_shared(inode);

	return err;
}
2729
2730 /**
2731 * nfsd_removexattr - Remove an extended attribute
2732 * @rqstp: RPC transaction being executed
2733 * @fhp: NFS filehandle of object with xattr to remove
2734 * @name: name of xattr to remove (NUL-terminate)
2735 *
2736 * Pass in a NULL pointer for delegated_inode, and let the client deal
2737 * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
2738 *
2739 * Returns nfs_ok on success, or an nfsstat in network byte order.
2740 */
__be32
nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
{
	__be32 err;
	int ret;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
	if (err)
		return err;

	ret = fh_want_write(fhp);
	if (ret)
		return nfserrno(ret);

	/* Exclusive lock covers the pre/post attribute sampling too */
	inode_lock(fhp->fh_dentry->d_inode);
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	/* NULL delegated_inode: the client retries on NFS4ERR_DELAY */
	ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
				       name, NULL);
	err = nfsd_xattr_errno(ret);
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(fhp->fh_dentry->d_inode);
	fh_drop_write(fhp);

	return err;
}
2769
/**
 * nfsd_setxattr - set an extended attribute
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of object to set the xattr on
 * @name: name of the xattr to set (NUL-terminated)
 * @buf: value of the xattr
 * @len: length of @buf in octets
 * @flags: flags passed through to __vfs_setxattr_locked()
 *
 * Pass in a NULL pointer for delegated_inode, and let the client deal
 * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
	      void *buf, u32 len, u32 flags)
{
	__be32 err;
	int ret;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
	if (err)
		return err;

	ret = fh_want_write(fhp);
	if (ret)
		return nfserrno(ret);
	/* Exclusive lock covers the pre/post attribute sampling too */
	inode_lock(fhp->fh_dentry->d_inode);
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
				    name, buf, len, flags, NULL);
	fh_fill_post_attrs(fhp);
	err = nfsd_xattr_errno(ret);
out_unlock:
	inode_unlock(fhp->fh_dentry->d_inode);
	fh_drop_write(fhp);
	return err;
}
2797 #endif
2798
2799 /*
2800 * Check for a user's access permissions to this inode.
2801 */
2802 __be32
nfsd_permission(struct svc_cred * cred,struct svc_export * exp,struct dentry * dentry,int acc)2803 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
2804 struct dentry *dentry, int acc)
2805 {
2806 struct inode *inode = d_inode(dentry);
2807 int err;
2808
2809 if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
2810 return 0;
2811 #if 0
2812 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
2813 acc,
2814 (acc & NFSD_MAY_READ)? " read" : "",
2815 (acc & NFSD_MAY_WRITE)? " write" : "",
2816 (acc & NFSD_MAY_EXEC)? " exec" : "",
2817 (acc & NFSD_MAY_SATTR)? " sattr" : "",
2818 (acc & NFSD_MAY_TRUNC)? " trunc" : "",
2819 (acc & NFSD_MAY_NLM)? " nlm" : "",
2820 (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
2821 inode->i_mode,
2822 IS_IMMUTABLE(inode)? " immut" : "",
2823 IS_APPEND(inode)? " append" : "",
2824 __mnt_is_readonly(exp->ex_path.mnt)? " ro" : "");
2825 dprintk(" owner %d/%d user %d/%d\n",
2826 inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
2827 #endif
2828
2829 /* Normally we reject any write/sattr etc access on a read-only file
2830 * system. But if it is IRIX doing check on write-access for a
2831 * device special file, we ignore rofs.
2832 */
2833 if (!(acc & NFSD_MAY_LOCAL_ACCESS))
2834 if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
2835 if (exp_rdonly(cred, exp) ||
2836 __mnt_is_readonly(exp->ex_path.mnt))
2837 return nfserr_rofs;
2838 if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
2839 return nfserr_perm;
2840 }
2841 if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
2842 return nfserr_perm;
2843
2844 /*
2845 * The file owner always gets access permission for accesses that
2846 * would normally be checked at open time. This is to make
2847 * file access work even when the client has done a fchmod(fd, 0).
2848 *
2849 * However, `cp foo bar' should fail nevertheless when bar is
2850 * readonly. A sensible way to do this might be to reject all
2851 * attempts to truncate a read-only file, because a creat() call
2852 * always implies file truncation.
2853 * ... but this isn't really fair. A process may reasonably call
2854 * ftruncate on an open file descriptor on a file with perm 000.
2855 * We must trust the client to do permission checking - using "ACCESS"
2856 * with NFSv3.
2857 */
2858 if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
2859 uid_eq(inode->i_uid, current_fsuid()))
2860 return 0;
2861
2862 /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
2863 err = inode_permission(&nop_mnt_idmap, inode,
2864 acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2865
2866 /* Allow read access to binaries even when mode 111 */
2867 if (err == -EACCES && S_ISREG(inode->i_mode) &&
2868 (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
2869 acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
2870 err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC);
2871
2872 return err? nfserrno(err) : 0;
2873 }
2874