// SPDX-License-Identifier: GPL-2.0
/*
 * File operations used by nfsd. Some of these have been ripped from
 * other parts of the kernel because they weren't exported, others
 * are partial duplicates with added or changed functionality.
 *
 * Note that several functions dget() the dentry upon which they want
 * to act, most notably those that create directory entries. Response
 * dentries are dput()'d if necessary in the release callback.
 * So if you notice code paths that apparently fail to dput() the
 * dentry, don't worry--they have been taken care of.
 *
 * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
 * Zerocopy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/security.h>

#include "xdr3.h"

#ifdef CONFIG_NFSD_V4
#include "../internal.h"
#include "acl.h"
#include "idmap.h"
#include "xdr4.h"
#endif /* CONFIG_NFSD_V4 */

#include "nfsd.h"
#include "vfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_FILEOP

/**
 * nfserrno - Map Linux errnos to NFS errnos
 * @errno: POSIX(-ish) error code to be mapped
 *
 * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
 * it's an error we don't expect, log it once and return nfserr_io.
 */
__be32
nfserrno (int errno)
{
	static struct {
		__be32	nfserr;
		int	syserr;
	} nfs_errtbl[] = {
		{ nfs_ok, 0 },
		{ nfserr_perm, -EPERM },
		{ nfserr_noent, -ENOENT },
		{ nfserr_io, -EIO },
		{ nfserr_nxio, -ENXIO },
		{ nfserr_fbig, -E2BIG },
		{ nfserr_stale, -EBADF },
		{ nfserr_acces, -EACCES },
		{ nfserr_exist, -EEXIST },
		{ nfserr_xdev, -EXDEV },
		{ nfserr_mlink, -EMLINK },
		{ nfserr_nodev, -ENODEV },
		{ nfserr_notdir, -ENOTDIR },
		{ nfserr_isdir, -EISDIR },
		{ nfserr_inval, -EINVAL },
		{ nfserr_fbig, -EFBIG },
		{ nfserr_nospc, -ENOSPC },
		{ nfserr_rofs, -EROFS },
		{ nfserr_mlink, -EMLINK },
		{ nfserr_nametoolong, -ENAMETOOLONG },
		{ nfserr_notempty, -ENOTEMPTY },
		{ nfserr_dquot, -EDQUOT },
		{ nfserr_stale, -ESTALE },
		{ nfserr_jukebox, -ETIMEDOUT },
		{ nfserr_jukebox, -ERESTARTSYS },
		{ nfserr_jukebox, -EAGAIN },
		{ nfserr_jukebox, -EWOULDBLOCK },
		{ nfserr_jukebox, -ENOMEM },
		{ nfserr_io, -ETXTBSY },
		{ nfserr_notsupp, -EOPNOTSUPP },
		{ nfserr_toosmall, -ETOOSMALL },
		{ nfserr_serverfault, -ESERVERFAULT },
		{ nfserr_serverfault, -ENFILE },
		{ nfserr_io, -EREMOTEIO },
		{ nfserr_stale, -EOPENSTALE },
		{ nfserr_io, -EUCLEAN },
		{ nfserr_perm, -ENOKEY },
		{ nfserr_no_grace, -ENOGRACE },
		{ nfserr_io, -EBADMSG },
	};
	int	i;

	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
		if (nfs_errtbl[i].syserr == errno)
			return nfs_errtbl[i].nfserr;
	}
	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
	return nfserr_io;
}

/*
 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
 * a mount point.
118 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged, 119 * or nfs_ok having possibly changed *dpp and *expp 120 */ 121 int 122 nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, 123 struct svc_export **expp) 124 { 125 struct svc_export *exp = *expp, *exp2 = NULL; 126 struct dentry *dentry = *dpp; 127 struct path path = {.mnt = mntget(exp->ex_path.mnt), 128 .dentry = dget(dentry)}; 129 unsigned int follow_flags = 0; 130 int err = 0; 131 132 if (exp->ex_flags & NFSEXP_CROSSMOUNT) 133 follow_flags = LOOKUP_AUTOMOUNT; 134 135 err = follow_down(&path, follow_flags); 136 if (err < 0) 137 goto out; 138 if (path.mnt == exp->ex_path.mnt && path.dentry == dentry && 139 nfsd_mountpoint(dentry, exp) == 2) { 140 /* This is only a mountpoint in some other namespace */ 141 path_put(&path); 142 goto out; 143 } 144 145 exp2 = rqst_exp_get_by_name(rqstp, &path); 146 if (IS_ERR(exp2)) { 147 err = PTR_ERR(exp2); 148 /* 149 * We normally allow NFS clients to continue 150 * "underneath" a mountpoint that is not exported. 151 * The exception is V4ROOT, where no traversal is ever 152 * allowed without an explicit export of the new 153 * directory. 154 */ 155 if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT)) 156 err = 0; 157 path_put(&path); 158 goto out; 159 } 160 if (nfsd_v4client(rqstp) || 161 (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) { 162 /* successfully crossed mount point */ 163 /* 164 * This is subtle: path.dentry is *not* on path.mnt 165 * at this point. The only reason we are safe is that 166 * original mnt is pinned down by exp, so we should 167 * put path *before* putting exp 168 */ 169 *dpp = path.dentry; 170 path.dentry = dentry; 171 *expp = exp2; 172 exp2 = exp; 173 } 174 path_put(&path); 175 exp_put(exp2); 176 out: 177 return err; 178 } 179 180 static void follow_to_parent(struct path *path) 181 { 182 struct dentry *dp; 183 184 while (path->dentry == path->mnt->mnt_root && follow_up(path)) 185 ; 186 dp = dget_parent(path->dentry); 187 dput(path->dentry); 188 path->dentry = dp; 189 } 190 191 static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp) 192 { 193 struct svc_export *exp2; 194 struct path path = {.mnt = mntget((*exp)->ex_path.mnt), 195 .dentry = dget(dparent)}; 196 197 follow_to_parent(&path); 198 199 exp2 = rqst_exp_parent(rqstp, &path); 200 if (PTR_ERR(exp2) == -ENOENT) { 201 *dentryp = dget(dparent); 202 } else if (IS_ERR(exp2)) { 203 path_put(&path); 204 return PTR_ERR(exp2); 205 } else { 206 *dentryp = dget(path.dentry); 207 exp_put(*exp); 208 *exp = exp2; 209 } 210 path_put(&path); 211 return 0; 212 } 213 214 /* 215 * For nfsd purposes, we treat V4ROOT exports as though there was an 216 * export at *every* directory. 217 * We return: 218 * '1' if this dentry *must* be an export point, 219 * '2' if it might be, if there is really a mount here, and 220 * '0' if there is no chance of an export point here. 221 */ 222 int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) 223 { 224 if (!d_inode(dentry)) 225 return 0; 226 if (exp->ex_flags & NFSEXP_V4ROOT) 227 return 1; 228 if (nfsd4_is_junction(dentry)) 229 return 1; 230 if (d_managed(dentry)) 231 /* 232 * Might only be a mountpoint in a different namespace, 233 * but we need to check. 
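 * nfsd_cross_mnt() settles the ambiguous case by calling follow_down()
 * and seeing whether anything is actually mounted there.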
234 */ 235 return 2; 236 return 0; 237 } 238 239 __be32 240 nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, 241 const char *name, unsigned int len, 242 struct svc_export **exp_ret, struct dentry **dentry_ret) 243 { 244 struct svc_export *exp; 245 struct dentry *dparent; 246 struct dentry *dentry; 247 int host_err; 248 249 dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name); 250 251 dparent = fhp->fh_dentry; 252 exp = exp_get(fhp->fh_export); 253 254 /* Lookup the name, but don't follow links */ 255 if (isdotent(name, len)) { 256 if (len==1) 257 dentry = dget(dparent); 258 else if (dparent != exp->ex_path.dentry) 259 dentry = dget_parent(dparent); 260 else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp)) 261 dentry = dget(dparent); /* .. == . just like at / */ 262 else { 263 /* checking mountpoint crossing is very different when stepping up */ 264 host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry); 265 if (host_err) 266 goto out_nfserr; 267 } 268 } else { 269 dentry = lookup_one_len_unlocked(name, dparent, len); 270 host_err = PTR_ERR(dentry); 271 if (IS_ERR(dentry)) 272 goto out_nfserr; 273 if (nfsd_mountpoint(dentry, exp)) { 274 host_err = nfsd_cross_mnt(rqstp, &dentry, &exp); 275 if (host_err) { 276 dput(dentry); 277 goto out_nfserr; 278 } 279 } 280 } 281 *dentry_ret = dentry; 282 *exp_ret = exp; 283 return 0; 284 285 out_nfserr: 286 exp_put(exp); 287 return nfserrno(host_err); 288 } 289 290 /** 291 * nfsd_lookup - look up a single path component for nfsd 292 * 293 * @rqstp: the request context 294 * @fhp: the file handle of the directory 295 * @name: the component name, or %NULL to look up parent 296 * @len: length of name to examine 297 * @resfh: pointer to pre-initialised filehandle to hold result. 298 * 299 * Look up one component of a pathname. 300 * N.B. After this call _both_ fhp and resfh need an fh_put 301 * 302 * If the lookup would cross a mountpoint, and the mounted filesystem 303 * is exported to the client with NFSEXP_NOHIDE, then the lookup is 304 * accepted as it stands and the mounted directory is 305 * returned. Otherwise the covered directory is returned. 306 * NOTE: this mountpoint crossing is not supported properly by all 307 * clients and is explicitly disallowed for NFSv3 308 * 309 */ 310 __be32 311 nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, 312 unsigned int len, struct svc_fh *resfh) 313 { 314 struct svc_export *exp; 315 struct dentry *dentry; 316 __be32 err; 317 318 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); 319 if (err) 320 return err; 321 err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry); 322 if (err) 323 return err; 324 err = check_nfsd_access(exp, rqstp); 325 if (err) 326 goto out; 327 /* 328 * Note: we compose the file handle now, but as the 329 * dentry may be negative, it may need to be updated. 330 */ 331 err = fh_compose(resfh, exp, dentry, fhp); 332 if (!err && d_really_is_negative(dentry)) 333 err = nfserr_noent; 334 out: 335 dput(dentry); 336 exp_put(exp); 337 return err; 338 } 339 340 static void 341 commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp, 342 int err) 343 { 344 switch (err) { 345 case -EAGAIN: 346 case -ESTALE: 347 /* 348 * Neither of these are the result of a problem with 349 * durable storage, so avoid a write verifier reset. 350 */ 351 break; 352 default: 353 nfsd_reset_write_verifier(nn); 354 trace_nfsd_writeverf_reset(nn, rqstp, err); 355 } 356 } 357 358 /* 359 * Commit metadata changes to stable storage. 
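 * If the export provides an export_operations->commit_metadata method
 * we use it; otherwise we fall back to sync_inode_metadata().
 * commit_metadata() additionally skips the work entirely for exports
 * that are not marked "sync".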
360 */ 361 static int 362 commit_inode_metadata(struct inode *inode) 363 { 364 const struct export_operations *export_ops = inode->i_sb->s_export_op; 365 366 if (export_ops->commit_metadata) 367 return export_ops->commit_metadata(inode); 368 return sync_inode_metadata(inode, 1); 369 } 370 371 static int 372 commit_metadata(struct svc_fh *fhp) 373 { 374 struct inode *inode = d_inode(fhp->fh_dentry); 375 376 if (!EX_ISSYNC(fhp->fh_export)) 377 return 0; 378 return commit_inode_metadata(inode); 379 } 380 381 /* 382 * Go over the attributes and take care of the small differences between 383 * NFS semantics and what Linux expects. 384 */ 385 static void 386 nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) 387 { 388 /* Ignore mode updates on symlinks */ 389 if (S_ISLNK(inode->i_mode)) 390 iap->ia_valid &= ~ATTR_MODE; 391 392 /* sanitize the mode change */ 393 if (iap->ia_valid & ATTR_MODE) { 394 iap->ia_mode &= S_IALLUGO; 395 iap->ia_mode |= (inode->i_mode & ~S_IALLUGO); 396 } 397 398 /* Revoke setuid/setgid on chown */ 399 if (!S_ISDIR(inode->i_mode) && 400 ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) { 401 iap->ia_valid |= ATTR_KILL_PRIV; 402 if (iap->ia_valid & ATTR_MODE) { 403 /* we're setting mode too, just clear the s*id bits */ 404 iap->ia_mode &= ~S_ISUID; 405 if (iap->ia_mode & S_IXGRP) 406 iap->ia_mode &= ~S_ISGID; 407 } else { 408 /* set ATTR_KILL_* bits and let VFS handle it */ 409 iap->ia_valid |= ATTR_KILL_SUID; 410 iap->ia_valid |= 411 setattr_should_drop_sgid(&nop_mnt_idmap, inode); 412 } 413 } 414 } 415 416 static __be32 417 nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp, 418 struct iattr *iap) 419 { 420 struct inode *inode = d_inode(fhp->fh_dentry); 421 422 if (iap->ia_size < inode->i_size) { 423 __be32 err; 424 425 err = nfsd_permission(&rqstp->rq_cred, 426 fhp->fh_export, fhp->fh_dentry, 427 NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE); 428 if (err) 429 return err; 430 } 431 return nfserrno(get_write_access(inode)); 432 } 433 434 static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap) 435 { 436 int host_err; 437 438 if (iap->ia_valid & ATTR_SIZE) { 439 /* 440 * RFC5661, Section 18.30.4: 441 * Changing the size of a file with SETATTR indirectly 442 * changes the time_modify and change attributes. 443 * 444 * (and similar for the older RFCs) 445 */ 446 struct iattr size_attr = { 447 .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME, 448 .ia_size = iap->ia_size, 449 }; 450 451 if (iap->ia_size < 0) 452 return -EFBIG; 453 454 host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL); 455 if (host_err) 456 return host_err; 457 iap->ia_valid &= ~ATTR_SIZE; 458 459 /* 460 * Avoid the additional setattr call below if the only other 461 * attribute that the client sends is the mtime, as we update 462 * it as part of the size change above. 463 */ 464 if ((iap->ia_valid & ~ATTR_MTIME) == 0) 465 return 0; 466 } 467 468 if (!iap->ia_valid) 469 return 0; 470 471 iap->ia_valid |= ATTR_CTIME; 472 return notify_change(&nop_mnt_idmap, dentry, iap, NULL); 473 } 474 475 /** 476 * nfsd_setattr - Set various file attributes. 477 * @rqstp: controlling RPC transaction 478 * @fhp: filehandle of target 479 * @attr: attributes to set 480 * @guardtime: do not act if ctime.tv_sec does not match this timestamp 481 * 482 * This call may adjust the contents of @attr (in particular, this 483 * call may change the bits in the na_iattr.ia_valid field). 484 * 485 * Returns nfs_ok on success, otherwise an NFS status code is 486 * returned. 
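 * When @guardtime is supplied, the inode's current ctime must match it
 * (seconds are compared modulo 2^32) or nfserr_notsync is returned
 * without changing any attributes.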
Caller must release @fhp by calling fh_put in either 487 * case. 488 */ 489 __be32 490 nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, 491 struct nfsd_attrs *attr, const struct timespec64 *guardtime) 492 { 493 struct dentry *dentry; 494 struct inode *inode; 495 struct iattr *iap = attr->na_iattr; 496 int accmode = NFSD_MAY_SATTR; 497 umode_t ftype = 0; 498 __be32 err; 499 int host_err = 0; 500 bool get_write_count; 501 bool size_change = (iap->ia_valid & ATTR_SIZE); 502 int retries; 503 504 if (iap->ia_valid & ATTR_SIZE) { 505 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; 506 ftype = S_IFREG; 507 } 508 509 /* 510 * If utimes(2) and friends are called with times not NULL, we should 511 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission 512 * will return EACCES, when the caller's effective UID does not match 513 * the owner of the file, and the caller is not privileged. In this 514 * situation, we should return EPERM(notify_change will return this). 515 */ 516 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) { 517 accmode |= NFSD_MAY_OWNER_OVERRIDE; 518 if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET))) 519 accmode |= NFSD_MAY_WRITE; 520 } 521 522 /* Callers that do fh_verify should do the fh_want_write: */ 523 get_write_count = !fhp->fh_dentry; 524 525 /* Get inode */ 526 err = fh_verify(rqstp, fhp, ftype, accmode); 527 if (err) 528 return err; 529 if (get_write_count) { 530 host_err = fh_want_write(fhp); 531 if (host_err) 532 goto out; 533 } 534 535 dentry = fhp->fh_dentry; 536 inode = d_inode(dentry); 537 538 nfsd_sanitize_attrs(inode, iap); 539 540 /* 541 * The size case is special, it changes the file in addition to the 542 * attributes, and file systems don't expect it to be mixed with 543 * "random" attribute changes. We thus split out the size change 544 * into a separate call to ->setattr, and do the rest as a separate 545 * setattr call. 546 */ 547 if (size_change) { 548 err = nfsd_get_write_access(rqstp, fhp, iap); 549 if (err) 550 return err; 551 } 552 553 inode_lock(inode); 554 err = fh_fill_pre_attrs(fhp); 555 if (err) 556 goto out_unlock; 557 558 if (guardtime) { 559 struct timespec64 ctime = inode_get_ctime(inode); 560 if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec || 561 guardtime->tv_nsec != ctime.tv_nsec) { 562 err = nfserr_notsync; 563 goto out_fill_attrs; 564 } 565 } 566 567 for (retries = 1;;) { 568 struct iattr attrs; 569 570 /* 571 * notify_change() can alter its iattr argument, making 572 * @iap unsuitable for submission multiple times. Make a 573 * copy for every loop iteration. 574 */ 575 attrs = *iap; 576 host_err = __nfsd_setattr(dentry, &attrs); 577 if (host_err != -EAGAIN || !retries--) 578 break; 579 if (!nfsd_wait_for_delegreturn(rqstp, inode)) 580 break; 581 } 582 if (attr->na_seclabel && attr->na_seclabel->len) 583 attr->na_labelerr = security_inode_setsecctx(dentry, 584 attr->na_seclabel->data, attr->na_seclabel->len); 585 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl) 586 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap, 587 dentry, ACL_TYPE_ACCESS, 588 attr->na_pacl); 589 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && 590 !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode)) 591 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap, 592 dentry, ACL_TYPE_DEFAULT, 593 attr->na_dpacl); 594 out_fill_attrs: 595 /* 596 * RFC 1813 Section 3.3.2 does not mandate that an NFS server 597 * returns wcc_data for SETATTR. 
Some client implementations 598 * depend on receiving wcc_data, however, to sort out partial 599 * updates (eg., the client requested that size and mode be 600 * modified, but the server changed only the file mode). 601 */ 602 fh_fill_post_attrs(fhp); 603 out_unlock: 604 inode_unlock(inode); 605 if (size_change) 606 put_write_access(inode); 607 out: 608 if (!host_err) 609 host_err = commit_metadata(fhp); 610 return err != 0 ? err : nfserrno(host_err); 611 } 612 613 #if defined(CONFIG_NFSD_V4) 614 /* 615 * NFS junction information is stored in an extended attribute. 616 */ 617 #define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs" 618 619 /** 620 * nfsd4_is_junction - Test if an object could be an NFS junction 621 * 622 * @dentry: object to test 623 * 624 * Returns 1 if "dentry" appears to contain NFS junction information. 625 * Otherwise 0 is returned. 626 */ 627 int nfsd4_is_junction(struct dentry *dentry) 628 { 629 struct inode *inode = d_inode(dentry); 630 631 if (inode == NULL) 632 return 0; 633 if (inode->i_mode & S_IXUGO) 634 return 0; 635 if (!(inode->i_mode & S_ISVTX)) 636 return 0; 637 if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME, 638 NULL, 0) <= 0) 639 return 0; 640 return 1; 641 } 642 643 static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp) 644 { 645 return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate; 646 } 647 648 __be32 nfsd4_clone_file_range(struct svc_rqst *rqstp, 649 struct nfsd_file *nf_src, u64 src_pos, 650 struct nfsd_file *nf_dst, u64 dst_pos, 651 u64 count, bool sync) 652 { 653 struct file *src = nf_src->nf_file; 654 struct file *dst = nf_dst->nf_file; 655 errseq_t since; 656 loff_t cloned; 657 __be32 ret = 0; 658 659 since = READ_ONCE(dst->f_wb_err); 660 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); 661 if (cloned < 0) { 662 ret = nfserrno(cloned); 663 goto out_err; 664 } 665 if (count && cloned != count) { 666 ret = nfserrno(-EINVAL); 667 goto out_err; 668 } 669 if (sync) { 670 loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX; 671 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0); 672 673 if (!status) 674 status = filemap_check_wb_err(dst->f_mapping, since); 675 if (!status) 676 status = commit_inode_metadata(file_inode(src)); 677 if (status < 0) { 678 struct nfsd_net *nn = net_generic(nf_dst->nf_net, 679 nfsd_net_id); 680 681 trace_nfsd_clone_file_range_err(rqstp, 682 &nfsd4_get_cstate(rqstp)->save_fh, 683 src_pos, 684 &nfsd4_get_cstate(rqstp)->current_fh, 685 dst_pos, 686 count, status); 687 commit_reset_write_verifier(nn, rqstp, status); 688 ret = nfserrno(status); 689 } 690 } 691 out_err: 692 return ret; 693 } 694 695 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, 696 u64 dst_pos, u64 count) 697 { 698 ssize_t ret; 699 700 /* 701 * Limit copy to 4MB to prevent indefinitely blocking an nfsd 702 * thread and client rpc slot. The choice of 4MB is somewhat 703 * arbitrary. We might instead base this on r/wsize, or make it 704 * tunable, or use a time instead of a byte limit, or implement 705 * asynchronous copy. In theory a client could also recognize a 706 * limit like this and pipeline multiple COPY requests. 
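 * If the filesystem cannot copy the range directly (-EOPNOTSUPP or
 * -EXDEV), the call is retried with COPY_FILE_SPLICE to fall back to
 * a splice-based copy.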
707 */ 708 count = min_t(u64, count, 1 << 22); 709 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); 710 711 if (ret == -EOPNOTSUPP || ret == -EXDEV) 712 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 713 COPY_FILE_SPLICE); 714 return ret; 715 } 716 717 __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp, 718 struct file *file, loff_t offset, loff_t len, 719 int flags) 720 { 721 int error; 722 723 if (!S_ISREG(file_inode(file)->i_mode)) 724 return nfserr_inval; 725 726 error = vfs_fallocate(file, flags, offset, len); 727 if (!error) 728 error = commit_metadata(fhp); 729 730 return nfserrno(error); 731 } 732 #endif /* defined(CONFIG_NFSD_V4) */ 733 734 /* 735 * Check server access rights to a file system object 736 */ 737 struct accessmap { 738 u32 access; 739 int how; 740 }; 741 static struct accessmap nfs3_regaccess[] = { 742 { NFS3_ACCESS_READ, NFSD_MAY_READ }, 743 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC }, 744 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC }, 745 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE }, 746 747 #ifdef CONFIG_NFSD_V4 748 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ }, 749 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE }, 750 { NFS4_ACCESS_XALIST, NFSD_MAY_READ }, 751 #endif 752 753 { 0, 0 } 754 }; 755 756 static struct accessmap nfs3_diraccess[] = { 757 { NFS3_ACCESS_READ, NFSD_MAY_READ }, 758 { NFS3_ACCESS_LOOKUP, NFSD_MAY_EXEC }, 759 { NFS3_ACCESS_MODIFY, NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC}, 760 { NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE }, 761 { NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE }, 762 763 #ifdef CONFIG_NFSD_V4 764 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ }, 765 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE }, 766 { NFS4_ACCESS_XALIST, NFSD_MAY_READ }, 767 #endif 768 769 { 0, 0 } 770 }; 771 772 static struct accessmap nfs3_anyaccess[] = { 773 /* Some clients - Solaris 2.6 at least, make an access call 774 * to the server to check for access for things like /dev/null 775 * (which really, the server doesn't care about). So 776 * We provide simple access checking for them, looking 777 * mainly at mode bits, and we make sure to ignore read-only 778 * filesystem checks 779 */ 780 { NFS3_ACCESS_READ, NFSD_MAY_READ }, 781 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC }, 782 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS }, 783 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS }, 784 785 { 0, 0 } 786 }; 787 788 __be32 789 nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported) 790 { 791 struct accessmap *map; 792 struct svc_export *export; 793 struct dentry *dentry; 794 u32 query, result = 0, sresult = 0; 795 __be32 error; 796 797 error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP); 798 if (error) 799 goto out; 800 801 export = fhp->fh_export; 802 dentry = fhp->fh_dentry; 803 804 if (d_is_reg(dentry)) 805 map = nfs3_regaccess; 806 else if (d_is_dir(dentry)) 807 map = nfs3_diraccess; 808 else 809 map = nfs3_anyaccess; 810 811 812 query = *access; 813 for (; map->access; map++) { 814 if (map->access & query) { 815 __be32 err2; 816 817 sresult |= map->access; 818 819 err2 = nfsd_permission(&rqstp->rq_cred, export, 820 dentry, map->how); 821 switch (err2) { 822 case nfs_ok: 823 result |= map->access; 824 break; 825 826 /* the following error codes just mean the access was not allowed, 827 * rather than an error occurred */ 828 case nfserr_rofs: 829 case nfserr_acces: 830 case nfserr_perm: 831 /* simply don't "or" in the access bit. 
*/ 832 break; 833 default: 834 error = err2; 835 goto out; 836 } 837 } 838 } 839 *access = result; 840 if (supported) 841 *supported = sresult; 842 843 out: 844 return error; 845 } 846 847 int nfsd_open_break_lease(struct inode *inode, int access) 848 { 849 unsigned int mode; 850 851 if (access & NFSD_MAY_NOT_BREAK_LEASE) 852 return 0; 853 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY; 854 return break_lease(inode, mode | O_NONBLOCK); 855 } 856 857 /* 858 * Open an existing file or directory. 859 * The may_flags argument indicates the type of open (read/write/lock) 860 * and additional flags. 861 * N.B. After this call fhp needs an fh_put 862 */ 863 static int 864 __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, 865 int may_flags, struct file **filp) 866 { 867 struct path path; 868 struct inode *inode; 869 struct file *file; 870 int flags = O_RDONLY|O_LARGEFILE; 871 int host_err = -EPERM; 872 873 path.mnt = fhp->fh_export->ex_path.mnt; 874 path.dentry = fhp->fh_dentry; 875 inode = d_inode(path.dentry); 876 877 if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE)) 878 goto out; 879 880 if (!inode->i_fop) 881 goto out; 882 883 host_err = nfsd_open_break_lease(inode, may_flags); 884 if (host_err) /* NOMEM or WOULDBLOCK */ 885 goto out; 886 887 if (may_flags & NFSD_MAY_WRITE) { 888 if (may_flags & NFSD_MAY_READ) 889 flags = O_RDWR|O_LARGEFILE; 890 else 891 flags = O_WRONLY|O_LARGEFILE; 892 } 893 894 file = dentry_open(&path, flags, current_cred()); 895 if (IS_ERR(file)) { 896 host_err = PTR_ERR(file); 897 goto out; 898 } 899 900 host_err = security_file_post_open(file, may_flags); 901 if (host_err) { 902 fput(file); 903 goto out; 904 } 905 906 if (may_flags & NFSD_MAY_64BIT_COOKIE) 907 file->f_mode |= FMODE_64BITHASH; 908 else 909 file->f_mode |= FMODE_32BITHASH; 910 911 *filp = file; 912 out: 913 return host_err; 914 } 915 916 __be32 917 nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, 918 int may_flags, struct file **filp) 919 { 920 __be32 err; 921 int host_err; 922 bool retried = false; 923 924 /* 925 * If we get here, then the client has already done an "open", 926 * and (hopefully) checked permission - so allow OWNER_OVERRIDE 927 * in case a chmod has now revoked permission. 928 * 929 * Arguably we should also allow the owner override for 930 * directories, but we never have and it doesn't seem to have 931 * caused anyone a problem. If we were to change this, note 932 * also that our filldir callbacks would need a variant of 933 * lookup_one_len that doesn't check permissions. 934 */ 935 if (type == S_IFREG) 936 may_flags |= NFSD_MAY_OWNER_OVERRIDE; 937 retry: 938 err = fh_verify(rqstp, fhp, type, may_flags); 939 if (!err) { 940 host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp); 941 if (host_err == -EOPENSTALE && !retried) { 942 retried = true; 943 fh_put(fhp); 944 goto retry; 945 } 946 err = nfserrno(host_err); 947 } 948 return err; 949 } 950 951 /** 952 * nfsd_open_verified - Open a regular file for the filecache 953 * @rqstp: RPC request 954 * @fhp: NFS filehandle of the file to open 955 * @may_flags: internal permission flags 956 * @filp: OUT: open "struct file *" 957 * 958 * Returns zero on success, or a negative errno value. 
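 * The caller is expected to have already verified the file handle:
 * unlike nfsd_open(), this helper calls __nfsd_open() directly and
 * performs no fh_verify() or retry on -EOPENSTALE.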
959 */ 960 int 961 nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags, 962 struct file **filp) 963 { 964 return __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp); 965 } 966 967 /* 968 * Grab and keep cached pages associated with a file in the svc_rqst 969 * so that they can be passed to the network sendmsg routines 970 * directly. They will be released after the sending has completed. 971 * 972 * Return values: Number of bytes consumed, or -EIO if there are no 973 * remaining pages in rqstp->rq_pages. 974 */ 975 static int 976 nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 977 struct splice_desc *sd) 978 { 979 struct svc_rqst *rqstp = sd->u.data; 980 struct page *page = buf->page; // may be a compound one 981 unsigned offset = buf->offset; 982 struct page *last_page; 983 984 last_page = page + (offset + sd->len - 1) / PAGE_SIZE; 985 for (page += offset / PAGE_SIZE; page <= last_page; page++) { 986 /* 987 * Skip page replacement when extending the contents of the 988 * current page. But note that we may get two zero_pages in a 989 * row from shmem. 990 */ 991 if (page == *(rqstp->rq_next_page - 1) && 992 offset_in_page(rqstp->rq_res.page_base + 993 rqstp->rq_res.page_len)) 994 continue; 995 if (unlikely(!svc_rqst_replace_page(rqstp, page))) 996 return -EIO; 997 } 998 if (rqstp->rq_res.page_len == 0) // first call 999 rqstp->rq_res.page_base = offset % PAGE_SIZE; 1000 rqstp->rq_res.page_len += sd->len; 1001 return sd->len; 1002 } 1003 1004 static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe, 1005 struct splice_desc *sd) 1006 { 1007 return __splice_from_pipe(pipe, sd, nfsd_splice_actor); 1008 } 1009 1010 static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len, 1011 size_t expected) 1012 { 1013 if (expected != 0 && len == 0) 1014 return 1; 1015 if (offset+len >= i_size_read(file_inode(file))) 1016 return 1; 1017 return 0; 1018 } 1019 1020 static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp, 1021 struct file *file, loff_t offset, 1022 unsigned long *count, u32 *eof, ssize_t host_err) 1023 { 1024 if (host_err >= 0) { 1025 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1026 1027 nfsd_stats_io_read_add(nn, fhp->fh_export, host_err); 1028 *eof = nfsd_eof_on_read(file, offset, host_err, *count); 1029 *count = host_err; 1030 fsnotify_access(file); 1031 trace_nfsd_read_io_done(rqstp, fhp, offset, *count); 1032 return 0; 1033 } else { 1034 trace_nfsd_read_err(rqstp, fhp, offset, host_err); 1035 return nfserrno(host_err); 1036 } 1037 } 1038 1039 /** 1040 * nfsd_splice_read - Perform a VFS read using a splice pipe 1041 * @rqstp: RPC transaction context 1042 * @fhp: file handle of file to be read 1043 * @file: opened struct file of file to be read 1044 * @offset: starting byte offset 1045 * @count: IN: requested number of bytes; OUT: number of bytes read 1046 * @eof: OUT: set non-zero if operation reached the end of the file 1047 * 1048 * Returns nfs_ok on success, otherwise an nfserr stat value is 1049 * returned. 
1050 */ 1051 __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp, 1052 struct file *file, loff_t offset, unsigned long *count, 1053 u32 *eof) 1054 { 1055 struct splice_desc sd = { 1056 .len = 0, 1057 .total_len = *count, 1058 .pos = offset, 1059 .u.data = rqstp, 1060 }; 1061 ssize_t host_err; 1062 1063 trace_nfsd_read_splice(rqstp, fhp, offset, *count); 1064 host_err = rw_verify_area(READ, file, &offset, *count); 1065 if (!host_err) 1066 host_err = splice_direct_to_actor(file, &sd, 1067 nfsd_direct_splice_actor); 1068 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err); 1069 } 1070 1071 /** 1072 * nfsd_iter_read - Perform a VFS read using an iterator 1073 * @rqstp: RPC transaction context 1074 * @fhp: file handle of file to be read 1075 * @file: opened struct file of file to be read 1076 * @offset: starting byte offset 1077 * @count: IN: requested number of bytes; OUT: number of bytes read 1078 * @base: offset in first page of read buffer 1079 * @eof: OUT: set non-zero if operation reached the end of the file 1080 * 1081 * Some filesystems or situations cannot use nfsd_splice_read. This 1082 * function is the slightly less-performant fallback for those cases. 1083 * 1084 * Returns nfs_ok on success, otherwise an nfserr stat value is 1085 * returned. 1086 */ 1087 __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp, 1088 struct file *file, loff_t offset, unsigned long *count, 1089 unsigned int base, u32 *eof) 1090 { 1091 unsigned long v, total; 1092 struct iov_iter iter; 1093 loff_t ppos = offset; 1094 struct page *page; 1095 ssize_t host_err; 1096 1097 v = 0; 1098 total = *count; 1099 while (total) { 1100 page = *(rqstp->rq_next_page++); 1101 rqstp->rq_vec[v].iov_base = page_address(page) + base; 1102 rqstp->rq_vec[v].iov_len = min_t(size_t, total, PAGE_SIZE - base); 1103 total -= rqstp->rq_vec[v].iov_len; 1104 ++v; 1105 base = 0; 1106 } 1107 WARN_ON_ONCE(v > ARRAY_SIZE(rqstp->rq_vec)); 1108 1109 trace_nfsd_read_vector(rqstp, fhp, offset, *count); 1110 iov_iter_kvec(&iter, ITER_DEST, rqstp->rq_vec, v, *count); 1111 host_err = vfs_iter_read(file, &iter, &ppos, 0); 1112 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err); 1113 } 1114 1115 /* 1116 * Gathered writes: If another process is currently writing to the file, 1117 * there's a high chance this is another nfsd (triggered by a bulk write 1118 * from a client's biod). Rather than syncing the file with each write 1119 * request, we sleep for 10 msec. 1120 * 1121 * I don't know if this roughly approximates C. Juszak's idea of 1122 * gathered writes, but it's a nice and simple solution (IMHO), and it 1123 * seems to work:-) 1124 * 1125 * Note: we do this only in the NFSv2 case, since v3 and higher have a 1126 * better tool (separate unstable writes and commits) for solving this 1127 * problem. 
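 * The static last_ino/last_dev pair below remembers the most recently
 * written file, so consecutive wgather writes to the same file also
 * defer briefly even when no other writer currently holds it open.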
1128 */ 1129 static int wait_for_concurrent_writes(struct file *file) 1130 { 1131 struct inode *inode = file_inode(file); 1132 static ino_t last_ino; 1133 static dev_t last_dev; 1134 int err = 0; 1135 1136 if (atomic_read(&inode->i_writecount) > 1 1137 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) { 1138 dprintk("nfsd: write defer %d\n", task_pid_nr(current)); 1139 msleep(10); 1140 dprintk("nfsd: write resume %d\n", task_pid_nr(current)); 1141 } 1142 1143 if (inode->i_state & I_DIRTY) { 1144 dprintk("nfsd: write sync %d\n", task_pid_nr(current)); 1145 err = vfs_fsync(file, 0); 1146 } 1147 last_ino = inode->i_ino; 1148 last_dev = inode->i_sb->s_dev; 1149 return err; 1150 } 1151 1152 __be32 1153 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, 1154 loff_t offset, struct kvec *vec, int vlen, 1155 unsigned long *cnt, int stable, 1156 __be32 *verf) 1157 { 1158 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 1159 struct file *file = nf->nf_file; 1160 struct super_block *sb = file_inode(file)->i_sb; 1161 struct svc_export *exp; 1162 struct iov_iter iter; 1163 errseq_t since; 1164 __be32 nfserr; 1165 int host_err; 1166 loff_t pos = offset; 1167 unsigned long exp_op_flags = 0; 1168 unsigned int pflags = current->flags; 1169 rwf_t flags = 0; 1170 bool restore_flags = false; 1171 1172 trace_nfsd_write_opened(rqstp, fhp, offset, *cnt); 1173 1174 if (sb->s_export_op) 1175 exp_op_flags = sb->s_export_op->flags; 1176 1177 if (test_bit(RQ_LOCAL, &rqstp->rq_flags) && 1178 !(exp_op_flags & EXPORT_OP_REMOTE_FS)) { 1179 /* 1180 * We want throttling in balance_dirty_pages() 1181 * and shrink_inactive_list() to only consider 1182 * the backingdev we are writing to, so that nfs to 1183 * localhost doesn't cause nfsd to lock up due to all 1184 * the client's dirty pages or its congested queue. 1185 */ 1186 current->flags |= PF_LOCAL_THROTTLE; 1187 restore_flags = true; 1188 } 1189 1190 exp = fhp->fh_export; 1191 1192 if (!EX_ISSYNC(exp)) 1193 stable = NFS_UNSTABLE; 1194 1195 if (stable && !fhp->fh_use_wgather) 1196 flags |= RWF_SYNC; 1197 1198 iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt); 1199 since = READ_ONCE(file->f_wb_err); 1200 if (verf) 1201 nfsd_copy_write_verifier(verf, nn); 1202 host_err = vfs_iter_write(file, &iter, &pos, flags); 1203 if (host_err < 0) { 1204 commit_reset_write_verifier(nn, rqstp, host_err); 1205 goto out_nfserr; 1206 } 1207 *cnt = host_err; 1208 nfsd_stats_io_write_add(nn, exp, *cnt); 1209 fsnotify_modify(file); 1210 host_err = filemap_check_wb_err(file->f_mapping, since); 1211 if (host_err < 0) 1212 goto out_nfserr; 1213 1214 if (stable && fhp->fh_use_wgather) { 1215 host_err = wait_for_concurrent_writes(file); 1216 if (host_err < 0) 1217 commit_reset_write_verifier(nn, rqstp, host_err); 1218 } 1219 1220 out_nfserr: 1221 if (host_err >= 0) { 1222 trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt); 1223 nfserr = nfs_ok; 1224 } else { 1225 trace_nfsd_write_err(rqstp, fhp, offset, host_err); 1226 nfserr = nfserrno(host_err); 1227 } 1228 if (restore_flags) 1229 current_restore_flags(pflags, PF_LOCAL_THROTTLE); 1230 return nfserr; 1231 } 1232 1233 /** 1234 * nfsd_read_splice_ok - check if spliced reading is supported 1235 * @rqstp: RPC transaction context 1236 * 1237 * Return values: 1238 * %true: nfsd_splice_read() may be used 1239 * %false: nfsd_splice_read() must not be used 1240 * 1241 * NFS READ normally uses splice to send data in-place. 
However the 1242 * data in cache can change after the reply's MIC is computed but 1243 * before the RPC reply is sent. To prevent the client from 1244 * rejecting the server-computed MIC in this somewhat rare case, do 1245 * not use splice with the GSS integrity and privacy services. 1246 */ 1247 bool nfsd_read_splice_ok(struct svc_rqst *rqstp) 1248 { 1249 switch (svc_auth_flavor(rqstp)) { 1250 case RPC_AUTH_GSS_KRB5I: 1251 case RPC_AUTH_GSS_KRB5P: 1252 return false; 1253 } 1254 return true; 1255 } 1256 1257 /** 1258 * nfsd_read - Read data from a file 1259 * @rqstp: RPC transaction context 1260 * @fhp: file handle of file to be read 1261 * @offset: starting byte offset 1262 * @count: IN: requested number of bytes; OUT: number of bytes read 1263 * @eof: OUT: set non-zero if operation reached the end of the file 1264 * 1265 * The caller must verify that there is enough space in @rqstp.rq_res 1266 * to perform this operation. 1267 * 1268 * N.B. After this call fhp needs an fh_put 1269 * 1270 * Returns nfs_ok on success, otherwise an nfserr stat value is 1271 * returned. 1272 */ 1273 __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, 1274 loff_t offset, unsigned long *count, u32 *eof) 1275 { 1276 struct nfsd_file *nf; 1277 struct file *file; 1278 __be32 err; 1279 1280 trace_nfsd_read_start(rqstp, fhp, offset, *count); 1281 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf); 1282 if (err) 1283 return err; 1284 1285 file = nf->nf_file; 1286 if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp)) 1287 err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof); 1288 else 1289 err = nfsd_iter_read(rqstp, fhp, file, offset, count, 0, eof); 1290 1291 nfsd_file_put(nf); 1292 trace_nfsd_read_done(rqstp, fhp, offset, *count); 1293 return err; 1294 } 1295 1296 /* 1297 * Write data to a file. 1298 * The stable flag requests synchronous writes. 1299 * N.B. After this call fhp needs an fh_put 1300 */ 1301 __be32 1302 nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset, 1303 struct kvec *vec, int vlen, unsigned long *cnt, int stable, 1304 __be32 *verf) 1305 { 1306 struct nfsd_file *nf; 1307 __be32 err; 1308 1309 trace_nfsd_write_start(rqstp, fhp, offset, *cnt); 1310 1311 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf); 1312 if (err) 1313 goto out; 1314 1315 err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec, 1316 vlen, cnt, stable, verf); 1317 nfsd_file_put(nf); 1318 out: 1319 trace_nfsd_write_done(rqstp, fhp, offset, *cnt); 1320 return err; 1321 } 1322 1323 /** 1324 * nfsd_commit - Commit pending writes to stable storage 1325 * @rqstp: RPC request being processed 1326 * @fhp: NFS filehandle 1327 * @nf: target file 1328 * @offset: raw offset from beginning of file 1329 * @count: raw count of bytes to sync 1330 * @verf: filled in with the server's current write verifier 1331 * 1332 * Note: we guarantee that data that lies within the range specified 1333 * by the 'offset' and 'count' parameters will be synced. The server 1334 * is permitted to sync data that lies outside this range at the 1335 * same time. 1336 * 1337 * Unfortunately we cannot lock the file to make sure we return full WCC 1338 * data to the client, as locking happens lower down in the filesystem. 1339 * 1340 * Return values: 1341 * An nfsstat value in network byte order. 
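 * An unexpected failure from vfs_fsync_range() resets the server's
 * write verifier, prompting clients to resend any uncommitted writes.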
 */
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
	    u64 offset, u32 count, __be32 *verf)
{
	__be32			err = nfs_ok;
	u64			maxbytes;
	loff_t			start, end;
	struct nfsd_net		*nn;

	/*
	 * Convert the client-provided (offset, count) range to a
	 * (start, end) range. If the client-provided range falls
	 * outside the maximum file size of the underlying FS,
	 * clamp the sync range appropriately.
	 */
	start = 0;
	end = LLONG_MAX;
	maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
	if (offset < maxbytes) {
		start = offset;
		if (count && (offset + count - 1 < maxbytes))
			end = offset + count - 1;
	}

	nn = net_generic(nf->nf_net, nfsd_net_id);
	if (EX_ISSYNC(fhp->fh_export)) {
		errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
		int err2;

		err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
		switch (err2) {
		case 0:
			nfsd_copy_write_verifier(verf, nn);
			err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
						    since);
			err = nfserrno(err2);
			break;
		case -EINVAL:
			err = nfserr_notsupp;
			break;
		default:
			commit_reset_write_verifier(nn, rqstp, err2);
			err = nfserrno(err2);
		}
	} else
		nfsd_copy_write_verifier(verf, nn);

	return err;
}

/**
 * nfsd_create_setattr - Set a created file's attributes
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of parent directory
 * @resfhp: NFS filehandle of new object
 * @attrs: requested attributes of new object
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
		    struct svc_fh *resfhp, struct nfsd_attrs *attrs)
{
	struct iattr *iap = attrs->na_iattr;
	__be32 status;

	/*
	 * Mode has already been set by file creation.
	 */
	iap->ia_valid &= ~ATTR_MODE;

	/*
	 * Setting uid/gid works only for root. Irix appears to
	 * send along the gid on create when it tries to implement
	 * setgid directories via NFS:
	 */
	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);

	/*
	 * Callers expect new file metadata to be committed even
	 * if the attributes have not changed.
	 */
	if (nfsd_attrs_valid(attrs))
		status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
	else
		status = nfserrno(commit_metadata(resfhp));

	/*
	 * Transactional filesystems had a chance to commit changes
	 * for both parent and child simultaneously making the
	 * following commit_metadata a noop in many cases.
	 */
	if (!status)
		status = nfserrno(commit_metadata(fhp));

	/*
	 * Update the new filehandle to pick up the new attributes.
	 */
	if (!status)
		status = fh_update(resfhp);

	return status;
}

/* HPUX clients sometimes create a file in mode 000 and set its size to 0.
 * Setting the size to 0 can then fail on some filesystems, because the
 * permission check requires WRITE permission while the mode is 000.
 * Since a newly created file already has size 0, we simply ignore the
 * request to resize it to 0.
 *
 * Call this only after vfs_create() has been called.
1455 * */ 1456 static void 1457 nfsd_check_ignore_resizing(struct iattr *iap) 1458 { 1459 if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0)) 1460 iap->ia_valid &= ~ATTR_SIZE; 1461 } 1462 1463 /* The parent directory should already be locked: */ 1464 __be32 1465 nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, 1466 struct nfsd_attrs *attrs, 1467 int type, dev_t rdev, struct svc_fh *resfhp) 1468 { 1469 struct dentry *dentry, *dchild; 1470 struct inode *dirp; 1471 struct iattr *iap = attrs->na_iattr; 1472 __be32 err; 1473 int host_err; 1474 1475 dentry = fhp->fh_dentry; 1476 dirp = d_inode(dentry); 1477 1478 dchild = dget(resfhp->fh_dentry); 1479 err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry, 1480 NFSD_MAY_CREATE); 1481 if (err) 1482 goto out; 1483 1484 if (!(iap->ia_valid & ATTR_MODE)) 1485 iap->ia_mode = 0; 1486 iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; 1487 1488 if (!IS_POSIXACL(dirp)) 1489 iap->ia_mode &= ~current_umask(); 1490 1491 err = 0; 1492 switch (type) { 1493 case S_IFREG: 1494 host_err = vfs_create(&nop_mnt_idmap, dirp, dchild, 1495 iap->ia_mode, true); 1496 if (!host_err) 1497 nfsd_check_ignore_resizing(iap); 1498 break; 1499 case S_IFDIR: 1500 host_err = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode); 1501 if (!host_err && unlikely(d_unhashed(dchild))) { 1502 struct dentry *d; 1503 d = lookup_one_len(dchild->d_name.name, 1504 dchild->d_parent, 1505 dchild->d_name.len); 1506 if (IS_ERR(d)) { 1507 host_err = PTR_ERR(d); 1508 break; 1509 } 1510 if (unlikely(d_is_negative(d))) { 1511 dput(d); 1512 err = nfserr_serverfault; 1513 goto out; 1514 } 1515 dput(resfhp->fh_dentry); 1516 resfhp->fh_dentry = dget(d); 1517 err = fh_update(resfhp); 1518 dput(dchild); 1519 dchild = d; 1520 if (err) 1521 goto out; 1522 } 1523 break; 1524 case S_IFCHR: 1525 case S_IFBLK: 1526 case S_IFIFO: 1527 case S_IFSOCK: 1528 host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild, 1529 iap->ia_mode, rdev); 1530 break; 1531 default: 1532 printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n", 1533 type); 1534 host_err = -EINVAL; 1535 } 1536 if (host_err < 0) 1537 goto out_nfserr; 1538 1539 err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs); 1540 1541 out: 1542 dput(dchild); 1543 return err; 1544 1545 out_nfserr: 1546 err = nfserrno(host_err); 1547 goto out; 1548 } 1549 1550 /* 1551 * Create a filesystem object (regular, directory, special). 1552 * Note that the parent directory is left locked. 1553 * 1554 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp 1555 */ 1556 __be32 1557 nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, 1558 char *fname, int flen, struct nfsd_attrs *attrs, 1559 int type, dev_t rdev, struct svc_fh *resfhp) 1560 { 1561 struct dentry *dentry, *dchild = NULL; 1562 __be32 err; 1563 int host_err; 1564 1565 if (isdotent(fname, flen)) 1566 return nfserr_exist; 1567 1568 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP); 1569 if (err) 1570 return err; 1571 1572 dentry = fhp->fh_dentry; 1573 1574 host_err = fh_want_write(fhp); 1575 if (host_err) 1576 return nfserrno(host_err); 1577 1578 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT); 1579 dchild = lookup_one_len(fname, dentry, flen); 1580 host_err = PTR_ERR(dchild); 1581 if (IS_ERR(dchild)) { 1582 err = nfserrno(host_err); 1583 goto out_unlock; 1584 } 1585 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); 1586 /* 1587 * We unconditionally drop our ref to dchild as fh_compose will have 1588 * already grabbed its own ref for it. 
1589 */ 1590 dput(dchild); 1591 if (err) 1592 goto out_unlock; 1593 err = fh_fill_pre_attrs(fhp); 1594 if (err != nfs_ok) 1595 goto out_unlock; 1596 err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp); 1597 fh_fill_post_attrs(fhp); 1598 out_unlock: 1599 inode_unlock(dentry->d_inode); 1600 return err; 1601 } 1602 1603 /* 1604 * Read a symlink. On entry, *lenp must contain the maximum path length that 1605 * fits into the buffer. On return, it contains the true length. 1606 * N.B. After this call fhp needs an fh_put 1607 */ 1608 __be32 1609 nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) 1610 { 1611 __be32 err; 1612 const char *link; 1613 struct path path; 1614 DEFINE_DELAYED_CALL(done); 1615 int len; 1616 1617 err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP); 1618 if (unlikely(err)) 1619 return err; 1620 1621 path.mnt = fhp->fh_export->ex_path.mnt; 1622 path.dentry = fhp->fh_dentry; 1623 1624 if (unlikely(!d_is_symlink(path.dentry))) 1625 return nfserr_inval; 1626 1627 touch_atime(&path); 1628 1629 link = vfs_get_link(path.dentry, &done); 1630 if (IS_ERR(link)) 1631 return nfserrno(PTR_ERR(link)); 1632 1633 len = strlen(link); 1634 if (len < *lenp) 1635 *lenp = len; 1636 memcpy(buf, link, *lenp); 1637 do_delayed_call(&done); 1638 return 0; 1639 } 1640 1641 /** 1642 * nfsd_symlink - Create a symlink and look up its inode 1643 * @rqstp: RPC transaction being executed 1644 * @fhp: NFS filehandle of parent directory 1645 * @fname: filename of the new symlink 1646 * @flen: length of @fname 1647 * @path: content of the new symlink (NUL-terminated) 1648 * @attrs: requested attributes of new object 1649 * @resfhp: NFS filehandle of new object 1650 * 1651 * N.B. After this call _both_ fhp and resfhp need an fh_put 1652 * 1653 * Returns nfs_ok on success, or an nfsstat in network byte order. 1654 */ 1655 __be32 1656 nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, 1657 char *fname, int flen, 1658 char *path, struct nfsd_attrs *attrs, 1659 struct svc_fh *resfhp) 1660 { 1661 struct dentry *dentry, *dnew; 1662 __be32 err, cerr; 1663 int host_err; 1664 1665 err = nfserr_noent; 1666 if (!flen || path[0] == '\0') 1667 goto out; 1668 err = nfserr_exist; 1669 if (isdotent(fname, flen)) 1670 goto out; 1671 1672 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); 1673 if (err) 1674 goto out; 1675 1676 host_err = fh_want_write(fhp); 1677 if (host_err) { 1678 err = nfserrno(host_err); 1679 goto out; 1680 } 1681 1682 dentry = fhp->fh_dentry; 1683 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT); 1684 dnew = lookup_one_len(fname, dentry, flen); 1685 if (IS_ERR(dnew)) { 1686 err = nfserrno(PTR_ERR(dnew)); 1687 inode_unlock(dentry->d_inode); 1688 goto out_drop_write; 1689 } 1690 err = fh_fill_pre_attrs(fhp); 1691 if (err != nfs_ok) 1692 goto out_unlock; 1693 host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path); 1694 err = nfserrno(host_err); 1695 cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp); 1696 if (!err) 1697 nfsd_create_setattr(rqstp, fhp, resfhp, attrs); 1698 fh_fill_post_attrs(fhp); 1699 out_unlock: 1700 inode_unlock(dentry->d_inode); 1701 if (!err) 1702 err = nfserrno(commit_metadata(fhp)); 1703 dput(dnew); 1704 if (err==0) err = cerr; 1705 out_drop_write: 1706 fh_drop_write(fhp); 1707 out: 1708 return err; 1709 } 1710 1711 /* 1712 * Create a hardlink 1713 * N.B. 
After this call _both_ ffhp and tfhp need an fh_put 1714 */ 1715 __be32 1716 nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, 1717 char *name, int len, struct svc_fh *tfhp) 1718 { 1719 struct dentry *ddir, *dnew, *dold; 1720 struct inode *dirp; 1721 __be32 err; 1722 int host_err; 1723 1724 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE); 1725 if (err) 1726 goto out; 1727 err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP); 1728 if (err) 1729 goto out; 1730 err = nfserr_isdir; 1731 if (d_is_dir(tfhp->fh_dentry)) 1732 goto out; 1733 err = nfserr_perm; 1734 if (!len) 1735 goto out; 1736 err = nfserr_exist; 1737 if (isdotent(name, len)) 1738 goto out; 1739 1740 host_err = fh_want_write(tfhp); 1741 if (host_err) { 1742 err = nfserrno(host_err); 1743 goto out; 1744 } 1745 1746 ddir = ffhp->fh_dentry; 1747 dirp = d_inode(ddir); 1748 inode_lock_nested(dirp, I_MUTEX_PARENT); 1749 1750 dnew = lookup_one_len(name, ddir, len); 1751 if (IS_ERR(dnew)) { 1752 err = nfserrno(PTR_ERR(dnew)); 1753 goto out_unlock; 1754 } 1755 1756 dold = tfhp->fh_dentry; 1757 1758 err = nfserr_noent; 1759 if (d_really_is_negative(dold)) 1760 goto out_dput; 1761 err = fh_fill_pre_attrs(ffhp); 1762 if (err != nfs_ok) 1763 goto out_dput; 1764 host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL); 1765 fh_fill_post_attrs(ffhp); 1766 inode_unlock(dirp); 1767 if (!host_err) { 1768 err = nfserrno(commit_metadata(ffhp)); 1769 if (!err) 1770 err = nfserrno(commit_metadata(tfhp)); 1771 } else { 1772 err = nfserrno(host_err); 1773 } 1774 dput(dnew); 1775 out_drop_write: 1776 fh_drop_write(tfhp); 1777 out: 1778 return err; 1779 1780 out_dput: 1781 dput(dnew); 1782 out_unlock: 1783 inode_unlock(dirp); 1784 goto out_drop_write; 1785 } 1786 1787 static void 1788 nfsd_close_cached_files(struct dentry *dentry) 1789 { 1790 struct inode *inode = d_inode(dentry); 1791 1792 if (inode && S_ISREG(inode->i_mode)) 1793 nfsd_file_close_inode_sync(inode); 1794 } 1795 1796 static bool 1797 nfsd_has_cached_files(struct dentry *dentry) 1798 { 1799 bool ret = false; 1800 struct inode *inode = d_inode(dentry); 1801 1802 if (inode && S_ISREG(inode->i_mode)) 1803 ret = nfsd_file_is_cached(inode); 1804 return ret; 1805 } 1806 1807 /* 1808 * Rename a file 1809 * N.B. 
After this call _both_ ffhp and tfhp need an fh_put 1810 */ 1811 __be32 1812 nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, 1813 struct svc_fh *tfhp, char *tname, int tlen) 1814 { 1815 struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap; 1816 struct inode *fdir, *tdir; 1817 __be32 err; 1818 int host_err; 1819 bool close_cached = false; 1820 1821 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE); 1822 if (err) 1823 goto out; 1824 err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE); 1825 if (err) 1826 goto out; 1827 1828 fdentry = ffhp->fh_dentry; 1829 fdir = d_inode(fdentry); 1830 1831 tdentry = tfhp->fh_dentry; 1832 tdir = d_inode(tdentry); 1833 1834 err = nfserr_perm; 1835 if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) 1836 goto out; 1837 1838 err = nfserr_xdev; 1839 if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) 1840 goto out; 1841 if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry) 1842 goto out; 1843 1844 retry: 1845 host_err = fh_want_write(ffhp); 1846 if (host_err) { 1847 err = nfserrno(host_err); 1848 goto out; 1849 } 1850 1851 trap = lock_rename(tdentry, fdentry); 1852 if (IS_ERR(trap)) { 1853 err = nfserr_xdev; 1854 goto out_want_write; 1855 } 1856 err = fh_fill_pre_attrs(ffhp); 1857 if (err != nfs_ok) 1858 goto out_unlock; 1859 err = fh_fill_pre_attrs(tfhp); 1860 if (err != nfs_ok) 1861 goto out_unlock; 1862 1863 odentry = lookup_one_len(fname, fdentry, flen); 1864 host_err = PTR_ERR(odentry); 1865 if (IS_ERR(odentry)) 1866 goto out_nfserr; 1867 1868 host_err = -ENOENT; 1869 if (d_really_is_negative(odentry)) 1870 goto out_dput_old; 1871 host_err = -EINVAL; 1872 if (odentry == trap) 1873 goto out_dput_old; 1874 1875 ndentry = lookup_one_len(tname, tdentry, tlen); 1876 host_err = PTR_ERR(ndentry); 1877 if (IS_ERR(ndentry)) 1878 goto out_dput_old; 1879 host_err = -ENOTEMPTY; 1880 if (ndentry == trap) 1881 goto out_dput_new; 1882 1883 if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) && 1884 nfsd_has_cached_files(ndentry)) { 1885 close_cached = true; 1886 goto out_dput_old; 1887 } else { 1888 struct renamedata rd = { 1889 .old_mnt_idmap = &nop_mnt_idmap, 1890 .old_dir = fdir, 1891 .old_dentry = odentry, 1892 .new_mnt_idmap = &nop_mnt_idmap, 1893 .new_dir = tdir, 1894 .new_dentry = ndentry, 1895 }; 1896 int retries; 1897 1898 for (retries = 1;;) { 1899 host_err = vfs_rename(&rd); 1900 if (host_err != -EAGAIN || !retries--) 1901 break; 1902 if (!nfsd_wait_for_delegreturn(rqstp, d_inode(odentry))) 1903 break; 1904 } 1905 if (!host_err) { 1906 host_err = commit_metadata(tfhp); 1907 if (!host_err) 1908 host_err = commit_metadata(ffhp); 1909 } 1910 } 1911 out_dput_new: 1912 dput(ndentry); 1913 out_dput_old: 1914 dput(odentry); 1915 out_nfserr: 1916 err = nfserrno(host_err); 1917 1918 if (!close_cached) { 1919 fh_fill_post_attrs(ffhp); 1920 fh_fill_post_attrs(tfhp); 1921 } 1922 out_unlock: 1923 unlock_rename(tdentry, fdentry); 1924 out_want_write: 1925 fh_drop_write(ffhp); 1926 1927 /* 1928 * If the target dentry has cached open files, then we need to 1929 * try to close them prior to doing the rename. Final fput 1930 * shouldn't be done with locks held however, so we delay it 1931 * until this point and then reattempt the whole shebang. 
1932 */ 1933 if (close_cached) { 1934 close_cached = false; 1935 nfsd_close_cached_files(ndentry); 1936 dput(ndentry); 1937 goto retry; 1938 } 1939 out: 1940 return err; 1941 } 1942 1943 /* 1944 * Unlink a file or directory 1945 * N.B. After this call fhp needs an fh_put 1946 */ 1947 __be32 1948 nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, 1949 char *fname, int flen) 1950 { 1951 struct dentry *dentry, *rdentry; 1952 struct inode *dirp; 1953 struct inode *rinode; 1954 __be32 err; 1955 int host_err; 1956 1957 err = nfserr_acces; 1958 if (!flen || isdotent(fname, flen)) 1959 goto out; 1960 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE); 1961 if (err) 1962 goto out; 1963 1964 host_err = fh_want_write(fhp); 1965 if (host_err) 1966 goto out_nfserr; 1967 1968 dentry = fhp->fh_dentry; 1969 dirp = d_inode(dentry); 1970 inode_lock_nested(dirp, I_MUTEX_PARENT); 1971 1972 rdentry = lookup_one_len(fname, dentry, flen); 1973 host_err = PTR_ERR(rdentry); 1974 if (IS_ERR(rdentry)) 1975 goto out_unlock; 1976 1977 if (d_really_is_negative(rdentry)) { 1978 dput(rdentry); 1979 host_err = -ENOENT; 1980 goto out_unlock; 1981 } 1982 rinode = d_inode(rdentry); 1983 err = fh_fill_pre_attrs(fhp); 1984 if (err != nfs_ok) 1985 goto out_unlock; 1986 1987 ihold(rinode); 1988 if (!type) 1989 type = d_inode(rdentry)->i_mode & S_IFMT; 1990 1991 if (type != S_IFDIR) { 1992 int retries; 1993 1994 if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) 1995 nfsd_close_cached_files(rdentry); 1996 1997 for (retries = 1;;) { 1998 host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL); 1999 if (host_err != -EAGAIN || !retries--) 2000 break; 2001 if (!nfsd_wait_for_delegreturn(rqstp, rinode)) 2002 break; 2003 } 2004 } else { 2005 host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry); 2006 } 2007 fh_fill_post_attrs(fhp); 2008 2009 inode_unlock(dirp); 2010 if (!host_err) 2011 host_err = commit_metadata(fhp); 2012 dput(rdentry); 2013 iput(rinode); /* truncate the inode here */ 2014 2015 out_drop_write: 2016 fh_drop_write(fhp); 2017 out_nfserr: 2018 if (host_err == -EBUSY) { 2019 /* name is mounted-on. There is no perfect 2020 * error status. 2021 */ 2022 err = nfserr_file_open; 2023 } else { 2024 err = nfserrno(host_err); 2025 } 2026 out: 2027 return err; 2028 out_unlock: 2029 inode_unlock(dirp); 2030 goto out_drop_write; 2031 } 2032 2033 /* 2034 * We do this buffering because we must not call back into the file 2035 * system's ->lookup() method from the filldir callback. That may well 2036 * deadlock a number of file systems. 2037 * 2038 * This is based heavily on the implementation of same in XFS. 
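 * Entries are packed into a single page; nfsd_buffered_filldir() sets
 * ->full and stops as soon as the next record would overflow PAGE_SIZE,
 * and nfsd_buffered_readdir() then drains the buffer before calling
 * iterate_dir() again.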
 */
struct buffered_dirent {
	u64		ino;
	loff_t		offset;
	int		namlen;
	unsigned int	d_type;
	char		name[];
};

struct readdir_data {
	struct dir_context ctx;
	char		*dirent;
	size_t		used;
	int		full;
};

static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
				  int namlen, loff_t offset, u64 ino,
				  unsigned int d_type)
{
	struct readdir_data *buf =
		container_of(ctx, struct readdir_data, ctx);
	struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
	unsigned int reclen;

	reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
	if (buf->used + reclen > PAGE_SIZE) {
		buf->full = 1;
		return false;
	}

	de->namlen = namlen;
	de->offset = offset;
	de->ino = ino;
	de->d_type = d_type;
	memcpy(de->name, name, namlen);
	buf->used += reclen;

	return true;
}

static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
				    nfsd_filldir_t func, struct readdir_cd *cdp,
				    loff_t *offsetp)
{
	struct buffered_dirent *de;
	int host_err;
	int size;
	loff_t offset;
	struct readdir_data buf = {
		.ctx.actor = nfsd_buffered_filldir,
		.dirent = (void *)__get_free_page(GFP_KERNEL)
	};

	if (!buf.dirent)
		return nfserrno(-ENOMEM);

	offset = *offsetp;

	while (1) {
		unsigned int reclen;

		cdp->err = nfserr_eof; /* will be cleared on successful read */
		buf.used = 0;
		buf.full = 0;

		host_err = iterate_dir(file, &buf.ctx);
		if (buf.full)
			host_err = 0;

		if (host_err < 0)
			break;

		size = buf.used;

		if (!size)
			break;

		de = (struct buffered_dirent *)buf.dirent;
		while (size > 0) {
			offset = de->offset;

			if (func(cdp, de->name, de->namlen, de->offset,
				 de->ino, de->d_type))
				break;

			if (cdp->err != nfs_ok)
				break;

			trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);

			reclen = ALIGN(sizeof(*de) + de->namlen,
				       sizeof(u64));
			size -= reclen;
			de = (struct buffered_dirent *)((char *)de + reclen);
		}
		if (size > 0) /* We bailed out early */
			break;

		offset = vfs_llseek(file, 0, SEEK_CUR);
	}

	free_page((unsigned long)(buf.dirent));

	if (host_err)
		return nfserrno(host_err);

	*offsetp = offset;
	return cdp->err;
}

/**
 * nfsd_readdir - Read entries from a directory
 * @rqstp: RPC transaction context
 * @fhp: NFS file handle of directory to be read
 * @offsetp: OUT: seek offset of final entry that was read
 * @cdp: OUT: an eof error value
 * @func: entry filler actor
 *
 * This implementation ignores the NFSv3/4 verifier cookie.
 *
 * NB: normal system calls hold file->f_pos_lock when calling
 * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
 * Because the struct file acquired here is not visible to other
 * threads, its internal state does not need mutex protection.
 *
 * Returns nfs_ok on success, otherwise an nfsstat code is
 * returned.
 */
__be32
nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
	     struct readdir_cd *cdp, nfsd_filldir_t func)
{
	__be32		err;
	struct file	*file;
	loff_t		offset = *offsetp;
	int		may_flags = NFSD_MAY_READ;

	if (fhp->fh_64bit_cookies)
		may_flags |= NFSD_MAY_64BIT_COOKIE;

	err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
	if (err)
		goto out;

	offset = vfs_llseek(file, offset, SEEK_SET);
	if (offset < 0) {
		err = nfserrno((int)offset);
		goto out_close;
	}

	err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp);

	if (err == nfserr_eof || err == nfserr_toosmall)
		err = nfs_ok; /* can still be found in ->err */
out_close:
	nfsd_filp_close(file);
out:
	return err;
}

/**
 * nfsd_filp_close - close a file synchronously
 * @fp: the file to close
 *
 * nfsd_filp_close() is similar in behaviour to filp_close().
 * The difference is that if this is the final close on the
 * file, then that finalisation happens immediately, rather than
 * being handed over to a work queue, as is the case for
 * filp_close().
 * When a user-space process closes a file (even when using
 * filp_close()), the finalisation happens before returning to
 * userspace, so it is effectively synchronous.  When a kernel thread
 * uses filp_close(), on the other hand, the handling is completely
 * asynchronous.  This means that any cost imposed by that finalisation
 * is not imposed on the nfsd thread, and nfsd could potentially
 * close files more quickly than the work queue finalises the close,
 * which would lead to unbounded growth in the queue.
 *
 * In some contexts it is not safe to synchronously wait for
 * close finalisation (see comment for __fput_sync()), but nfsd
 * does not match those contexts.  In particular it does not, at the
 * time this function is called, hold any locks, and no finalisation
 * of any file, socket, or device driver would have any cause to wait
 * for nfsd to make progress.
 */
void nfsd_filp_close(struct file *fp)
{
	get_file(fp);
	filp_close(fp, NULL);
	__fput_sync(fp);
}

/*
 * Get file system stats
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
{
	__be32 err;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
	if (!err) {
		struct path path = {
			.mnt	= fhp->fh_export->ex_path.mnt,
			.dentry	= fhp->fh_dentry,
		};
		if (vfs_statfs(&path, stat))
			err = nfserr_io;
	}
	return err;
}

static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
{
	return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
}

#ifdef CONFIG_NFSD_V4
/*
 * Helper function to translate error numbers. In the case of xattr operations,
 * some error codes need to be translated outside of the standard translations.
 *
 * ENODATA needs to be translated to nfserr_noxattr.
 * E2BIG to nfserr_xattr2big.
 *
 * Additionally, vfs_listxattr can return -ERANGE. This means that the
 * file has too many extended attributes to retrieve inside an
 * XATTR_LIST_MAX sized buffer.
 * This is a bug in the xattr implementation:
 * filesystems will allow the adding of extended attributes until they hit
 * their own internal limit. This limit may be larger than XATTR_LIST_MAX.
 * So, at that point, the attributes are present and valid, but can't
 * be retrieved using listxattr, since the upper level xattr code enforces
 * the XATTR_LIST_MAX limit.
 *
 * This bug means that we need to deal with listxattr returning -ERANGE. The
 * best mapping is to return TOOSMALL.
 */
static __be32
nfsd_xattr_errno(int err)
{
	switch (err) {
	case -ENODATA:
		return nfserr_noxattr;
	case -E2BIG:
		return nfserr_xattr2big;
	case -ERANGE:
		return nfserr_toosmall;
	}
	return nfserrno(err);
}

/*
 * Retrieve the specified user extended attribute. To avoid always
 * having to allocate the maximum size (since we are not getting
 * a maximum size from the RPC), do a probe + alloc. Hold a reader
 * lock on i_rwsem to prevent the extended attribute from changing
 * size while we're doing this.
 */
__be32
nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
	      void **bufp, int *lenp)
{
	ssize_t len;
	__be32 err;
	char *buf;
	struct inode *inode;
	struct dentry *dentry;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
	if (err)
		return err;

	err = nfs_ok;
	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);

	inode_lock_shared(inode);

	len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0);

	/*
	 * Zero-length attribute, just return.
	 */
	if (len == 0) {
		*bufp = NULL;
		*lenp = 0;
		goto out;
	}

	if (len < 0) {
		err = nfsd_xattr_errno(len);
		goto out;
	}

	if (len > *lenp) {
		err = nfserr_toosmall;
		goto out;
	}

	buf = kvmalloc(len, GFP_KERNEL);
	if (buf == NULL) {
		err = nfserr_jukebox;
		goto out;
	}

	len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len);
	if (len <= 0) {
		kvfree(buf);
		buf = NULL;
		err = nfsd_xattr_errno(len);
	}

	*lenp = len;
	*bufp = buf;

out:
	inode_unlock_shared(inode);

	return err;
}

/*
 * Retrieve the xattr names. Since we can't know how many are
 * user extended attributes, we must get all attributes here,
 * and have the XDR encode filter out the "user." ones.
 *
 * While this could always just allocate an XATTR_LIST_MAX
 * buffer, that's a waste, so do a probe + allocate. To
 * avoid any changes between the probe and allocate, wrap
 * this in inode_lock.
 */
__be32
nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
	       int *lenp)
{
	ssize_t len;
	__be32 err;
	char *buf;
	struct inode *inode;
	struct dentry *dentry;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
	if (err)
		return err;

	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);
	*lenp = 0;

	inode_lock_shared(inode);

	len = vfs_listxattr(dentry, NULL, 0);
	if (len <= 0) {
		err = nfsd_xattr_errno(len);
		goto out;
	}

	if (len > XATTR_LIST_MAX) {
		err = nfserr_xattr2big;
		goto out;
	}

	buf = kvmalloc(len, GFP_KERNEL);
	if (buf == NULL) {
		err = nfserr_jukebox;
		goto out;
	}

	len = vfs_listxattr(dentry, buf, len);
	if (len <= 0) {
		kvfree(buf);
		err = nfsd_xattr_errno(len);
		goto out;
	}

	*lenp = len;
	*bufp = buf;

	err = nfs_ok;
out:
	inode_unlock_shared(inode);

	return err;
}

/**
 * nfsd_removexattr - Remove an extended attribute
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of object with xattr to remove
 * @name: name of xattr to remove (NUL-terminated)
 *
 * Pass in a NULL pointer for delegated_inode, and let the client deal
 * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
{
	__be32 err;
	int ret;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
	if (err)
		return err;

	ret = fh_want_write(fhp);
	if (ret)
		return nfserrno(ret);

	inode_lock(fhp->fh_dentry->d_inode);
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
				       name, NULL);
	err = nfsd_xattr_errno(ret);
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(fhp->fh_dentry->d_inode);
	fh_drop_write(fhp);

	return err;
}

__be32
nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
	      void *buf, u32 len, u32 flags)
{
	__be32 err;
	int ret;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
	if (err)
		return err;

	ret = fh_want_write(fhp);
	if (ret)
		return nfserrno(ret);
	inode_lock(fhp->fh_dentry->d_inode);
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
				    name, buf, len, flags, NULL);
	fh_fill_post_attrs(fhp);
	err = nfsd_xattr_errno(ret);
out_unlock:
	inode_unlock(fhp->fh_dentry->d_inode);
	fh_drop_write(fhp);
	return err;
}
#endif

/*
 * Check for a user's access permissions to this inode.
 */
__be32
nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
		struct dentry *dentry, int acc)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
		return 0;
#if 0
	dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
		acc,
		(acc & NFSD_MAY_READ)?	" read"  : "",
		(acc & NFSD_MAY_WRITE)?	" write" : "",
		(acc & NFSD_MAY_EXEC)?	" exec"  : "",
		(acc & NFSD_MAY_SATTR)?	" sattr" : "",
		(acc & NFSD_MAY_TRUNC)?
" trunc" : "", 2515 (acc & NFSD_MAY_LOCK)? " lock" : "", 2516 (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "", 2517 inode->i_mode, 2518 IS_IMMUTABLE(inode)? " immut" : "", 2519 IS_APPEND(inode)? " append" : "", 2520 __mnt_is_readonly(exp->ex_path.mnt)? " ro" : ""); 2521 dprintk(" owner %d/%d user %d/%d\n", 2522 inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid()); 2523 #endif 2524 2525 /* Normally we reject any write/sattr etc access on a read-only file 2526 * system. But if it is IRIX doing check on write-access for a 2527 * device special file, we ignore rofs. 2528 */ 2529 if (!(acc & NFSD_MAY_LOCAL_ACCESS)) 2530 if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) { 2531 if (exp_rdonly(cred, exp) || 2532 __mnt_is_readonly(exp->ex_path.mnt)) 2533 return nfserr_rofs; 2534 if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode)) 2535 return nfserr_perm; 2536 } 2537 if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode)) 2538 return nfserr_perm; 2539 2540 if (acc & NFSD_MAY_LOCK) { 2541 /* If we cannot rely on authentication in NLM requests, 2542 * just allow locks, otherwise require read permission, or 2543 * ownership 2544 */ 2545 if (exp->ex_flags & NFSEXP_NOAUTHNLM) 2546 return 0; 2547 else 2548 acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE; 2549 } 2550 /* 2551 * The file owner always gets access permission for accesses that 2552 * would normally be checked at open time. This is to make 2553 * file access work even when the client has done a fchmod(fd, 0). 2554 * 2555 * However, `cp foo bar' should fail nevertheless when bar is 2556 * readonly. A sensible way to do this might be to reject all 2557 * attempts to truncate a read-only file, because a creat() call 2558 * always implies file truncation. 2559 * ... but this isn't really fair. A process may reasonably call 2560 * ftruncate on an open file descriptor on a file with perm 000. 2561 * We must trust the client to do permission checking - using "ACCESS" 2562 * with NFSv3. 2563 */ 2564 if ((acc & NFSD_MAY_OWNER_OVERRIDE) && 2565 uid_eq(inode->i_uid, current_fsuid())) 2566 return 0; 2567 2568 /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */ 2569 err = inode_permission(&nop_mnt_idmap, inode, 2570 acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); 2571 2572 /* Allow read access to binaries even when mode 111 */ 2573 if (err == -EACCES && S_ISREG(inode->i_mode) && 2574 (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) || 2575 acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC))) 2576 err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC); 2577 2578 return err? nfserrno(err) : 0; 2579 } 2580