// SPDX-License-Identifier: GPL-2.0
/*
 * File operations used by nfsd. Some of these have been ripped from
 * other parts of the kernel because they weren't exported, others
 * are partial duplicates with added or changed functionality.
 *
 * Note that several functions dget() the dentry upon which they want
 * to act, most notably those that create directory entries. Response
 * dentry's are dput()'d if necessary in the release callback.
 * So if you notice code paths that apparently fail to dput() the
 * dentry, don't worry--they have been taken care of.
 *
 * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
 * Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/security.h>
#include <linux/sunrpc/xdr.h>

#include "xdr3.h"

#ifdef CONFIG_NFSD_V4
#include "acl.h"
#include "idmap.h"
#include "xdr4.h"
#endif /* CONFIG_NFSD_V4 */

#include "nfsd.h"
#include "vfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_FILEOP

/*
 * When true, READ payloads are never sent via splice
 * (NOTE(review): set externally, presumably an administrative
 * tunable — confirm against the nfsd control interface).
 */
bool nfsd_disable_splice_read __read_mostly;
/* I/O cache policy for READ and WRITE; both default to buffered I/O. */
u64 nfsd_io_cache_read __read_mostly = NFSD_IO_BUFFERED;
u64 nfsd_io_cache_write __read_mostly = NFSD_IO_BUFFERED;

/**
 * nfserrno - Map Linux errnos to NFS errnos
 * @errno: POSIX(-ish) error code to be mapped
 *
 * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
 * it's an error we don't expect, log it once and return nfserr_io.
61 */ 62 __be32 63 nfserrno (int errno) 64 { 65 static struct { 66 __be32 nfserr; 67 int syserr; 68 } nfs_errtbl[] = { 69 { nfs_ok, 0 }, 70 { nfserr_perm, -EPERM }, 71 { nfserr_noent, -ENOENT }, 72 { nfserr_io, -EIO }, 73 { nfserr_nxio, -ENXIO }, 74 { nfserr_fbig, -E2BIG }, 75 { nfserr_stale, -EBADF }, 76 { nfserr_acces, -EACCES }, 77 { nfserr_exist, -EEXIST }, 78 { nfserr_xdev, -EXDEV }, 79 { nfserr_nodev, -ENODEV }, 80 { nfserr_notdir, -ENOTDIR }, 81 { nfserr_isdir, -EISDIR }, 82 { nfserr_inval, -EINVAL }, 83 { nfserr_fbig, -EFBIG }, 84 { nfserr_nospc, -ENOSPC }, 85 { nfserr_rofs, -EROFS }, 86 { nfserr_mlink, -EMLINK }, 87 { nfserr_nametoolong, -ENAMETOOLONG }, 88 { nfserr_notempty, -ENOTEMPTY }, 89 { nfserr_dquot, -EDQUOT }, 90 { nfserr_stale, -ESTALE }, 91 { nfserr_jukebox, -ETIMEDOUT }, 92 { nfserr_jukebox, -ERESTARTSYS }, 93 { nfserr_jukebox, -EAGAIN }, 94 { nfserr_jukebox, -EWOULDBLOCK }, 95 { nfserr_jukebox, -ENOMEM }, 96 { nfserr_io, -ETXTBSY }, 97 { nfserr_notsupp, -EOPNOTSUPP }, 98 { nfserr_toosmall, -ETOOSMALL }, 99 { nfserr_serverfault, -ESERVERFAULT }, 100 { nfserr_serverfault, -ENFILE }, 101 { nfserr_io, -EREMOTEIO }, 102 { nfserr_stale, -EOPENSTALE }, 103 { nfserr_io, -EUCLEAN }, 104 { nfserr_perm, -ENOKEY }, 105 { nfserr_no_grace, -ENOGRACE}, 106 { nfserr_io, -EBADMSG }, 107 }; 108 int i; 109 110 for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) { 111 if (nfs_errtbl[i].syserr == errno) 112 return nfs_errtbl[i].nfserr; 113 } 114 WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno); 115 return nfserr_io; 116 } 117 118 /* 119 * Called from nfsd_lookup and encode_dirent. Check if we have crossed 120 * a mount point. 
 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
 * or 0 having possibly changed *dpp and *expp
 */
int
nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
	       struct svc_export **expp)
{
	struct svc_export *exp = *expp, *exp2 = NULL;
	struct dentry *dentry = *dpp;
	struct path path = {.mnt = mntget(exp->ex_path.mnt),
			    .dentry = dget(dentry)};
	unsigned int follow_flags = 0;
	int err = 0;

	/* Only trigger automounts when the export allows crossing into them */
	if (exp->ex_flags & NFSEXP_CROSSMOUNT)
		follow_flags = LOOKUP_AUTOMOUNT;

	err = follow_down(&path, follow_flags);
	if (err < 0)
		goto out;
	if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
	    nfsd_mountpoint(dentry, exp) == 2) {
		/* This is only a mountpoint in some other namespace */
		path_put(&path);
		goto out;
	}

	exp2 = rqst_exp_get_by_name(rqstp, &path);
	if (IS_ERR(exp2)) {
		err = PTR_ERR(exp2);
		/*
		 * We normally allow NFS clients to continue
		 * "underneath" a mountpoint that is not exported.
		 * The exception is V4ROOT, where no traversal is ever
		 * allowed without an explicit export of the new
		 * directory.
		 */
		if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
			err = 0;
		path_put(&path);
		goto out;
	}
	if (nfsd_v4client(rqstp) ||
		(exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
		/* successfully crossed mount point */
		/*
		 * This is subtle: path.dentry is *not* on path.mnt
		 * at this point.  The only reason we are safe is that
		 * original mnt is pinned down by exp, so we should
		 * put path *before* putting exp
		 */
		*dpp = path.dentry;
		path.dentry = dentry;
		*expp = exp2;
		exp2 = exp;
	}
	path_put(&path);
	exp_put(exp2);
 out:
	return err;
}

/*
 * Pop up through any mounts stacked at this point, then step to the
 * parent directory.
 */
static void follow_to_parent(struct path *path)
{
	struct dentry *dp;

	while (path->dentry == path->mnt->mnt_root && follow_up(path))
		;
	dp = dget_parent(path->dentry);
	dput(path->dentry);
	path->dentry = dp;
}

/*
 * Find the parent of @dparent for a ".." lookup, crossing into the
 * parent export if necessary. If rqst_exp_parent() reports -ENOENT
 * (no export above us), stay at @dparent; any other error is returned
 * unchanged. On success *exp/*dentryp carry fresh references.
 */
static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent,
			      struct svc_export **exp, struct dentry **dentryp)
{
	struct svc_export *exp2;
	struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
			    .dentry = dget(dparent)};

	follow_to_parent(&path);

	exp2 = rqst_exp_parent(rqstp, &path);
	if (PTR_ERR(exp2) == -ENOENT) {
		*dentryp = dget(dparent);
	} else if (IS_ERR(exp2)) {
		path_put(&path);
		return PTR_ERR(exp2);
	} else {
		*dentryp = dget(path.dentry);
		exp_put(*exp);
		*exp = exp2;
	}
	path_put(&path);
	return 0;
}

/*
 * For nfsd purposes, we treat V4ROOT exports as though there was an
 * export at *every* directory.
 * We return:
 * '1' if this dentry *must* be an export point,
 * '2' if it might be, if there is really a mount here, and
 * '0' if there is no chance of an export point here.
 */
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
	if (!d_inode(dentry))
		return 0;
	if (exp->ex_flags & NFSEXP_V4ROOT)
		return 1;
	if (nfsd4_is_junction(dentry))
		return 1;
	if (d_managed(dentry))
		/*
		 * Might only be a mountpoint in a different namespace,
		 * but we need to check.
		 */
		return 2;
	return 0;
}

/*
 * Look up @name in directory @fhp, returning referenced @exp_ret and
 * @dentry_ret on success. "." and ".." are handled specially: ".." at
 * an export root either stays put or steps into the parent export via
 * nfsd_lookup_parent(). Mountpoint crossing is delegated to
 * nfsd_cross_mnt(). Returns nfs_ok or an nfserr status.
 */
__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   const char *name, unsigned int len,
		   struct svc_export **exp_ret, struct dentry **dentry_ret)
{
	struct svc_export *exp;
	struct dentry *dparent;
	struct dentry *dentry;
	int host_err;

	trace_nfsd_vfs_lookup(rqstp, fhp, name, len);

	dparent = fhp->fh_dentry;
	exp = exp_get(fhp->fh_export);

	/* Lookup the name, but don't follow links */
	if (isdotent(name, len)) {
		if (len == 1)
			dentry = dget(dparent);
		else if (dparent != exp->ex_path.dentry)
			dentry = dget_parent(dparent);
		else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
			dentry = dget(dparent); /* .. == . just like at / */
		else {
			/* checking mountpoint crossing is very different when stepping up */
			host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
			if (host_err)
				goto out_nfserr;
		}
	} else {
		dentry = lookup_one_unlocked(&nop_mnt_idmap,
					     &QSTR_LEN(name, len), dparent);
		host_err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_nfserr;
		if (nfsd_mountpoint(dentry, exp)) {
			host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
			if (host_err) {
				dput(dentry);
				goto out_nfserr;
			}
		}
	}
	*dentry_ret = dentry;
	*exp_ret = exp;
	return 0;

out_nfserr:
	exp_put(exp);
	return nfserrno(host_err);
}

/**
 * nfsd_lookup - look up a single path component for nfsd
 *
 * @rqstp: the request context
 * @fhp: the file handle of the directory
 * @name: the component name, or %NULL to look up parent
 * @len: length of name to examine
 * @resfh: pointer to pre-initialised filehandle to hold result.
 *
 * Look up one component of a pathname.
 * N.B.
 * After this call _both_ fhp and resfh need an fh_put
 *
 * If the lookup would cross a mountpoint, and the mounted filesystem
 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
 * accepted as it stands and the mounted directory is
 * returned. Otherwise the covered directory is returned.
 * NOTE: this mountpoint crossing is not supported properly by all
 * clients and is explicitly disallowed for NFSv3
 *
 */
__be32
nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
	    unsigned int len, struct svc_fh *resfh)
{
	struct svc_export *exp;
	struct dentry *dentry;
	__be32 err;

	/* The caller must hold execute permission on the directory */
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
	if (err)
		return err;
	err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
	if (err)
		return err;
	err = check_nfsd_access(exp, rqstp, false);
	if (err)
		goto out;
	/*
	 * Note: we compose the file handle now, but as the
	 * dentry may be negative, it may need to be updated.
	 */
	err = fh_compose(resfh, exp, dentry, fhp);
	if (!err && d_really_is_negative(dentry))
		err = nfserr_noent;
out:
	dput(dentry);
	exp_put(exp);
	return err;
}

/*
 * Reset the boot-time write verifier after a commit failure, except for
 * errors that do not indicate a durable-storage problem.
 */
static void
commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
			    int err)
{
	switch (err) {
	case -EAGAIN:
	case -ESTALE:
		/*
		 * Neither of these are the result of a problem with
		 * durable storage, so avoid a write verifier reset.
		 */
		break;
	default:
		nfsd_reset_write_verifier(nn);
		trace_nfsd_writeverf_reset(nn, rqstp, err);
	}
}

/*
 * Commit metadata changes to stable storage.
 */
static int
commit_inode_metadata(struct inode *inode)
{
	const struct export_operations *export_ops = inode->i_sb->s_export_op;

	/* Prefer the filesystem's own commit hook when it provides one */
	if (export_ops->commit_metadata)
		return export_ops->commit_metadata(inode);
	return sync_inode_metadata(inode, 1);
}

/* As commit_inode_metadata(), but a no-op for async exports */
static int
commit_metadata(struct svc_fh *fhp)
{
	struct inode *inode = d_inode(fhp->fh_dentry);

	if (!EX_ISSYNC(fhp->fh_export))
		return 0;
	return commit_inode_metadata(inode);
}

/*
 * Go over the attributes and take care of the small differences between
 * NFS semantics and what Linux expects.
 */
static void
nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
{
	/* Ignore mode updates on symlinks */
	if (S_ISLNK(inode->i_mode))
		iap->ia_valid &= ~ATTR_MODE;

	/* sanitize the mode change: keep only permission bits from the
	 * client, preserving the inode's format and other high bits */
	if (iap->ia_valid & ATTR_MODE) {
		iap->ia_mode &= S_IALLUGO;
		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
	}

	/* Revoke setuid/setgid on chown */
	if (!S_ISDIR(inode->i_mode) &&
	    ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
		iap->ia_valid |= ATTR_KILL_PRIV;
		if (iap->ia_valid & ATTR_MODE) {
			/* we're setting mode too, just clear the s*id bits */
			iap->ia_mode &= ~S_ISUID;
			if (iap->ia_mode & S_IXGRP)
				iap->ia_mode &= ~S_ISGID;
		} else {
			/* set ATTR_KILL_* bits and let VFS handle it */
			iap->ia_valid |= ATTR_KILL_SUID;
			iap->ia_valid |=
				setattr_should_drop_sgid(&nop_mnt_idmap, inode);
		}
	}
}

/*
 * Take a write reference on the inode prior to a size change; shrinking
 * the file additionally requires truncate permission. The reference must
 * later be dropped with put_write_access().
 */
static __be32
nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
		      struct iattr *iap)
{
	struct inode *inode = d_inode(fhp->fh_dentry);

	if (iap->ia_size < inode->i_size) {
		__be32 err;

		err = nfsd_permission(&rqstp->rq_cred,
				      fhp->fh_export, fhp->fh_dentry,
				      NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
		if (err)
			return err;
	}
	return nfserrno(get_write_access(inode));
}

/*
 * Apply @iap to @dentry, splitting any size change into its own
 * notify_change() call (see nfsd_setattr below). Returns 0 or a
 * negative errno from notify_change().
 */
static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
{
	int host_err;

	if (iap->ia_valid & ATTR_SIZE) {
		/*
		 * RFC5661, Section 18.30.4:
		 * Changing the size of a file with SETATTR indirectly
		 * changes the time_modify and change attributes.
		 *
		 * (and similar for the older RFCs)
		 */
		struct iattr size_attr = {
			.ia_valid	= ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
			.ia_size	= iap->ia_size,
		};

		if (iap->ia_size < 0)
			return -EFBIG;

		host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL);
		if (host_err)
			return host_err;
		iap->ia_valid &= ~ATTR_SIZE;

		/*
		 * Avoid the additional setattr call below if the only other
		 * attribute that the client sends is the mtime, as we update
		 * it as part of the size change above.
		 */
		if ((iap->ia_valid & ~ATTR_MTIME) == 0)
			return 0;
	}

	if ((iap->ia_valid & ~ATTR_DELEG) == 0)
		return 0;

	/*
	 * If ATTR_DELEG is set, then this is an update from a client that
	 * holds a delegation. If this is an update for only the atime, the
	 * ctime should not be changed. If the update contains the mtime
	 * too, then ATTR_CTIME should already be set.
	 */
	if (!(iap->ia_valid & ATTR_DELEG))
		iap->ia_valid |= ATTR_CTIME;

	return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
}

/**
 * nfsd_setattr - Set various file attributes.
 * @rqstp: controlling RPC transaction
 * @fhp: filehandle of target
 * @attr: attributes to set
 * @guardtime: do not act if ctime.tv_sec does not match this timestamp
 *
 * This call may adjust the contents of @attr (in particular, this
 * call may change the bits in the na_iattr.ia_valid field).
 *
 * Returns nfs_ok on success, otherwise an NFS status code is
 * returned. Caller must release @fhp by calling fh_put in either
 * case.
 */
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
	     struct nfsd_attrs *attr, const struct timespec64 *guardtime)
{
	struct dentry *dentry;
	struct inode *inode;
	struct iattr *iap = attr->na_iattr;
	int accmode = NFSD_MAY_SATTR;
	umode_t ftype = 0;
	__be32 err;
	int host_err = 0;
	bool get_write_count;
	bool size_change = (iap->ia_valid & ATTR_SIZE);
	int retries;

	trace_nfsd_vfs_setattr(rqstp, fhp, iap, guardtime);

	/* Size changes are only meaningful for regular files */
	if (iap->ia_valid & ATTR_SIZE) {
		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
		ftype = S_IFREG;
	}

	/*
	 * If utimes(2) and friends are called with times not NULL, we should
	 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
	 * will return EACCES, when the caller's effective UID does not match
	 * the owner of the file, and the caller is not privileged. In this
	 * situation, we should return EPERM(notify_change will return this).
	 */
	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
		accmode |= NFSD_MAY_OWNER_OVERRIDE;
		if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
			accmode |= NFSD_MAY_WRITE;
	}

	/* Callers that do fh_verify should do the fh_want_write: */
	get_write_count = !fhp->fh_dentry;

	/* Get inode */
	err = fh_verify(rqstp, fhp, ftype, accmode);
	if (err)
		return err;
	if (get_write_count) {
		host_err = fh_want_write(fhp);
		if (host_err)
			goto out;
	}

	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);

	nfsd_sanitize_attrs(inode, iap);

	/*
	 * The size case is special, it changes the file in addition to the
	 * attributes, and file systems don't expect it to be mixed with
	 * "random" attribute changes. We thus split out the size change
	 * into a separate call to ->setattr, and do the rest as a separate
	 * setattr call.
	 */
	if (size_change) {
		err = nfsd_get_write_access(rqstp, fhp, iap);
		if (err)
			return err;
	}

	inode_lock(inode);
	err = fh_fill_pre_attrs(fhp);
	if (err)
		goto out_unlock;

	/*
	 * Guard check (NFSv3 sattrguard3): only the low 32 bits of the
	 * seconds are compared.
	 */
	if (guardtime) {
		struct timespec64 ctime = inode_get_ctime(inode);

		if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec ||
		    guardtime->tv_nsec != ctime.tv_nsec) {
			err = nfserr_notsync;
			goto out_fill_attrs;
		}
	}

	/* Retry once after waiting for an outstanding delegation return */
	for (retries = 1;;) {
		struct iattr attrs;

		/*
		 * notify_change() can alter its iattr argument, making
		 * @iap unsuitable for submission multiple times. Make a
		 * copy for every loop iteration.
		 */
		attrs = *iap;
		host_err = __nfsd_setattr(dentry, &attrs);
		if (host_err != -EAGAIN || !retries--)
			break;
		if (!nfsd_wait_for_delegreturn(rqstp, inode))
			break;
	}
	if (attr->na_seclabel && attr->na_seclabel->len)
		attr->na_labelerr = security_inode_setsecctx(dentry,
			attr->na_seclabel->data, attr->na_seclabel->len);
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl)
		attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_ACCESS,
						attr->na_pacl);
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) &&
	    !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode))
		attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_DEFAULT,
						attr->na_dpacl);
out_fill_attrs:
	/*
	 * RFC 1813 Section 3.3.2 does not mandate that an NFS server
	 * returns wcc_data for SETATTR. Some client implementations
	 * depend on receiving wcc_data, however, to sort out partial
	 * updates (eg., the client requested that size and mode be
	 * modified, but the server changed only the file mode).
	 */
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(inode);
	if (size_change)
		put_write_access(inode);
out:
	if (!host_err)
		host_err = commit_metadata(fhp);
	return err != 0 ? err : nfserrno(host_err);
}

#if defined(CONFIG_NFSD_V4)
/*
 * NFS junction information is stored in an extended attribute.
 */
#define NFSD_JUNCTION_XATTR_NAME	XATTR_TRUSTED_PREFIX "junction.nfs"

/**
 * nfsd4_is_junction - Test if an object could be an NFS junction
 *
 * @dentry: object to test
 *
 * Returns 1 if "dentry" appears to contain NFS junction information.
 * Otherwise 0 is returned.
 */
int nfsd4_is_junction(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode == NULL)
		return 0;
	/* A junction is a non-executable, sticky object with the xattr */
	if (inode->i_mode & S_IXUGO)
		return 0;
	if (!(inode->i_mode & S_ISVTX))
		return 0;
	if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME,
			 NULL, 0) <= 0)
		return 0;
	return 1;
}

/* Fetch the v4 compound state embedded in the response being built */
static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
{
	return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
}

/*
 * Clone a byte range from @nf_src to @nf_dst; @count == 0 means "to
 * EOF". When @sync is set, flush the destination range and commit the
 * source metadata, resetting the write verifier on failure.
 */
__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
			      struct nfsd_file *nf_src, u64 src_pos,
			      struct nfsd_file *nf_dst, u64 dst_pos,
			      u64 count, bool sync)
{
	struct file *src = nf_src->nf_file;
	struct file *dst = nf_dst->nf_file;
	errseq_t since;
	loff_t cloned;
	__be32 ret = 0;

	since = READ_ONCE(dst->f_wb_err);
	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
	if (cloned < 0) {
		ret = nfserrno(cloned);
		goto out_err;
	}
	/* A short clone (other than the whole-file case) is an error */
	if (count && cloned != count) {
		ret = nfserrno(-EINVAL);
		goto out_err;
	}
	if (sync) {
		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);

		if (!status)
			status = filemap_check_wb_err(dst->f_mapping, since);
		if (!status)
			status = commit_inode_metadata(file_inode(src));
		if (status < 0) {
			struct nfsd_net *nn = net_generic(nf_dst->nf_net,
							  nfsd_net_id);

			trace_nfsd_clone_file_range_err(rqstp,
					&nfsd4_get_cstate(rqstp)->save_fh,
					src_pos,
					&nfsd4_get_cstate(rqstp)->current_fh,
					dst_pos,
					count, status);
			commit_reset_write_verifier(nn, rqstp, status);
			ret = nfserrno(status);
		}
	}
out_err:
	return ret;
}

ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
			     u64 dst_pos, u64 count)
{
	ssize_t ret;

	/*
	 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
	 * thread and client rpc slot. The choice of 4MB is somewhat
	 * arbitrary. We might instead base this on r/wsize, or make it
	 * tunable, or use a time instead of a byte limit, or implement
	 * asynchronous copy. In theory a client could also recognize a
	 * limit like this and pipeline multiple COPY requests.
	 */
	count = min_t(u64, count, 1 << 22);
	ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);

	/* Fall back to a splice-based copy for cross-device requests */
	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
					  COPY_FILE_SPLICE);
	return ret;
}

/*
 * ALLOCATE/DEALLOCATE on a regular file; commits metadata on success
 * for sync exports.
 */
__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
			   struct file *file, loff_t offset, loff_t len,
			   int flags)
{
	int error;

	if (!S_ISREG(file_inode(file)->i_mode))
		return nfserr_inval;

	error = vfs_fallocate(file, flags, offset, len);
	if (!error)
		error = commit_metadata(fhp);

	return nfserrno(error);
}
#endif /* defined(CONFIG_NFSD_V4) */

/*
 * Check server access rights to a file system object
 */
struct accessmap {
	u32		access;
	int		how;
};
static struct accessmap	nfs3_regaccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_TRUNC	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE			},

#ifdef CONFIG_NFSD_V4
    {	NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
    {	NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
    {	NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

    {	0,			0				}
};

static struct accessmap	nfs3_diraccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_LOOKUP,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_EXEC|NFSD_MAY_WRITE	},
    {	NFS3_ACCESS_DELETE,	NFSD_MAY_REMOVE			},

#ifdef CONFIG_NFSD_V4
    {	NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
    {	NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
    {	NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

    {	0,			0				}
};

static struct accessmap	nfs3_anyaccess[] = {
	/* Some clients - Solaris 2.6 at least, make an access call
	 * to the server to check for access for things like /dev/null
	 * (which really, the server doesn't care about).  So
	 * We provide simple access checking for them, looking
	 * mainly at mode bits, and we make sure to ignore read-only
	 * filesystem checks
	 */
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},

    {	0,			0				}
};

/*
 * Answer an ACCESS query: *access holds the requested bits on entry and
 * the granted bits on return; *supported (if non-NULL) reports which of
 * the requested bits the server actually checked.
 */
__be32
nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
{
	struct accessmap	*map;
	struct svc_export	*export;
	struct dentry		*dentry;
	u32			query, result = 0, sresult = 0;
	__be32			error;

	error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
	if (error)
		goto out;

	export = fhp->fh_export;
	dentry = fhp->fh_dentry;

	if (d_is_reg(dentry))
		map = nfs3_regaccess;
	else if (d_is_dir(dentry))
		map = nfs3_diraccess;
	else
		map = nfs3_anyaccess;


	query = *access;
	for  (; map->access; map++) {
		if (map->access & query) {
			__be32 err2;

			sresult |= map->access;

			err2 = nfsd_permission(&rqstp->rq_cred, export,
					       dentry, map->how);
			switch (err2) {
			case nfs_ok:
				result |= map->access;
				break;

			/* the following error codes just mean the access was not allowed,
			 * rather than an error occurred */
			case nfserr_rofs:
			case nfserr_acces:
			case nfserr_perm:
				/* simply don't "or" in the access bit. */
				break;
			default:
				error = err2;
				goto out;
			}
		}
	}
	*access = result;
	if (supported)
		*supported = sresult;

 out:
	return error;
}

/*
 * Break any lease conflicting with this open; returns 0, -ENOMEM, or
 * -EWOULDBLOCK (the open is non-blocking, so we never wait here).
 */
int nfsd_open_break_lease(struct inode *inode, int access)
{
	unsigned int mode;

	if (access & NFSD_MAY_NOT_BREAK_LEASE)
		return 0;
	mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
	return break_lease(inode, mode | O_NONBLOCK);
}

/*
 * Open an existing file or directory.
 * The may_flags argument indicates the type of open (read/write/lock)
 * and additional flags.
 * N.B. After this call fhp needs an fh_put
 */
static int
__nfsd_open(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
	struct path	path;
	struct inode	*inode;
	struct file	*file;
	int		flags = O_RDONLY|O_LARGEFILE;
	int		host_err = -EPERM;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;
	inode = d_inode(path.dentry);

	/* Writing to an append-only inode is not allowed over NFS */
	if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
		goto out;

	if (!inode->i_fop)
		goto out;

	host_err = nfsd_open_break_lease(inode, may_flags);
	if (host_err) /* NOMEM or WOULDBLOCK */
		goto out;

	if (may_flags & NFSD_MAY_WRITE) {
		if (may_flags & NFSD_MAY_READ)
			flags = O_RDWR|O_LARGEFILE;
		else
			flags = O_WRONLY|O_LARGEFILE;
	}

	file = dentry_open(&path, flags, current_cred());
	if (IS_ERR(file)) {
		host_err = PTR_ERR(file);
		goto out;
	}

	host_err = security_file_post_open(file, may_flags);
	if (host_err) {
		fput(file);
		goto out;
	}

	*filp = file;
out:
	return host_err;
}

__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
		int may_flags, struct file **filp)
{
	__be32 err;
	int host_err;
	bool retried = false;

	/*
	 * If we get here, then the client has already done an "open",
	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
	 * in case a chmod has now revoked permission.
	 *
	 * Arguably we should also allow the owner override for
	 * directories, but we never have and it doesn't seem to have
	 * caused anyone a problem.  If we were to change this, note
	 * also that our filldir callbacks would need a variant of
	 * lookup_one_positive_unlocked() that doesn't check permissions.
	 */
	if (type == S_IFREG)
		may_flags |= NFSD_MAY_OWNER_OVERRIDE;
retry:
	err = fh_verify(rqstp, fhp, type, may_flags);
	if (!err) {
		host_err = __nfsd_open(fhp, type, may_flags, filp);
		/* A stale open is retried once with a re-verified handle */
		if (host_err == -EOPENSTALE && !retried) {
			retried = true;
			fh_put(fhp);
			goto retry;
		}
		err = nfserrno(host_err);
	}
	return err;
}

/**
 * nfsd_open_verified - Open a regular file for the filecache
 * @fhp: NFS filehandle of the file to open
 * @may_flags: internal permission flags
 * @filp: OUT: open "struct file *"
 *
 * Returns zero on success, or a negative errno value.
 */
int
nfsd_open_verified(struct svc_fh *fhp, int may_flags, struct file **filp)
{
	return __nfsd_open(fhp, S_IFREG, may_flags, filp);
}

/*
 * Grab and keep cached pages associated with a file in the svc_rqst
 * so that they can be passed to the network sendmsg routines
 * directly. They will be released after the sending has completed.
 *
 * Return values: Number of bytes consumed, or -EIO if there are no
 * remaining pages in rqstp->rq_pages.
 */
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		  struct splice_desc *sd)
{
	struct svc_rqst *rqstp = sd->u.data;
	struct page *page = buf->page;	// may be a compound one
	unsigned offset = buf->offset;
	struct page *last_page;

	last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
	for (page += offset / PAGE_SIZE; page <= last_page; page++) {
		/*
		 * Skip page replacement when extending the contents of the
		 * current page.  But note that we may get two zero_pages in a
		 * row from shmem.
		 */
		if (page == *(rqstp->rq_next_page - 1) &&
		    offset_in_page(rqstp->rq_res.page_base +
				   rqstp->rq_res.page_len))
			continue;
		if (unlikely(!svc_rqst_replace_page(rqstp, page)))
			return -EIO;
	}
	if (rqstp->rq_res.page_len == 0)	// first call
		rqstp->rq_res.page_base = offset % PAGE_SIZE;
	rqstp->rq_res.page_len += sd->len;
	return sd->len;
}

/* Drive nfsd_splice_actor for each buffer in the pipe */
static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
				    struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}

/*
 * Return non-zero if this read reached end-of-file: either a short
 * (zero-byte) result for a non-zero request, or the read extends to or
 * past the current inode size.
 */
static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
			    size_t expected)
{
	if (expected != 0 && len == 0)
		return 1;
	if (offset+len >= i_size_read(file_inode(file)))
		return 1;
	return 0;
}

/*
 * Common completion for READ: on success, account the bytes, compute
 * the EOF flag, and update *count; on failure, map the errno.
 */
static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
			       struct file *file, loff_t offset,
			       unsigned long *count, u32 *eof, ssize_t host_err)
{
	if (host_err >= 0) {
		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

		nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
		*eof = nfsd_eof_on_read(file, offset, host_err, *count);
		*count = host_err;
		fsnotify_access(file);
		trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
		return 0;
	} else {
		trace_nfsd_read_err(rqstp, fhp, offset, host_err);
		return nfserrno(host_err);
	}
}

/**
 * nfsd_splice_read - Perform a VFS read using a splice pipe
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @file: opened struct file of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
 */
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
			struct file *file, loff_t offset, unsigned long *count,
			u32 *eof)
{
	struct splice_desc sd = {
		.len		= 0,
		.total_len	= *count,
		.pos		= offset,
		.u.data		= rqstp,
	};
	ssize_t host_err;

	trace_nfsd_read_splice(rqstp, fhp, offset, *count);
	host_err = rw_verify_area(READ, file, &offset, *count);
	if (!host_err)
		host_err = splice_direct_to_actor(file, &sd,
						  nfsd_direct_splice_actor);
	return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}

/*
 * The byte range of the client's READ request is expanded on both ends
 * until it meets the underlying file system's direct I/O alignment
 * requirements. After the internal read is complete, the byte range of
 * the NFS READ payload is reduced to the byte range that was originally
 * requested.
 *
 * Note that a direct read can be done only when the xdr_buf containing
 * the NFS READ reply does not already have contents in its .pages array.
 * This is due to potentially restrictive alignment requirements on the
 * read buffer. When .page_len and @base are zero, the .pages array is
 * guaranteed to be page-aligned.
 */
static noinline_for_stack __be32
nfsd_direct_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		 struct nfsd_file *nf, loff_t offset, unsigned long *count,
		 u32 *eof)
{
	u64 dio_start, dio_end;
	unsigned long v, total;
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t host_err;
	size_t len;

	init_sync_kiocb(&kiocb, nf->nf_file);
	kiocb.ki_flags |= IOCB_DIRECT;

	/* Read a properly-aligned region of bytes into rq_bvec */
	dio_start = round_down(offset, nf->nf_dio_read_offset_align);
	dio_end = round_up((u64)offset + *count, nf->nf_dio_read_offset_align);

	kiocb.ki_pos = dio_start;

	/*
	 * Populate rq_bvec with whole reply pages.  Note that
	 * rq_next_page is advanced here even if the read below fails;
	 * presumably the caller's reply-buffer accounting tolerates
	 * that — TODO confirm against svc_process_common().
	 */
	v = 0;
	total = dio_end - dio_start;
	while (total && v < rqstp->rq_maxpages &&
	       rqstp->rq_next_page < rqstp->rq_page_end) {
		len = min_t(size_t, total, PAGE_SIZE);
		bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
			      len, 0);

		total -= len;
		++rqstp->rq_next_page;
		++v;
	}

	trace_nfsd_read_direct(rqstp, fhp, offset, *count - total);
	iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
		      dio_end - dio_start - total);

	host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
	if (host_err >= 0) {
		/* Bytes read before the client's requested start offset */
		unsigned int pad = offset - dio_start;

		/* The returned payload starts after the pad */
		rqstp->rq_res.page_base = pad;

		/* Compute the count of bytes to be returned */
		if (host_err > pad + *count)
			host_err = *count;
		else if (host_err > pad)
			host_err -= pad;
		else
			host_err = 0;
	} else if (unlikely(host_err == -EINVAL)) {
		struct inode *inode = d_inode(fhp->fh_dentry);

		/*
		 * -EINVAL from an IOCB_DIRECT read indicates an
		 * alignment problem on this filesystem; report it as a
		 * server fault rather than an invalid client argument.
		 */
		pr_info_ratelimited("nfsd: Direct I/O alignment failure on %s/%ld\n",
				    inode->i_sb->s_id, inode->i_ino);
		host_err = -ESERVERFAULT;
	}

	return nfsd_finish_read(rqstp, fhp, nf->nf_file, offset, count,
				eof, host_err);
}

/**
 * nfsd_iter_read - Perform a VFS read using an iterator
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @nf: opened struct nfsd_file of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @base: offset in first page of read buffer
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * Some filesystems or situations cannot use nfsd_splice_read. This
 * function is the slightly less-performant fallback for those cases.
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
 */
__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		      struct nfsd_file *nf, loff_t offset, unsigned long *count,
		      unsigned int base, u32 *eof)
{
	struct file *file = nf->nf_file;
	unsigned long v, total;
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t host_err;
	size_t len;

	init_sync_kiocb(&kiocb, file);

	switch (nfsd_io_cache_read) {
	case NFSD_IO_BUFFERED:
		break;
	case NFSD_IO_DIRECT:
		/* When dio_read_offset_align is zero, dio is not supported */
		if (nf->nf_dio_read_offset_align && !rqstp->rq_res.page_len)
			return nfsd_direct_read(rqstp, fhp, nf, offset,
						count, eof);
		fallthrough;
	case NFSD_IO_DONTCACHE:
		/*
		 * NOTE(review): this overwrites ki_flags with '=' while
		 * the write path (nfsd_vfs_write) uses '|='; confirm
		 * that discarding the flags set by init_sync_kiocb()
		 * here is intentional.
		 */
		if (file->f_op->fop_flags & FOP_DONTCACHE)
			kiocb.ki_flags = IOCB_DONTCACHE;
		break;
	}

	kiocb.ki_pos = offset;

	/*
	 * Build the destination bvec from reply pages; @base applies
	 * only to the first page, subsequent pages are filled from
	 * offset zero.
	 */
	v = 0;
	total = *count;
	while (total && v < rqstp->rq_maxpages &&
	       rqstp->rq_next_page < rqstp->rq_page_end) {
		len = min_t(size_t, total, PAGE_SIZE - base);
		bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
			      len, base);

		total -= len;
		++rqstp->rq_next_page;
		++v;
		base = 0;
	}

	trace_nfsd_read_vector(rqstp, fhp, offset, *count - total);
	iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count - total);
	host_err = vfs_iocb_iter_read(file, &kiocb, &iter);
	return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}

/*
 * Gathered writes: If another process is currently writing to the file,
 * there's a high chance this is another nfsd (triggered by a bulk write
 * from a client's biod). Rather than syncing the file with each write
 * request, we sleep for 10 msec.
 *
 * I don't know if this roughly approximates C. Juszak's idea of
 * gathered writes, but it's a nice and simple solution (IMHO), and it
 * seems to work:-)
 *
 * Note: we do this only in the NFSv2 case, since v3 and higher have a
 * better tool (separate unstable writes and commits) for solving this
 * problem.
 */
static int wait_for_concurrent_writes(struct file *file)
{
	struct inode *inode = file_inode(file);
	/*
	 * Unsynchronized, best-effort memory of the last file written:
	 * a stale or torn read here only defeats the heuristic, it
	 * cannot corrupt anything.
	 */
	static ino_t last_ino;
	static dev_t last_dev;
	int err = 0;

	if (atomic_read(&inode->i_writecount) > 1
	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
		msleep(10);
		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
	}

	if (inode->i_state & I_DIRTY) {
		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
		err = vfs_fsync(file, 0);
	}
	last_ino = inode->i_ino;
	last_dev = inode->i_sb->s_dev;
	return err;
}

/*
 * One slice of a WRITE payload: a sub-iterator over the request's
 * bvec array plus the kiocb flags (possibly including IOCB_DIRECT)
 * to use when writing it.
 */
struct nfsd_write_dio_seg {
	struct iov_iter iter;
	int flags;
};

/* Byte offset of the iterator's current position within its first page */
static unsigned long
iov_iter_bvec_offset(const struct iov_iter *iter)
{
	return (unsigned long)(iter->bvec->bv_offset + iter->iov_offset);
}

/*
 * Initialize @segment to cover bytes [@start, @start + @len) of the
 * @total-byte payload described by @bvec/@nvecs, inheriting the
 * caller's kiocb flags as a starting point.
 */
static void
nfsd_write_dio_seg_init(struct nfsd_write_dio_seg *segment,
			struct bio_vec *bvec, unsigned int nvecs,
			unsigned long total, size_t start, size_t len,
			struct kiocb *iocb)
{
	iov_iter_bvec(&segment->iter, ITER_SOURCE, bvec, nvecs, total);
	if (start)
		iov_iter_advance(&segment->iter, start);
	iov_iter_truncate(&segment->iter, len);
	segment->flags = iocb->ki_flags;
}

/*
 * Split a WRITE payload into up to three segments: an unaligned
 * buffered prefix, a direct-I/O-aligned middle, and an unaligned
 * buffered suffix.  @segments must have room for 3 entries.  Returns
 * the number of segments initialized; when direct I/O is not feasible
 * the whole payload is returned as a single buffered segment.
 */
static unsigned int
nfsd_write_dio_iters_init(struct nfsd_file *nf, struct bio_vec *bvec,
			  unsigned int nvecs, struct kiocb *iocb,
			  unsigned long total,
			  struct nfsd_write_dio_seg segments[3])
{
	u32 offset_align = nf->nf_dio_offset_align;
	loff_t prefix_end, orig_end, middle_end;
	u32 mem_align = nf->nf_dio_mem_align;
	size_t prefix, middle, suffix;
	loff_t offset = iocb->ki_pos;
	unsigned int nsegs = 0;

	/*
	 * Check if direct I/O is feasible for this write request.
	 * If alignments are not available, the write is too small,
	 * or no alignment can be found, fall back to buffered I/O.
	 */
	if (unlikely(!mem_align || !offset_align) ||
	    unlikely(total < max(offset_align, mem_align)))
		goto no_dio;

	prefix_end = round_up(offset, offset_align);
	orig_end = offset + total;
	middle_end = round_down(orig_end, offset_align);

	prefix = prefix_end - offset;
	middle = middle_end - prefix_end;
	suffix = orig_end - middle_end;

	/* No offset-aligned middle region: nothing to write directly */
	if (!middle)
		goto no_dio;

	if (prefix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec,
					nvecs, total, 0, prefix, iocb);

	nfsd_write_dio_seg_init(&segments[nsegs], bvec, nvecs,
				total, prefix, middle, iocb);

	/*
	 * Check if the bvec iterator is aligned for direct I/O.
	 *
	 * bvecs generated from RPC receive buffers are contiguous: After
	 * the first bvec, all subsequent bvecs start at bv_offset zero
	 * (page-aligned). Therefore, only the first bvec is checked.
	 */
	if (iov_iter_bvec_offset(&segments[nsegs].iter) & (mem_align - 1))
		goto no_dio;
	segments[nsegs].flags |= IOCB_DIRECT;
	nsegs++;

	if (suffix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec, nvecs, total,
					prefix + middle, suffix, iocb);

	return nsegs;

no_dio:
	/* No DIO alignment possible - pack into single non-DIO segment. */
	nfsd_write_dio_seg_init(&segments[0], bvec, nvecs, total, 0,
				total, iocb);
	return 1;
}

/*
 * Write the payload as the segment list built by
 * nfsd_write_dio_iters_init(), updating *cnt with the total number of
 * bytes written.  Returns 0 on success (possibly after a partial
 * write) or a negative errno.
 */
static noinline_for_stack int
nfsd_direct_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  struct nfsd_file *nf, unsigned int nvecs,
		  unsigned long *cnt, struct kiocb *kiocb)
{
	struct nfsd_write_dio_seg segments[3];
	struct file *file = nf->nf_file;
	unsigned int nsegs, i;
	ssize_t host_err;

	nsegs = nfsd_write_dio_iters_init(nf, rqstp->rq_bvec, nvecs,
					  kiocb, *cnt, segments);

	*cnt = 0;
	for (i = 0; i < nsegs; i++) {
		kiocb->ki_flags = segments[i].flags;
		if (kiocb->ki_flags & IOCB_DIRECT)
			trace_nfsd_write_direct(rqstp, fhp, kiocb->ki_pos,
						segments[i].iter.count);
		else {
			trace_nfsd_write_vector(rqstp, fhp, kiocb->ki_pos,
						segments[i].iter.count);
			/*
			 * Mark the I/O buffer as evict-able to reduce
			 * memory contention.
			 */
			if (nf->nf_file->f_op->fop_flags & FOP_DONTCACHE)
				kiocb->ki_flags |= IOCB_DONTCACHE;
		}

		host_err = vfs_iocb_iter_write(file, kiocb, &segments[i].iter);
		if (host_err < 0)
			return host_err;
		*cnt += host_err;
		/*
		 * NOTE(review): vfs_iocb_iter_write() advances the
		 * iterator, so iter.count here is the residual, not the
		 * segment's original length — confirm this comparison
		 * detects a partial write as intended.
		 */
		if (host_err < segments[i].iter.count)
			break;	/* partial write */
	}

	return 0;
}

/**
 * nfsd_vfs_write - write data to an already-open file
 * @rqstp: RPC execution context
 * @fhp: File handle of file to write into
 * @nf: An open file matching @fhp
 * @offset: Byte offset of start
 * @payload: xdr_buf containing the write payload
 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
 * @stable: An NFS stable_how value
 * @verf: NFS WRITE verifier
 *
 * Upon return, caller must invoke fh_put on @fhp.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
	       struct nfsd_file *nf, loff_t offset,
	       const struct xdr_buf *payload, unsigned long *cnt,
	       int stable, __be32 *verf)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct file *file = nf->nf_file;
	struct super_block *sb = file_inode(file)->i_sb;
	struct kiocb kiocb;
	struct svc_export *exp;
	struct iov_iter iter;
	errseq_t since;
	__be32 nfserr;
	int host_err;
	unsigned long exp_op_flags = 0;
	unsigned int pflags = current->flags;
	bool restore_flags = false;
	unsigned int nvecs;

	trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);

	if (sb->s_export_op)
		exp_op_flags = sb->s_export_op->flags;

	if (test_bit(RQ_LOCAL, &rqstp->rq_flags) &&
	    !(exp_op_flags & EXPORT_OP_REMOTE_FS)) {
		/*
		 * We want throttling in balance_dirty_pages()
		 * and shrink_inactive_list() to only consider
		 * the backingdev we are writing to, so that nfs to
		 * localhost doesn't cause nfsd to lock up due to all
		 * the client's dirty pages or its congested queue.
		 */
		current->flags |= PF_LOCAL_THROTTLE;
		restore_flags = true;
	}

	exp = fhp->fh_export;

	/* An async export downgrades every write to UNSTABLE */
	if (!EX_ISSYNC(exp))
		stable = NFS_UNSTABLE;
	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = offset;
	if (likely(!fhp->fh_use_wgather)) {
		switch (stable) {
		case NFS_FILE_SYNC:
			/* persist data and timestamps */
			kiocb.ki_flags |= IOCB_DSYNC | IOCB_SYNC;
			break;
		case NFS_DATA_SYNC:
			/* persist data only */
			kiocb.ki_flags |= IOCB_DSYNC;
			break;
		}
	}

	nvecs = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, payload);

	/* Snapshot the writeback error cursor before we dirty anything */
	since = READ_ONCE(file->f_wb_err);
	if (verf)
		nfsd_copy_write_verifier(verf, nn);

	switch (nfsd_io_cache_write) {
	case NFSD_IO_DIRECT:
		host_err = nfsd_direct_write(rqstp, fhp, nf, nvecs,
					     cnt, &kiocb);
		break;
	case NFSD_IO_DONTCACHE:
		if (file->f_op->fop_flags & FOP_DONTCACHE)
			kiocb.ki_flags |= IOCB_DONTCACHE;
		fallthrough;
	case NFSD_IO_BUFFERED:
		iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt);
		host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
		if (host_err < 0)
			break;
		*cnt = host_err;
		break;
	}
	if (host_err < 0) {
		/* A failed write invalidates the current write verifier */
		commit_reset_write_verifier(nn, rqstp, host_err);
		goto out_nfserr;
	}
	nfsd_stats_io_write_add(nn, exp, *cnt);
	fsnotify_modify(file);
	host_err = filemap_check_wb_err(file->f_mapping, since);
	if (host_err < 0)
		goto out_nfserr;

	if (stable && fhp->fh_use_wgather) {
		host_err = wait_for_concurrent_writes(file);
		if (host_err < 0)
			commit_reset_write_verifier(nn, rqstp, host_err);
	}

out_nfserr:
	if (host_err >= 0) {
		trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt);
		nfserr = nfs_ok;
	} else {
		trace_nfsd_write_err(rqstp, fhp, offset, host_err);
		nfserr = nfserrno(host_err);
	}
	if (restore_flags)
		current_restore_flags(pflags, PF_LOCAL_THROTTLE);
	return nfserr;
}

/**
 * nfsd_read_splice_ok - check if spliced reading is supported
 * @rqstp: RPC transaction context
 *
 * Return values:
 *   %true: nfsd_splice_read() may be used
 *   %false: nfsd_splice_read() must not be used
 *
 * NFS READ normally uses splice to send data in-place. However the
 * data in cache can change after the reply's MIC is computed but
 * before the RPC reply is sent. To prevent the client from
 * rejecting the server-computed MIC in this somewhat rare case, do
 * not use splice with the GSS integrity and privacy services.
 */
bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
{
	if (nfsd_disable_splice_read)
		return false;
	switch (svc_auth_flavor(rqstp)) {
	case RPC_AUTH_GSS_KRB5I:
	case RPC_AUTH_GSS_KRB5P:
		return false;
	}
	return true;
}

/**
 * nfsd_read - Read data from a file
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * The caller must verify that there is enough space in @rqstp.rq_res
 * to perform this operation.
 *
 * N.B. After this call fhp needs an fh_put
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
 */
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		 loff_t offset, unsigned long *count, u32 *eof)
{
	struct nfsd_file *nf;
	struct file *file;
	__be32 err;

	trace_nfsd_read_start(rqstp, fhp, offset, *count);
	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (err)
		return err;

	/* Prefer the zero-copy splice path when the fs and auth allow it */
	file = nf->nf_file;
	if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
		err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
	else
		err = nfsd_iter_read(rqstp, fhp, nf, offset, count, 0, eof);

	nfsd_file_put(nf);
	trace_nfsd_read_done(rqstp, fhp, offset, *count);
	return err;
}

/**
 * nfsd_write - open a file and write data to it
 * @rqstp: RPC execution context
 * @fhp: File handle of file to write into; nfsd_write() may modify it
 * @offset: Byte offset of start
 * @payload: xdr_buf containing the write payload
 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
 * @stable: An NFS stable_how value
 * @verf: NFS WRITE verifier
 *
 * Upon return, caller must invoke fh_put on @fhp.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
	   const struct xdr_buf *payload, unsigned long *cnt, int stable,
	   __be32 *verf)
{
	struct nfsd_file *nf;
	__be32 err;

	trace_nfsd_write_start(rqstp, fhp, offset, *cnt);

	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
	if (err)
		goto out;

	err = nfsd_vfs_write(rqstp, fhp, nf, offset, payload, cnt,
			     stable, verf);
	nfsd_file_put(nf);
out:
	trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
	return err;
}

/**
 * nfsd_commit - Commit pending writes to stable storage
 * @rqstp: RPC request being processed
 * @fhp: NFS filehandle
 * @nf: target file
 * @offset: raw offset from beginning of file
 * @count: raw count of bytes to sync
 * @verf: filled in with the server's current write verifier
 *
 * Note: we guarantee that data that lies within the range specified
 * by the 'offset' and 'count' parameters will be synced. The server
 * is permitted to sync data that lies outside this range at the
 * same time.
 *
 * Unfortunately we cannot lock the file to make sure we return full WCC
 * data to the client, as locking happens lower down in the filesystem.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
	    u64 offset, u32 count, __be32 *verf)
{
	__be32 err = nfs_ok;
	u64 maxbytes;
	loff_t start, end;
	struct nfsd_net *nn;

	trace_nfsd_commit_start(rqstp, fhp, offset, count);

	/*
	 * Convert the client-provided (offset, count) range to a
	 * (start, end) range. If the client-provided range falls
	 * outside the maximum file size of the underlying FS,
	 * clamp the sync range appropriately.
	 */
	start = 0;
	end = LLONG_MAX;
	maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
	if (offset < maxbytes) {
		start = offset;
		/* count == 0 means "sync to end of file" */
		if (count && (offset + count - 1 < maxbytes))
			end = offset + count - 1;
	}

	nn = net_generic(nf->nf_net, nfsd_net_id);
	if (EX_ISSYNC(fhp->fh_export)) {
		errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
		int err2;

		err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
		switch (err2) {
		case 0:
			nfsd_copy_write_verifier(verf, nn);
			/* Surface any writeback error since our snapshot */
			err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
						    since);
			err = nfserrno(err2);
			break;
		case -EINVAL:
			err = nfserr_notsupp;
			break;
		default:
			/* A failed fsync invalidates the write verifier */
			commit_reset_write_verifier(nn, rqstp, err2);
			err = nfserrno(err2);
		}
	} else
		nfsd_copy_write_verifier(verf, nn);

	trace_nfsd_commit_done(rqstp, fhp, offset, count);
	return err;
}

/**
 * nfsd_create_setattr - Set a created file's attributes
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of parent directory
 * @resfhp: NFS filehandle of new object
 * @attrs: requested attributes of new object
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
		    struct svc_fh *resfhp, struct nfsd_attrs *attrs)
{
	struct iattr *iap = attrs->na_iattr;
	__be32 status;

	/*
	 * Mode has already been set by file creation.
	 */
	iap->ia_valid &= ~ATTR_MODE;

	/*
	 * Setting uid/gid works only for root. Irix appears to
	 * send along the gid on create when it tries to implement
	 * setgid directories via NFS:
	 */
	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);

	/*
	 * Callers expect new file metadata to be committed even
	 * if the attributes have not changed.
	 */
	if (nfsd_attrs_valid(attrs))
		status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
	else
		status = nfserrno(commit_metadata(resfhp));

	/*
	 * Transactional filesystems had a chance to commit changes
	 * for both parent and child simultaneously making the
	 * following commit_metadata a noop in many cases.
	 */
	if (!status)
		status = nfserrno(commit_metadata(fhp));

	/*
	 * Update the new filehandle to pick up the new attributes.
	 */
	if (!status)
		status = fh_update(resfhp);

	return status;
}

/* HPUX client sometimes creates a file in mode 000, and sets size to 0.
 * setting size to 0 may fail for some specific file systems by the permission
 * checking which requires WRITE permission but the mode is 000.
 * we ignore the resizing(to 0) on the just new created file, since the size is
 * 0 after file created.
 *
 * call this only after vfs_create() is called.
 * */
static void
nfsd_check_ignore_resizing(struct iattr *iap)
{
	if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
		iap->ia_valid &= ~ATTR_SIZE;
}

/* The parent directory should already be locked: */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   struct nfsd_attrs *attrs,
		   int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry *dentry, *dchild;
	struct inode *dirp;
	struct iattr *iap = attrs->na_iattr;
	__be32 err;
	int host_err = 0;

	dentry = fhp->fh_dentry;
	dirp = d_inode(dentry);

	dchild = dget(resfhp->fh_dentry);
	err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
			      NFSD_MAY_CREATE);
	if (err)
		goto out;

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;

	/* POSIX ACL inheritance supersedes the process umask */
	if (!IS_POSIXACL(dirp))
		iap->ia_mode &= ~current_umask();

	err = 0;
	switch (type) {
	case S_IFREG:
		host_err = vfs_create(&nop_mnt_idmap, dirp, dchild,
				      iap->ia_mode, true);
		if (!host_err)
			nfsd_check_ignore_resizing(iap);
		break;
	case S_IFDIR:
		/*
		 * vfs_mkdir() may return a different (already
		 * instantiated) dentry; if so, swap it into resfhp.
		 */
		dchild = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
		if (IS_ERR(dchild)) {
			host_err = PTR_ERR(dchild);
		} else if (d_is_negative(dchild)) {
			err = nfserr_serverfault;
			goto out;
		} else if (unlikely(dchild != resfhp->fh_dentry)) {
			dput(resfhp->fh_dentry);
			resfhp->fh_dentry = dget(dchild);
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
				     iap->ia_mode, rdev);
		break;
	default:
		printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
		       type);
		host_err = -EINVAL;
	}
	if (host_err < 0)
		goto out_nfserr;

	err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);

out:
	/* dchild may hold PTR_ERR() from the vfs_mkdir() branch */
	if (!IS_ERR(dchild))
		dput(dchild);
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

/*
 * Create a filesystem object (regular, directory, special).
 * Note that the parent directory is left locked.
 *
 * N.B.
Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
 */
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
	    char *fname, int flen, struct nfsd_attrs *attrs,
	    int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry *dentry, *dchild = NULL;
	__be32 err;
	int host_err;

	trace_nfsd_vfs_create(rqstp, fhp, type, fname, flen);

	/* "." and ".." can never be created */
	if (isdotent(fname, flen))
		return nfserr_exist;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
	if (err)
		return err;

	dentry = fhp->fh_dentry;

	host_err = fh_want_write(fhp);
	if (host_err)
		return nfserrno(host_err);

	inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
	dchild = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry);
	host_err = PTR_ERR(dchild);
	if (IS_ERR(dchild)) {
		err = nfserrno(host_err);
		goto out_unlock;
	}
	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
	/*
	 * We unconditionally drop our ref to dchild as fh_compose will have
	 * already grabbed its own ref for it.
	 */
	dput(dchild);
	if (err)
		goto out_unlock;
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(dentry->d_inode);
	return err;
}

/*
 * Read a symlink. On entry, *lenp must contain the maximum path length that
 * fits into the buffer. On return, it contains the true length.
 * N.B.
After this call fhp needs an fh_put
 */
__be32
nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
{
	__be32 err;
	const char *link;
	struct path path;
	DEFINE_DELAYED_CALL(done);
	int len;

	err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
	if (unlikely(err))
		return err;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;

	if (unlikely(!d_is_symlink(path.dentry)))
		return nfserr_inval;

	touch_atime(&path);

	link = vfs_get_link(path.dentry, &done);
	if (IS_ERR(link))
		return nfserrno(PTR_ERR(link));

	/* A target longer than *lenp is silently truncated to fit @buf */
	len = strlen(link);
	if (len < *lenp)
		*lenp = len;
	memcpy(buf, link, *lenp);
	do_delayed_call(&done);
	return 0;
}

/**
 * nfsd_symlink - Create a symlink and look up its inode
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of parent directory
 * @fname: filename of the new symlink
 * @flen: length of @fname
 * @path: content of the new symlink (NUL-terminated)
 * @attrs: requested attributes of new object
 * @resfhp: NFS filehandle of new object
 *
 * N.B. After this call _both_ fhp and resfhp need an fh_put
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
	     char *fname, int flen,
	     char *path, struct nfsd_attrs *attrs,
	     struct svc_fh *resfhp)
{
	struct dentry *dentry, *dnew;
	__be32 err, cerr;
	int host_err;

	trace_nfsd_vfs_symlink(rqstp, fhp, fname, flen, path);

	err = nfserr_noent;
	if (!flen || path[0] == '\0')
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	host_err = fh_want_write(fhp);
	if (host_err) {
		err = nfserrno(host_err);
		goto out;
	}

	dentry = fhp->fh_dentry;
	inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
	dnew = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry);
	if (IS_ERR(dnew)) {
		err = nfserrno(PTR_ERR(dnew));
		inode_unlock(dentry->d_inode);
		goto out_drop_write;
	}
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path);
	err = nfserrno(host_err);
	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
	/*
	 * NOTE(review): the status of nfsd_create_setattr() is
	 * discarded here — confirm that attribute-set failures on a
	 * new symlink are intentionally ignored.
	 */
	if (!err)
		nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(dentry->d_inode);
	if (!err)
		err = nfserrno(commit_metadata(fhp));
	dput(dnew);
	/* Report a filehandle-composition failure only if all else worked */
	if (err==0) err = cerr;
out_drop_write:
	fh_drop_write(fhp);
out:
	return err;
}

/**
 * nfsd_link - create a link
 * @rqstp: RPC transaction context
 * @ffhp: the file handle of the directory where the new link is to be created
 * @name: the filename of the new link
 * @len: the length of @name in octets
 * @tfhp: the file handle of an existing file object
 *
 * After this call _both_ ffhp and tfhp need an fh_put.
 *
 * Returns a generic NFS status code in network byte-order.
 */
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
	  char *name, int len, struct svc_fh *tfhp)
{
	struct dentry *ddir, *dnew, *dold;
	struct inode *dirp;
	int type;
	__be32 err;
	int host_err;

	trace_nfsd_vfs_link(rqstp, ffhp, tfhp, name, len);

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
	if (err)
		goto out;
	err = nfserr_isdir;
	if (d_is_dir(tfhp->fh_dentry))
		goto out;
	err = nfserr_perm;
	if (!len)
		goto out;
	err = nfserr_exist;
	if (isdotent(name, len))
		goto out;

	/* From here on, err == nfs_ok means "report host_err instead" */
	err = nfs_ok;
	type = d_inode(tfhp->fh_dentry)->i_mode & S_IFMT;
	host_err = fh_want_write(tfhp);
	if (host_err)
		goto out;

	ddir = ffhp->fh_dentry;
	dirp = d_inode(ddir);
	inode_lock_nested(dirp, I_MUTEX_PARENT);

	dnew = lookup_one(&nop_mnt_idmap, &QSTR_LEN(name, len), ddir);
	if (IS_ERR(dnew)) {
		host_err = PTR_ERR(dnew);
		goto out_unlock;
	}

	dold = tfhp->fh_dentry;

	err = nfserr_noent;
	if (d_really_is_negative(dold))
		goto out_dput;
	err = fh_fill_pre_attrs(ffhp);
	if (err != nfs_ok)
		goto out_dput;
	host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
	fh_fill_post_attrs(ffhp);
	inode_unlock(dirp);
	if (!host_err) {
		host_err = commit_metadata(ffhp);
		if (!host_err)
			host_err = commit_metadata(tfhp);
	}

	dput(dnew);
out_drop_write:
	fh_drop_write(tfhp);
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.9.4 para 1-2: NFSv4 LINK
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	}
out:
	return err != nfs_ok ? err : nfserrno(host_err);

	/* Error unwind: release the new dentry, then the parent lock */
out_dput:
	dput(dnew);
out_unlock:
	inode_unlock(dirp);
	goto out_drop_write;
}

/* Flush and close any cached nfsd_file opens for a regular file */
static void
nfsd_close_cached_files(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode && S_ISREG(inode->i_mode))
		nfsd_file_close_inode_sync(inode);
}

/* Does the filecache currently hold an open for this regular file? */
static bool
nfsd_has_cached_files(struct dentry *dentry)
{
	bool ret = false;
	struct inode *inode = d_inode(dentry);

	if (inode && S_ISREG(inode->i_mode))
		ret = nfsd_file_is_cached(inode);
	return ret;
}

/**
 * nfsd_rename - rename a directory entry
 * @rqstp: RPC transaction context
 * @ffhp: the file handle of parent directory containing the entry to be renamed
 * @fname: the filename of directory entry to be renamed
 * @flen: the length of @fname in octets
 * @tfhp: the file handle of parent directory to contain the renamed entry
 * @tname: the filename of the new entry
 * @tlen: the length of @tlen in octets
 *
 * After this call _both_ ffhp and tfhp need an fh_put.
 *
 * Returns a generic NFS status code in network byte-order.
 */
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
	    struct svc_fh *tfhp, char *tname, int tlen)
{
	struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
	int type = S_IFDIR;
	__be32 err;
	int host_err;
	bool close_cached = false;

	trace_nfsd_vfs_rename(rqstp, ffhp, tfhp, fname, flen, tname, tlen);

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	fdentry = ffhp->fh_dentry;

	tdentry = tfhp->fh_dentry;

	/* Reject empty names and the "." / ".." entries. */
	err = nfserr_perm;
	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
		goto out;

	/* Both parents must live on the same export. */
	err = nfserr_xdev;
	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
		goto out;
	if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
		goto out;

retry:
	/* Re-entered via "goto retry" after cached opens on the target
	 * have been closed (see the close_cached handling below). */
	host_err = fh_want_write(ffhp);
	if (host_err) {
		err = nfserrno(host_err);
		goto out;
	}

	trap = lock_rename(tdentry, fdentry);
	if (IS_ERR(trap)) {
		err = nfserr_xdev;
		goto out_want_write;
	}
	err = fh_fill_pre_attrs(ffhp);
	if (err != nfs_ok)
		goto out_unlock;
	err = fh_fill_pre_attrs(tfhp);
	if (err != nfs_ok)
		goto out_unlock;

	odentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), fdentry);
	host_err = PTR_ERR(odentry);
	if (IS_ERR(odentry))
		goto out_nfserr;

	host_err = -ENOENT;
	if (d_really_is_negative(odentry))
		goto out_dput_old;
	/* Renaming the trap dentry returned by lock_rename() would
	 * create a directory loop. */
	host_err = -EINVAL;
	if (odentry == trap)
		goto out_dput_old;
	type = d_inode(odentry)->i_mode & S_IFMT;

	ndentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(tname, tlen), tdentry);
	host_err = PTR_ERR(ndentry);
	if (IS_ERR(ndentry))
		goto out_dput_old;
	if (d_inode(ndentry))
		type = d_inode(ndentry)->i_mode & S_IFMT;
	host_err = -ENOTEMPTY;
	if (ndentry == trap)
		goto out_dput_new;

	if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
	    nfsd_has_cached_files(ndentry)) {
		/* Target has cached opens that must be closed first; the
		 * actual close and retry happen after the locks are
		 * dropped below. */
		close_cached = true;
		goto out_dput_old;
	} else {
		struct renamedata rd = {
			.mnt_idmap	= &nop_mnt_idmap,
			.old_parent	= fdentry,
			.old_dentry	= odentry,
			.new_parent	= tdentry,
			.new_dentry	= ndentry,
		};
		int retries;

		/* Retry once if the rename hits a delegation (-EAGAIN)
		 * and the delegation is returned in time. */
		for (retries = 1;;) {
			host_err = vfs_rename(&rd);
			if (host_err != -EAGAIN || !retries--)
				break;
			if (!nfsd_wait_for_delegreturn(rqstp, d_inode(odentry)))
				break;
		}
		if (!host_err) {
			host_err = commit_metadata(tfhp);
			if (!host_err)
				host_err = commit_metadata(ffhp);
		}
	}
 out_dput_new:
	dput(ndentry);
 out_dput_old:
	dput(odentry);
 out_nfserr:
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.26.4 para 1-3: NFSv4 RENAME
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	} else {
		err = nfserrno(host_err);
	}

	/* On the close_cached path this whole operation is re-attempted,
	 * so post-op attributes are not filled in yet. */
	if (!close_cached) {
		fh_fill_post_attrs(ffhp);
		fh_fill_post_attrs(tfhp);
	}
 out_unlock:
	unlock_rename(tdentry, fdentry);
 out_want_write:
	fh_drop_write(ffhp);

	/*
	 * If the target dentry has cached open files, then we need to
	 * try to close them prior to doing the rename. Final fput
	 * shouldn't be done with locks held however, so we delay it
	 * until this point and then reattempt the whole shebang.
 */
	if (close_cached) {
		close_cached = false;
		nfsd_close_cached_files(ndentry);
		dput(ndentry);
		goto retry;
	}
out:
	return err;
}

/**
 * nfsd_unlink - remove a directory entry
 * @rqstp: RPC transaction context
 * @fhp: the file handle of the parent directory to be modified
 * @type: enforced file type of the object to be removed
 * @fname: the name of directory entry to be removed
 * @flen: length of @fname in octets
 *
 * After this call fhp needs an fh_put.
 *
 * Returns a generic NFS status code in network byte-order.
 */
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
	    char *fname, int flen)
{
	struct dentry *dentry, *rdentry;
	struct inode *dirp;
	struct inode *rinode;
	__be32 err;
	int host_err;

	trace_nfsd_vfs_unlink(rqstp, fhp, fname, flen);

	/* Reject empty names and the "." / ".." entries. */
	err = nfserr_acces;
	if (!flen || isdotent(fname, flen))
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;

	host_err = fh_want_write(fhp);
	if (host_err)
		goto out_nfserr;

	dentry = fhp->fh_dentry;
	dirp = d_inode(dentry);
	inode_lock_nested(dirp, I_MUTEX_PARENT);

	rdentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry);
	host_err = PTR_ERR(rdentry);
	if (IS_ERR(rdentry))
		goto out_unlock;

	if (d_really_is_negative(rdentry)) {
		dput(rdentry);
		host_err = -ENOENT;
		goto out_unlock;
	}
	rinode = d_inode(rdentry);
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;

	/* Hold the inode so any final truncate happens after the
	 * directory lock is dropped (see the iput() below). */
	ihold(rinode);
	if (!type)
		type = d_inode(rdentry)->i_mode & S_IFMT;

	if (type != S_IFDIR) {
		int retries;

		if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
			nfsd_close_cached_files(rdentry);

		/* Retry once if the unlink hits a delegation (-EAGAIN)
		 * and the delegation is returned in time. */
		for (retries = 1;;) {
			host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL);
			if (host_err != -EAGAIN || !retries--)
				break;
			if (!nfsd_wait_for_delegreturn(rqstp, rinode))
				break;
		}
	} else {
		host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry);
	}
	fh_fill_post_attrs(fhp);

	inode_unlock(dirp);
	if (!host_err)
		host_err = commit_metadata(fhp);
	dput(rdentry);
	iput(rinode);	/* truncate the inode here */

out_drop_write:
	fh_drop_write(fhp);
out_nfserr:
	if (host_err == -EBUSY) {
		/*
		 * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE
		 * wants a status unique to the object type.
		 */
		if (type != S_IFDIR)
			err = nfserr_file_open;
		else
			err = nfserr_acces;
	}
out:
	/* A status set before the VFS call wins over host_err mapping. */
	return err != nfs_ok ? err : nfserrno(host_err);
out_unlock:
	inode_unlock(dirp);
	goto out_drop_write;
}

/*
 * We do this buffering because we must not call back into the file
 * system's ->lookup() method from the filldir callback. That may well
 * deadlock a number of file systems.
 *
 * This is based heavily on the implementation of same in XFS.
 */
/* One fixed-size-header + name record in the page-sized bounce buffer. */
struct buffered_dirent {
	u64		ino;
	loff_t		offset;
	int		namlen;
	unsigned int	d_type;
	char		name[];		/* namlen octets, not NUL-terminated */
};

struct readdir_data {
	struct dir_context ctx;
	char		*dirent;	/* one page of buffered_dirent records */
	size_t		used;		/* octets of dirent consumed so far */
	int		full;		/* set when a record did not fit */
};

/*
 * ->iterate callback: copy one directory entry into the bounce buffer.
 * Returns false (stop iterating) when the page is full.
 */
static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
				  int namlen, loff_t offset, u64 ino,
				  unsigned int d_type)
{
	struct readdir_data *buf =
		container_of(ctx, struct readdir_data, ctx);
	struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
	unsigned int reclen;

	/* Records are u64-aligned so the next header is naturally aligned. */
	reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
	if (buf->used + reclen > PAGE_SIZE) {
		buf->full = 1;
		return false;
	}

	de->namlen = namlen;
	de->offset = offset;
	de->ino = ino;
	de->d_type = d_type;
	memcpy(de->name, name, namlen);
	buf->used += reclen;

	return true;
}

/*
 * Read the directory one page of entries at a time, then replay the
 * buffered entries through @func outside of ->iterate (see comment above).
 */
static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
				    nfsd_filldir_t func, struct readdir_cd *cdp,
				    loff_t *offsetp)
{
	struct buffered_dirent *de;
	int host_err;
	int size;
	loff_t offset;
	struct readdir_data buf = {
		.ctx.actor = nfsd_buffered_filldir,
		.dirent = (void *)__get_free_page(GFP_KERNEL)
	};

	if (!buf.dirent)
		return nfserrno(-ENOMEM);

	offset = *offsetp;

	while (1) {
		unsigned int reclen;

		cdp->err = nfserr_eof; /* will be cleared on successful read */
		buf.used = 0;
		buf.full = 0;

		host_err = iterate_dir(file, &buf.ctx);
		if (buf.full)
			host_err = 0;

		if (host_err < 0)
			break;

		size = buf.used;

		if (!size)
			break;

		de = (struct buffered_dirent *)buf.dirent;
		while (size > 0) {
			offset = de->offset;

			if (func(cdp, de->name, de->namlen, de->offset,
				 de->ino, de->d_type))
				break;

			if
			    (cdp->err != nfs_ok)
				break;

			trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);

			/* Advance past this record; must match the reclen
			 * computation in nfsd_buffered_filldir(). */
			reclen = ALIGN(sizeof(*de) + de->namlen,
				       sizeof(u64));
			size -= reclen;
			de = (struct buffered_dirent *)((char *)de + reclen);
		}
		if (size > 0) /* We bailed out early */
			break;

		offset = vfs_llseek(file, 0, SEEK_CUR);
	}

	free_page((unsigned long)(buf.dirent));

	if (host_err)
		return nfserrno(host_err);

	*offsetp = offset;
	return cdp->err;
}

/**
 * nfsd_readdir - Read entries from a directory
 * @rqstp: RPC transaction context
 * @fhp: NFS file handle of directory to be read
 * @offsetp: OUT: seek offset of final entry that was read
 * @cdp: OUT: an eof error value
 * @func: entry filler actor
 *
 * This implementation ignores the NFSv3/4 verifier cookie.
 *
 * NB: normal system calls hold file->f_pos_lock when calling
 * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
 * Because the struct file acquired here is not visible to other
 * threads, its internal state does not need mutex protection.
 *
 * Returns nfs_ok on success, otherwise an nfsstat code is
 * returned.
2508 */ 2509 __be32 2510 nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, 2511 struct readdir_cd *cdp, nfsd_filldir_t func) 2512 { 2513 __be32 err; 2514 struct file *file; 2515 loff_t offset = *offsetp; 2516 int may_flags = NFSD_MAY_READ; 2517 2518 err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file); 2519 if (err) 2520 goto out; 2521 2522 if (fhp->fh_64bit_cookies) 2523 file->f_mode |= FMODE_64BITHASH; 2524 else 2525 file->f_mode |= FMODE_32BITHASH; 2526 2527 offset = vfs_llseek(file, offset, SEEK_SET); 2528 if (offset < 0) { 2529 err = nfserrno((int)offset); 2530 goto out_close; 2531 } 2532 2533 err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp); 2534 2535 if (err == nfserr_eof || err == nfserr_toosmall) 2536 err = nfs_ok; /* can still be found in ->err */ 2537 out_close: 2538 nfsd_filp_close(file); 2539 out: 2540 return err; 2541 } 2542 2543 /** 2544 * nfsd_filp_close: close a file synchronously 2545 * @fp: the file to close 2546 * 2547 * nfsd_filp_close() is similar in behaviour to filp_close(). 2548 * The difference is that if this is the final close on the 2549 * file, the that finalisation happens immediately, rather then 2550 * being handed over to a work_queue, as it the case for 2551 * filp_close(). 2552 * When a user-space process closes a file (even when using 2553 * filp_close() the finalisation happens before returning to 2554 * userspace, so it is effectively synchronous. When a kernel thread 2555 * uses file_close(), on the other hand, the handling is completely 2556 * asynchronous. This means that any cost imposed by that finalisation 2557 * is not imposed on the nfsd thread, and nfsd could potentually 2558 * close files more quickly than the work queue finalises the close, 2559 * which would lead to unbounded growth in the queue. 2560 * 2561 * In some contexts is it not safe to synchronously wait for 2562 * close finalisation (see comment for __fput_sync()), but nfsd 2563 * does not match those contexts. 
In partcilarly it does not, at the 2564 * time that this function is called, hold and locks and no finalisation 2565 * of any file, socket, or device driver would have any cause to wait 2566 * for nfsd to make progress. 2567 */ 2568 void nfsd_filp_close(struct file *fp) 2569 { 2570 get_file(fp); 2571 filp_close(fp, NULL); 2572 __fput_sync(fp); 2573 } 2574 2575 /* 2576 * Get file system stats 2577 * N.B. After this call fhp needs an fh_put 2578 */ 2579 __be32 2580 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) 2581 { 2582 __be32 err; 2583 2584 trace_nfsd_vfs_statfs(rqstp, fhp); 2585 2586 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); 2587 if (!err) { 2588 struct path path = { 2589 .mnt = fhp->fh_export->ex_path.mnt, 2590 .dentry = fhp->fh_dentry, 2591 }; 2592 if (vfs_statfs(&path, stat)) 2593 err = nfserr_io; 2594 } 2595 return err; 2596 } 2597 2598 static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp) 2599 { 2600 return nfsexp_flags(cred, exp) & NFSEXP_READONLY; 2601 } 2602 2603 #ifdef CONFIG_NFSD_V4 2604 /* 2605 * Helper function to translate error numbers. In the case of xattr operations, 2606 * some error codes need to be translated outside of the standard translations. 2607 * 2608 * ENODATA needs to be translated to nfserr_noxattr. 2609 * E2BIG to nfserr_xattr2big. 2610 * 2611 * Additionally, vfs_listxattr can return -ERANGE. This means that the 2612 * file has too many extended attributes to retrieve inside an 2613 * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation: 2614 * filesystems will allow the adding of extended attributes until they hit 2615 * their own internal limit. This limit may be larger than XATTR_LIST_MAX. 2616 * So, at that point, the attributes are present and valid, but can't 2617 * be retrieved using listxattr, since the upper level xattr code enforces 2618 * the XATTR_LIST_MAX limit. 
2619 * 2620 * This bug means that we need to deal with listxattr returning -ERANGE. The 2621 * best mapping is to return TOOSMALL. 2622 */ 2623 static __be32 2624 nfsd_xattr_errno(int err) 2625 { 2626 switch (err) { 2627 case -ENODATA: 2628 return nfserr_noxattr; 2629 case -E2BIG: 2630 return nfserr_xattr2big; 2631 case -ERANGE: 2632 return nfserr_toosmall; 2633 } 2634 return nfserrno(err); 2635 } 2636 2637 /* 2638 * Retrieve the specified user extended attribute. To avoid always 2639 * having to allocate the maximum size (since we are not getting 2640 * a maximum size from the RPC), do a probe + alloc. Hold a reader 2641 * lock on i_rwsem to prevent the extended attribute from changing 2642 * size while we're doing this. 2643 */ 2644 __be32 2645 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name, 2646 void **bufp, int *lenp) 2647 { 2648 ssize_t len; 2649 __be32 err; 2650 char *buf; 2651 struct inode *inode; 2652 struct dentry *dentry; 2653 2654 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ); 2655 if (err) 2656 return err; 2657 2658 err = nfs_ok; 2659 dentry = fhp->fh_dentry; 2660 inode = d_inode(dentry); 2661 2662 inode_lock_shared(inode); 2663 2664 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0); 2665 2666 /* 2667 * Zero-length attribute, just return. 2668 */ 2669 if (len == 0) { 2670 *bufp = NULL; 2671 *lenp = 0; 2672 goto out; 2673 } 2674 2675 if (len < 0) { 2676 err = nfsd_xattr_errno(len); 2677 goto out; 2678 } 2679 2680 if (len > *lenp) { 2681 err = nfserr_toosmall; 2682 goto out; 2683 } 2684 2685 buf = kvmalloc(len, GFP_KERNEL); 2686 if (buf == NULL) { 2687 err = nfserr_jukebox; 2688 goto out; 2689 } 2690 2691 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len); 2692 if (len <= 0) { 2693 kvfree(buf); 2694 buf = NULL; 2695 err = nfsd_xattr_errno(len); 2696 } 2697 2698 *lenp = len; 2699 *bufp = buf; 2700 2701 out: 2702 inode_unlock_shared(inode); 2703 2704 return err; 2705 } 2706 2707 /* 2708 * Retrieve the xattr names. 
Since we can't know how many are 2709 * user extended attributes, we must get all attributes here, 2710 * and have the XDR encode filter out the "user." ones. 2711 * 2712 * While this could always just allocate an XATTR_LIST_MAX 2713 * buffer, that's a waste, so do a probe + allocate. To 2714 * avoid any changes between the probe and allocate, wrap 2715 * this in inode_lock. 2716 */ 2717 __be32 2718 nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp, 2719 int *lenp) 2720 { 2721 ssize_t len; 2722 __be32 err; 2723 char *buf; 2724 struct inode *inode; 2725 struct dentry *dentry; 2726 2727 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ); 2728 if (err) 2729 return err; 2730 2731 dentry = fhp->fh_dentry; 2732 inode = d_inode(dentry); 2733 *lenp = 0; 2734 2735 inode_lock_shared(inode); 2736 2737 len = vfs_listxattr(dentry, NULL, 0); 2738 if (len <= 0) { 2739 err = nfsd_xattr_errno(len); 2740 goto out; 2741 } 2742 2743 if (len > XATTR_LIST_MAX) { 2744 err = nfserr_xattr2big; 2745 goto out; 2746 } 2747 2748 buf = kvmalloc(len, GFP_KERNEL); 2749 if (buf == NULL) { 2750 err = nfserr_jukebox; 2751 goto out; 2752 } 2753 2754 len = vfs_listxattr(dentry, buf, len); 2755 if (len <= 0) { 2756 kvfree(buf); 2757 err = nfsd_xattr_errno(len); 2758 goto out; 2759 } 2760 2761 *lenp = len; 2762 *bufp = buf; 2763 2764 err = nfs_ok; 2765 out: 2766 inode_unlock_shared(inode); 2767 2768 return err; 2769 } 2770 2771 /** 2772 * nfsd_removexattr - Remove an extended attribute 2773 * @rqstp: RPC transaction being executed 2774 * @fhp: NFS filehandle of object with xattr to remove 2775 * @name: name of xattr to remove (NUL-terminate) 2776 * 2777 * Pass in a NULL pointer for delegated_inode, and let the client deal 2778 * with NFS4ERR_DELAY (same as with e.g. setattr and remove). 2779 * 2780 * Returns nfs_ok on success, or an nfsstat in network byte order. 
2781 */ 2782 __be32 2783 nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name) 2784 { 2785 __be32 err; 2786 int ret; 2787 2788 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE); 2789 if (err) 2790 return err; 2791 2792 ret = fh_want_write(fhp); 2793 if (ret) 2794 return nfserrno(ret); 2795 2796 inode_lock(fhp->fh_dentry->d_inode); 2797 err = fh_fill_pre_attrs(fhp); 2798 if (err != nfs_ok) 2799 goto out_unlock; 2800 ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry, 2801 name, NULL); 2802 err = nfsd_xattr_errno(ret); 2803 fh_fill_post_attrs(fhp); 2804 out_unlock: 2805 inode_unlock(fhp->fh_dentry->d_inode); 2806 fh_drop_write(fhp); 2807 2808 return err; 2809 } 2810 2811 __be32 2812 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name, 2813 void *buf, u32 len, u32 flags) 2814 { 2815 __be32 err; 2816 int ret; 2817 2818 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE); 2819 if (err) 2820 return err; 2821 2822 ret = fh_want_write(fhp); 2823 if (ret) 2824 return nfserrno(ret); 2825 inode_lock(fhp->fh_dentry->d_inode); 2826 err = fh_fill_pre_attrs(fhp); 2827 if (err != nfs_ok) 2828 goto out_unlock; 2829 ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry, 2830 name, buf, len, flags, NULL); 2831 fh_fill_post_attrs(fhp); 2832 err = nfsd_xattr_errno(ret); 2833 out_unlock: 2834 inode_unlock(fhp->fh_dentry->d_inode); 2835 fh_drop_write(fhp); 2836 return err; 2837 } 2838 #endif 2839 2840 /* 2841 * Check for a user's access permissions to this inode. 2842 */ 2843 __be32 2844 nfsd_permission(struct svc_cred *cred, struct svc_export *exp, 2845 struct dentry *dentry, int acc) 2846 { 2847 struct inode *inode = d_inode(dentry); 2848 int err; 2849 2850 if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP) 2851 return 0; 2852 #if 0 2853 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n", 2854 acc, 2855 (acc & NFSD_MAY_READ)? " read" : "", 2856 (acc & NFSD_MAY_WRITE)? " write" : "", 2857 (acc & NFSD_MAY_EXEC)? 
" exec" : "",
		(acc & NFSD_MAY_SATTR)?	" sattr" : "",
		(acc & NFSD_MAY_TRUNC)?	" trunc" : "",
		(acc & NFSD_MAY_NLM)?	" nlm" : "",
		(acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
		inode->i_mode,
		IS_IMMUTABLE(inode)?	" immut" : "",
		IS_APPEND(inode)?	" append" : "",
		__mnt_is_readonly(exp->ex_path.mnt)?	" ro" : "");
	dprintk(" owner %d/%d user %d/%d\n",
		inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
#endif

	/* Normally we reject any write/sattr etc access on a read-only file
	 * system. But if it is IRIX doing check on write-access for a
	 * device special file, we ignore rofs.
	 */
	if (!(acc & NFSD_MAY_LOCAL_ACCESS))
		if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
			if (exp_rdonly(cred, exp) ||
			    __mnt_is_readonly(exp->ex_path.mnt))
				return nfserr_rofs;
			if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
				return nfserr_perm;
		}
	/* Truncating an append-only file is never allowed. */
	if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
		return nfserr_perm;

	/*
	 * The file owner always gets access permission for accesses that
	 * would normally be checked at open time. This is to make
	 * file access work even when the client has done a fchmod(fd, 0).
	 *
	 * However, `cp foo bar' should fail nevertheless when bar is
	 * readonly. A sensible way to do this might be to reject all
	 * attempts to truncate a read-only file, because a creat() call
	 * always implies file truncation.
	 * ... but this isn't really fair. A process may reasonably call
	 * ftruncate on an open file descriptor on a file with perm 000.
	 * We must trust the client to do permission checking - using "ACCESS"
	 * with NFSv3.
 */
	if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
	    uid_eq(inode->i_uid, current_fsuid()))
		return 0;

	/* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
	err = inode_permission(&nop_mnt_idmap, inode,
			       acc & (MAY_READ | MAY_WRITE | MAY_EXEC));

	/* Allow read access to binaries even when mode 111 */
	if (err == -EACCES && S_ISREG(inode->i_mode) &&
	    (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
	     acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
		err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC);

	/* Map any VFS error to the NFS wire status; 0 means access granted. */
	return err? nfserrno(err) : 0;
}