// SPDX-License-Identifier: GPL-2.0
/*
 * File operations used by nfsd. Some of these have been ripped from
 * other parts of the kernel because they weren't exported, others
 * are partial duplicates with added or changed functionality.
 *
 * Note that several functions dget() the dentry upon which they want
 * to act, most notably those that create directory entries. Response
 * dentry's are dput()'d if necessary in the release callback.
 * So if you notice code paths that apparently fail to dput() the
 * dentry, don't worry--they have been taken care of.
 *
 * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
 * Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/falloc.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/security.h>
#include <linux/sunrpc/xdr.h>

#include "xdr3.h"

#ifdef CONFIG_NFSD_V4
#include "acl.h"
#include "idmap.h"
#include "xdr4.h"
#endif /* CONFIG_NFSD_V4 */

#include "nfsd.h"
#include "vfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY		NFSDDBG_FILEOP

bool nfsd_disable_splice_read __read_mostly;
u64 nfsd_io_cache_read __read_mostly = NFSD_IO_BUFFERED;
u64 nfsd_io_cache_write __read_mostly = NFSD_IO_BUFFERED;

/**
 * nfserrno - Map Linux errnos to NFS errnos
 * @errno: POSIX(-ish) error code to be mapped
 *
 * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
 * it's an error we don't expect, log it once and return nfserr_io.
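 *
 * For example, nfserrno(-ENOENT) yields nfserr_noent, while transient
 * conditions such as -ENOMEM or -EAGAIN map to nfserr_jukebox so that
 * the client retries the operation.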
 */
__be32
nfserrno (int errno)
{
	static struct {
		__be32	nfserr;
		int	syserr;
	} nfs_errtbl[] = {
		{ nfs_ok, 0 },
		{ nfserr_perm, -EPERM },
		{ nfserr_noent, -ENOENT },
		{ nfserr_io, -EIO },
		{ nfserr_nxio, -ENXIO },
		{ nfserr_fbig, -E2BIG },
		{ nfserr_stale, -EBADF },
		{ nfserr_acces, -EACCES },
		{ nfserr_exist, -EEXIST },
		{ nfserr_xdev, -EXDEV },
		{ nfserr_nodev, -ENODEV },
		{ nfserr_notdir, -ENOTDIR },
		{ nfserr_isdir, -EISDIR },
		{ nfserr_inval, -EINVAL },
		{ nfserr_fbig, -EFBIG },
		{ nfserr_nospc, -ENOSPC },
		{ nfserr_rofs, -EROFS },
		{ nfserr_mlink, -EMLINK },
		{ nfserr_nametoolong, -ENAMETOOLONG },
		{ nfserr_notempty, -ENOTEMPTY },
		{ nfserr_dquot, -EDQUOT },
		{ nfserr_stale, -ESTALE },
		{ nfserr_jukebox, -ETIMEDOUT },
		{ nfserr_jukebox, -ERESTARTSYS },
		{ nfserr_jukebox, -EAGAIN },
		{ nfserr_jukebox, -EWOULDBLOCK },
		{ nfserr_jukebox, -ENOMEM },
		{ nfserr_io, -ETXTBSY },
		{ nfserr_notsupp, -EOPNOTSUPP },
		{ nfserr_toosmall, -ETOOSMALL },
		{ nfserr_serverfault, -ESERVERFAULT },
		{ nfserr_serverfault, -ENFILE },
		{ nfserr_io, -EREMOTEIO },
		{ nfserr_stale, -EOPENSTALE },
		{ nfserr_io, -EUCLEAN },
		{ nfserr_perm, -ENOKEY },
		{ nfserr_no_grace, -ENOGRACE},
		{ nfserr_io, -EBADMSG },
	};
	int	i;

	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
		if (nfs_errtbl[i].syserr == errno)
			return nfs_errtbl[i].nfserr;
	}
	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
	return nfserr_io;
}

/*
 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
 * a mount point.
 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
 * or nfs_ok having possibly changed *dpp and *expp
 */
int
nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
	       struct svc_export **expp)
{
	struct svc_export *exp = *expp, *exp2 = NULL;
	struct dentry *dentry = *dpp;
	struct path path = {.mnt = mntget(exp->ex_path.mnt),
			    .dentry = dget(dentry)};
	unsigned int follow_flags = 0;
	int err = 0;

	if (exp->ex_flags & NFSEXP_CROSSMOUNT)
		follow_flags = LOOKUP_AUTOMOUNT;

	err = follow_down(&path, follow_flags);
	if (err < 0)
		goto out;
	if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
	    nfsd_mountpoint(dentry, exp) == 2) {
		/* This is only a mountpoint in some other namespace */
		path_put(&path);
		goto out;
	}

	exp2 = rqst_exp_get_by_name(rqstp, &path);
	if (IS_ERR(exp2)) {
		err = PTR_ERR(exp2);
		/*
		 * We normally allow NFS clients to continue
		 * "underneath" a mountpoint that is not exported.
		 * The exception is V4ROOT, where no traversal is ever
		 * allowed without an explicit export of the new
		 * directory.
		 */
		if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
			err = 0;
		path_put(&path);
		goto out;
	}
	if (nfsd_v4client(rqstp) ||
	    (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
		/* successfully crossed mount point */
		/*
		 * This is subtle: path.dentry is *not* on path.mnt
		 * at this point.
		 * The only reason we are safe is that
		 * original mnt is pinned down by exp, so we should
		 * put path *before* putting exp
		 */
		*dpp = path.dentry;
		path.dentry = dentry;
		*expp = exp2;
		exp2 = exp;
	}
	path_put(&path);
	exp_put(exp2);
out:
	return err;
}

static void follow_to_parent(struct path *path)
{
	struct dentry *dp;

	while (path->dentry == path->mnt->mnt_root && follow_up(path))
		;
	dp = dget_parent(path->dentry);
	dput(path->dentry);
	path->dentry = dp;
}

static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
{
	struct svc_export *exp2;
	struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
			    .dentry = dget(dparent)};

	follow_to_parent(&path);

	exp2 = rqst_exp_parent(rqstp, &path);
	if (PTR_ERR(exp2) == -ENOENT) {
		*dentryp = dget(dparent);
	} else if (IS_ERR(exp2)) {
		path_put(&path);
		return PTR_ERR(exp2);
	} else {
		*dentryp = dget(path.dentry);
		exp_put(*exp);
		*exp = exp2;
	}
	path_put(&path);
	return 0;
}

/*
 * For nfsd purposes, we treat V4ROOT exports as though there was an
 * export at *every* directory.
 * We return:
 * '1' if this dentry *must* be an export point,
 * '2' if it might be, if there is really a mount here, and
 * '0' if there is no chance of an export point here.
 */
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
	if (!d_inode(dentry))
		return 0;
	if (exp->ex_flags & NFSEXP_V4ROOT)
		return 1;
	if (nfsd4_is_junction(dentry))
		return 1;
	if (d_managed(dentry))
		/*
		 * Might only be a mountpoint in a different namespace,
		 * but we need to check.
		 */
		return 2;
	return 0;
}

__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   const char *name, unsigned int len,
		   struct svc_export **exp_ret, struct dentry **dentry_ret)
{
	struct svc_export *exp;
	struct dentry *dparent;
	struct dentry *dentry;
	int host_err;

	trace_nfsd_vfs_lookup(rqstp, fhp, name, len);

	dparent = fhp->fh_dentry;
	exp = exp_get(fhp->fh_export);

	/* Lookup the name, but don't follow links */
	if (isdotent(name, len)) {
		if (len == 1)
			dentry = dget(dparent);
		else if (dparent != exp->ex_path.dentry)
			dentry = dget_parent(dparent);
		else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
			dentry = dget(dparent); /* .. == .
						 * just like at / */
		else {
			/* checking mountpoint crossing is very different when stepping up */
			host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
			if (host_err)
				goto out_nfserr;
		}
	} else {
		dentry = lookup_one_unlocked(&nop_mnt_idmap,
					     &QSTR_LEN(name, len), dparent);
		host_err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_nfserr;
		if (nfsd_mountpoint(dentry, exp)) {
			host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
			if (host_err) {
				dput(dentry);
				goto out_nfserr;
			}
		}
	}
	*dentry_ret = dentry;
	*exp_ret = exp;
	return 0;

out_nfserr:
	exp_put(exp);
	return nfserrno(host_err);
}

/**
 * nfsd_lookup - look up a single path component for nfsd
 *
 * @rqstp: the request context
 * @fhp: the file handle of the directory
 * @name: the component name, or %NULL to look up parent
 * @len: length of name to examine
 * @resfh: pointer to pre-initialised filehandle to hold result.
 *
 * Look up one component of a pathname.
 * N.B. After this call _both_ fhp and resfh need an fh_put
 *
 * If the lookup would cross a mountpoint, and the mounted filesystem
 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
 * accepted as it stands and the mounted directory is
 * returned. Otherwise the covered directory is returned.
 * NOTE: this mountpoint crossing is not supported properly by all
 *   clients and is explicitly disallowed for NFSv3
 *
 */
__be32
nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
	    unsigned int len, struct svc_fh *resfh)
{
	struct svc_export *exp;
	struct dentry *dentry;
	__be32 err;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
	if (err)
		return err;
	err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
	if (err)
		return err;
	err = check_nfsd_access(exp, rqstp, false);
	if (err)
		goto out;
	/*
	 * Note: we compose the file handle now, but as the
	 * dentry may be negative, it may need to be updated.
	 */
	err = fh_compose(resfh, exp, dentry, fhp);
	if (!err && d_really_is_negative(dentry))
		err = nfserr_noent;
out:
	dput(dentry);
	exp_put(exp);
	return err;
}

static void
commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
			    int err)
{
	switch (err) {
	case -EAGAIN:
	case -ESTALE:
		/*
		 * Neither of these are the result of a problem with
		 * durable storage, so avoid a write verifier reset.
		 */
		break;
	default:
		nfsd_reset_write_verifier(nn);
		trace_nfsd_writeverf_reset(nn, rqstp, err);
	}
}

/*
 * Commit metadata changes to stable storage.
 */
static int
commit_inode_metadata(struct inode *inode)
{
	const struct export_operations *export_ops = inode->i_sb->s_export_op;

	if (export_ops->commit_metadata)
		return export_ops->commit_metadata(inode);
	return sync_inode_metadata(inode, 1);
}

static int
commit_metadata(struct svc_fh *fhp)
{
	struct inode *inode = d_inode(fhp->fh_dentry);

	if (!EX_ISSYNC(fhp->fh_export))
		return 0;
	return commit_inode_metadata(inode);
}

/*
 * Go over the attributes and take care of the small differences between
 * NFS semantics and what Linux expects.
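 * For example, mode changes on symlinks are ignored, and a chown or
 * chgrp also clears the setuid/setgid bits, matching local chown(2)
 * semantics.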
 */
static void
nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
{
	/* Ignore mode updates on symlinks */
	if (S_ISLNK(inode->i_mode))
		iap->ia_valid &= ~ATTR_MODE;

	/* sanitize the mode change */
	if (iap->ia_valid & ATTR_MODE) {
		iap->ia_mode &= S_IALLUGO;
		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
	}

	/* Revoke setuid/setgid on chown */
	if (!S_ISDIR(inode->i_mode) &&
	    ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
		iap->ia_valid |= ATTR_KILL_PRIV;
		if (iap->ia_valid & ATTR_MODE) {
			/* we're setting mode too, just clear the s*id bits */
			iap->ia_mode &= ~S_ISUID;
			if (iap->ia_mode & S_IXGRP)
				iap->ia_mode &= ~S_ISGID;
		} else {
			/* set ATTR_KILL_* bits and let VFS handle it */
			iap->ia_valid |= ATTR_KILL_SUID;
			iap->ia_valid |=
				setattr_should_drop_sgid(&nop_mnt_idmap, inode);
		}
	}
}

static __be32
nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
		      struct iattr *iap)
{
	struct inode *inode = d_inode(fhp->fh_dentry);

	if (iap->ia_size < inode->i_size) {
		__be32 err;

		err = nfsd_permission(&rqstp->rq_cred,
				      fhp->fh_export, fhp->fh_dentry,
				      NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
		if (err)
			return err;
	}
	return nfserrno(get_write_access(inode));
}

static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
{
	int host_err;

	if (iap->ia_valid & ATTR_SIZE) {
		/*
		 * RFC5661, Section 18.30.4:
		 *   Changing the size of a file with SETATTR indirectly
		 *   changes the time_modify and change attributes.
		 *
		 * (and similar for the older RFCs)
		 */
		struct iattr size_attr = {
			.ia_valid	= ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
			.ia_size	= iap->ia_size,
		};

		if (iap->ia_size < 0)
			return -EFBIG;

		host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL);
		if (host_err)
			return host_err;
		iap->ia_valid &= ~ATTR_SIZE;

		/*
		 * Avoid the additional setattr call below if the only other
		 * attribute that the client sends is the mtime, as we update
		 * it as part of the size change above.
		 */
		if ((iap->ia_valid & ~ATTR_MTIME) == 0)
			return 0;
	}

	if ((iap->ia_valid & ~ATTR_DELEG) == 0)
		return 0;

	/*
	 * If ATTR_DELEG is set, then this is an update from a client that
	 * holds a delegation. If this is an update for only the atime, the
	 * ctime should not be changed. If the update contains the mtime
	 * too, then ATTR_CTIME should already be set.
	 */
	if (!(iap->ia_valid & ATTR_DELEG))
		iap->ia_valid |= ATTR_CTIME;

	return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
}

/**
 * nfsd_setattr - Set various file attributes.
 * @rqstp: controlling RPC transaction
 * @fhp: filehandle of target
 * @attr: attributes to set
 * @guardtime: do not act if ctime.tv_sec does not match this timestamp
 *
 * This call may adjust the contents of @attr (in particular, this
 * call may change the bits in the na_iattr.ia_valid field).
 *
 * Returns nfs_ok on success, otherwise an NFS status code is
 * returned. Caller must release @fhp by calling fh_put in either
 * case.
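 *
 * Note that the guard-time check below compares only the low 32 bits of
 * ctime.tv_sec, matching the 32-bit seconds field of the NFSv3
 * sattrguard3 argument.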
 */
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
	     struct nfsd_attrs *attr, const struct timespec64 *guardtime)
{
	struct dentry *dentry;
	struct inode *inode;
	struct iattr *iap = attr->na_iattr;
	int accmode = NFSD_MAY_SATTR;
	umode_t ftype = 0;
	__be32 err;
	int host_err = 0;
	bool get_write_count;
	bool size_change = (iap->ia_valid & ATTR_SIZE);
	int retries;

	trace_nfsd_vfs_setattr(rqstp, fhp, iap, guardtime);

	if (iap->ia_valid & ATTR_SIZE) {
		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
		ftype = S_IFREG;
	}

	/*
	 * If utimes(2) and friends are called with times not NULL, we should
	 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
	 * will return EACCES, when the caller's effective UID does not match
	 * the owner of the file, and the caller is not privileged. In this
	 * situation, we should return EPERM(notify_change will return this).
	 */
	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
		accmode |= NFSD_MAY_OWNER_OVERRIDE;
		if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
			accmode |= NFSD_MAY_WRITE;
	}

	/* Callers that do fh_verify should do the fh_want_write: */
	get_write_count = !fhp->fh_dentry;

	/* Get inode */
	err = fh_verify(rqstp, fhp, ftype, accmode);
	if (err)
		return err;
	if (get_write_count) {
		host_err = fh_want_write(fhp);
		if (host_err)
			goto out;
	}

	dentry = fhp->fh_dentry;
	inode = d_inode(dentry);

	nfsd_sanitize_attrs(inode, iap);

	/*
	 * The size case is special, it changes the file in addition to the
	 * attributes, and file systems don't expect it to be mixed with
	 * "random" attribute changes. We thus split out the size change
	 * into a separate call to ->setattr, and do the rest as a separate
	 * setattr call.
	 */
	if (size_change) {
		err = nfsd_get_write_access(rqstp, fhp, iap);
		if (err)
			return err;
	}

	inode_lock(inode);
	err = fh_fill_pre_attrs(fhp);
	if (err)
		goto out_unlock;

	if (guardtime) {
		struct timespec64 ctime = inode_get_ctime(inode);

		if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec ||
		    guardtime->tv_nsec != ctime.tv_nsec) {
			err = nfserr_notsync;
			goto out_fill_attrs;
		}
	}

	for (retries = 1;;) {
		struct iattr attrs;

		/*
		 * notify_change() can alter its iattr argument, making
		 * @iap unsuitable for submission multiple times. Make a
		 * copy for every loop iteration.
		 */
		attrs = *iap;
		host_err = __nfsd_setattr(dentry, &attrs);
		if (host_err != -EAGAIN || !retries--)
			break;
		if (!nfsd_wait_for_delegreturn(rqstp, inode))
			break;
	}
	if (attr->na_seclabel && attr->na_seclabel->len)
		attr->na_labelerr = security_inode_setsecctx(dentry,
			attr->na_seclabel->data, attr->na_seclabel->len);
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_dpacl) {
		if (!S_ISDIR(inode->i_mode))
			attr->na_dpaclerr = -EINVAL;
		else if (attr->na_dpacl->a_count > 0)
			/* a_count == 0 means delete the ACL. */
			attr->na_dpaclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_DEFAULT,
						attr->na_dpacl);
		else
			attr->na_dpaclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_DEFAULT,
						NULL);
	}
	if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl) {
		/*
		 * For any file system that is not ACL_SCOPE_FILE_OBJECT,
		 * a_count == 0 MUST reply nfserr_inval.
		 * For a file system that is ACL_SCOPE_FILE_OBJECT,
		 * a_count == 0 deletes the ACL.
		 * XXX File systems that are ACL_SCOPE_FILE_OBJECT
		 * are not yet supported.
		 */
		if (attr->na_pacl->a_count > 0)
			attr->na_paclerr = set_posix_acl(&nop_mnt_idmap,
						dentry, ACL_TYPE_ACCESS,
						attr->na_pacl);
		else
			attr->na_paclerr = -EINVAL;
	}
out_fill_attrs:
	/*
	 * RFC 1813 Section 3.3.2 does not mandate that an NFS server
	 * returns wcc_data for SETATTR. Some client implementations
	 * depend on receiving wcc_data, however, to sort out partial
	 * updates (eg., the client requested that size and mode be
	 * modified, but the server changed only the file mode).
	 */
	fh_fill_post_attrs(fhp);
out_unlock:
	inode_unlock(inode);
	if (size_change)
		put_write_access(inode);
out:
	if (!host_err)
		host_err = commit_metadata(fhp);
	return err != 0 ? err : nfserrno(host_err);
}

#if defined(CONFIG_NFSD_V4)
/*
 * NFS junction information is stored in an extended attribute.
 */
#define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs"

/**
 * nfsd4_is_junction - Test if an object could be an NFS junction
 *
 * @dentry: object to test
 *
 * Returns 1 if "dentry" appears to contain NFS junction information.
 * Otherwise 0 is returned.
 */
int nfsd4_is_junction(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode == NULL)
		return 0;
	if (inode->i_mode & S_IXUGO)
		return 0;
	if (!(inode->i_mode & S_ISVTX))
		return 0;
	if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME,
			 NULL, 0) <= 0)
		return 0;
	return 1;
}

static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
{
	return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
}

__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
			      struct nfsd_file *nf_src, u64 src_pos,
			      struct nfsd_file *nf_dst, u64 dst_pos,
			      u64 count, bool sync)
{
	struct file *src = nf_src->nf_file;
	struct file *dst = nf_dst->nf_file;
	errseq_t since;
	loff_t cloned;
	__be32 ret = 0;

	since = READ_ONCE(dst->f_wb_err);
	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
	if (cloned < 0) {
		ret = nfserrno(cloned);
		goto out_err;
	}
	if (count && cloned != count) {
		ret = nfserrno(-EINVAL);
		goto out_err;
	}
	if (sync) {
		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);

		if (!status)
			status = filemap_check_wb_err(dst->f_mapping, since);
		if (!status)
			status = commit_inode_metadata(file_inode(src));
		if (status < 0) {
			struct nfsd_net *nn = net_generic(nf_dst->nf_net,
							  nfsd_net_id);

			trace_nfsd_clone_file_range_err(rqstp,
					&nfsd4_get_cstate(rqstp)->save_fh,
					src_pos,
					&nfsd4_get_cstate(rqstp)->current_fh,
					dst_pos,
					count, status);
			commit_reset_write_verifier(nn, rqstp, status);
			ret = nfserrno(status);
		}
	}
out_err:
	return ret;
}

ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
			     u64 dst_pos, u64 count)
{
	ssize_t ret;

	/*
	 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
	 * thread and client rpc slot. The choice of 4MB is somewhat
	 * arbitrary.
	 * We might instead base this on r/wsize, or make it
	 * tunable, or use a time instead of a byte limit, or implement
	 * asynchronous copy. In theory a client could also recognize a
	 * limit like this and pipeline multiple COPY requests.
	 */
	count = min_t(u64, count, 1 << 22);
	ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
					  COPY_FILE_SPLICE);
	return ret;
}

__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
			   struct file *file, loff_t offset, loff_t len,
			   int flags)
{
	int error;

	if (!S_ISREG(file_inode(file)->i_mode))
		return nfserr_inval;

	error = vfs_fallocate(file, flags, offset, len);
	if (!error)
		error = commit_metadata(fhp);

	return nfserrno(error);
}
#endif /* defined(CONFIG_NFSD_V4) */

/*
 * Check server access rights to a file system object
 */
struct accessmap {
	u32	access;
	int	how;
};
static struct accessmap nfs3_regaccess[] = {
	{ NFS3_ACCESS_READ,	NFSD_MAY_READ			},
	{ NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
	{ NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_TRUNC	},
	{ NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE			},

#ifdef CONFIG_NFSD_V4
	{ NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
	{ NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
	{ NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

	{ 0,			0				}
};

static struct accessmap nfs3_diraccess[] = {
	{ NFS3_ACCESS_READ,	NFSD_MAY_READ			},
	{ NFS3_ACCESS_LOOKUP,	NFSD_MAY_EXEC			},
	{ NFS3_ACCESS_MODIFY,	NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
	{ NFS3_ACCESS_EXTEND,	NFSD_MAY_EXEC|NFSD_MAY_WRITE	},
	{ NFS3_ACCESS_DELETE,	NFSD_MAY_REMOVE			},

#ifdef CONFIG_NFSD_V4
	{ NFS4_ACCESS_XAREAD,	NFSD_MAY_READ			},
	{ NFS4_ACCESS_XAWRITE,	NFSD_MAY_WRITE			},
	{ NFS4_ACCESS_XALIST,	NFSD_MAY_READ			},
#endif

	{ 0,			0				}
};

static struct accessmap nfs3_anyaccess[] = {
	/* Some clients - Solaris 2.6 at least, make an access call
	 * to the server to check for access for things like /dev/null
	 * (which really, the server doesn't care about).
	 * So we provide simple access checking for them, looking
	 * mainly at mode bits, and we make sure to ignore read-only
	 * filesystem checks
	 */
	{ NFS3_ACCESS_READ,	NFSD_MAY_READ				},
	{ NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC				},
	{ NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
	{ NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},

	{ 0,			0					}
};

__be32
nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
{
	struct accessmap *map;
	struct svc_export *export;
	struct dentry *dentry;
	u32 query, result = 0, sresult = 0;
	__be32 error;

	error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
	if (error)
		goto out;

	export = fhp->fh_export;
	dentry = fhp->fh_dentry;

	if (d_is_reg(dentry))
		map = nfs3_regaccess;
	else if (d_is_dir(dentry))
		map = nfs3_diraccess;
	else
		map = nfs3_anyaccess;


	query = *access;
	for (; map->access; map++) {
		if (map->access & query) {
			__be32 err2;

			sresult |= map->access;

			err2 = nfsd_permission(&rqstp->rq_cred, export,
					       dentry, map->how);
			switch (err2) {
			case nfs_ok:
				result |= map->access;
				break;

			/* the following error codes just mean the access was not allowed,
			 * rather than an error occurred */
			case nfserr_rofs:
			case nfserr_acces:
			case nfserr_perm:
				/* simply don't "or" in the access bit. */
				break;
			default:
				error = err2;
				goto out;
			}
		}
	}
	*access = result;
	if (supported)
		*supported = sresult;

out:
	return error;
}

int nfsd_open_break_lease(struct inode *inode, int access)
{
	unsigned int mode;

	if (access & NFSD_MAY_NOT_BREAK_LEASE)
		return 0;
	mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
	return break_lease(inode, mode | O_NONBLOCK);
}

/*
 * Open an existing file or directory.
 * The may_flags argument indicates the type of open (read/write/lock)
 * and additional flags.
 * N.B.
 * After this call fhp needs an fh_put
 */
static int
__nfsd_open(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
	struct path	path;
	struct inode	*inode;
	struct file	*file;
	int		flags = O_RDONLY|O_LARGEFILE;
	int		host_err = -EPERM;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;
	inode = d_inode(path.dentry);

	if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
		goto out;

	if (!inode->i_fop)
		goto out;

	host_err = nfsd_open_break_lease(inode, may_flags);
	if (host_err) /* NOMEM or WOULDBLOCK */
		goto out;

	if (may_flags & NFSD_MAY_WRITE) {
		if (may_flags & NFSD_MAY_READ)
			flags = O_RDWR|O_LARGEFILE;
		else
			flags = O_WRONLY|O_LARGEFILE;
	}

	file = dentry_open(&path, flags, current_cred());
	if (IS_ERR(file)) {
		host_err = PTR_ERR(file);
		goto out;
	}

	host_err = security_file_post_open(file, may_flags);
	if (host_err) {
		fput(file);
		goto out;
	}

	*filp = file;
out:
	return host_err;
}

__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
	  int may_flags, struct file **filp)
{
	__be32 err;
	int host_err;
	bool retried = false;

	/*
	 * If we get here, then the client has already done an "open",
	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
	 * in case a chmod has now revoked permission.
	 *
	 * Arguably we should also allow the owner override for
	 * directories, but we never have and it doesn't seem to have
	 * caused anyone a problem. If we were to change this, note
	 * also that our filldir callbacks would need a variant of
	 * lookup_one_positive_unlocked() that doesn't check permissions.
	 */
	if (type == S_IFREG)
		may_flags |= NFSD_MAY_OWNER_OVERRIDE;
retry:
	err = fh_verify(rqstp, fhp, type, may_flags);
	if (!err) {
		host_err = __nfsd_open(fhp, type, may_flags, filp);
		if (host_err == -EOPENSTALE && !retried) {
			retried = true;
			fh_put(fhp);
			goto retry;
		}
		err = nfserrno(host_err);
	}
	return err;
}

/**
 * nfsd_open_verified - Open a regular file for the filecache
 * @fhp: NFS filehandle of the file to open
 * @type: S_IFMT inode type allowed (0 means any type is allowed)
 * @may_flags: internal permission flags
 * @filp: OUT: open "struct file *"
 *
 * Returns zero on success, or a negative errno value.
 */
int
nfsd_open_verified(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
{
	return __nfsd_open(fhp, type, may_flags, filp);
}

/*
 * Grab and keep cached pages associated with a file in the svc_rqst
 * so that they can be passed to the network sendmsg routines
 * directly. They will be released after the sending has completed.
 *
 * Return values: Number of bytes consumed, or -EIO if there are no
 * remaining pages in rqstp->rq_pages.
 */
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		  struct splice_desc *sd)
{
	struct svc_rqst *rqstp = sd->u.data;
	struct page *page = buf->page;	// may be a compound one
	unsigned offset = buf->offset;
	struct page *last_page;

	last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
	for (page += offset / PAGE_SIZE; page <= last_page; page++) {
		/*
		 * Skip page replacement when extending the contents of the
		 * current page. But note that we may get two zero_pages in a
		 * row from shmem.
		 */
		if (page == *(rqstp->rq_next_page - 1) &&
		    offset_in_page(rqstp->rq_res.page_base +
				   rqstp->rq_res.page_len))
			continue;
		if (unlikely(!svc_rqst_replace_page(rqstp, page)))
			return -EIO;
	}
	if (rqstp->rq_res.page_len == 0)	// first call
		rqstp->rq_res.page_base = offset % PAGE_SIZE;
	rqstp->rq_res.page_len += sd->len;
	return sd->len;
}

static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
				    struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}

static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
			    size_t expected)
{
	if (expected != 0 && len == 0)
		return 1;
	if (offset+len >= i_size_read(file_inode(file)))
		return 1;
	return 0;
}

static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
			       struct file *file, loff_t offset,
			       unsigned long *count, u32 *eof, ssize_t host_err)
{
	if (host_err >= 0) {
		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

		nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
		*eof = nfsd_eof_on_read(file, offset, host_err, *count);
		*count = host_err;
		fsnotify_access(file);
		trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
		return 0;
	} else {
		trace_nfsd_read_err(rqstp, fhp, offset, host_err);
		return nfserrno(host_err);
	}
}

/**
 * nfsd_splice_read - Perform a VFS read using a splice pipe
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @file: opened struct file of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
 */
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
			struct file *file, loff_t offset, unsigned long *count,
			u32 *eof)
{
	struct splice_desc sd = {
		.len		= 0,
		.total_len	= *count,
		.pos		= offset,
		.u.data		= rqstp,
	};
	ssize_t host_err;

	trace_nfsd_read_splice(rqstp, fhp, offset, *count);
	host_err = rw_verify_area(READ, file, &offset, *count);
	if (!host_err)
		host_err = splice_direct_to_actor(file, &sd,
						  nfsd_direct_splice_actor);
	return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}

/*
 * The byte range of the client's READ request is expanded on both ends
 * until it meets the underlying file system's direct I/O alignment
 * requirements. After the internal read is complete, the byte range of
 * the NFS READ payload is reduced to the byte range that was originally
 * requested.
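 *
 * For example, assuming the file system requires 4KB offset alignment,
 * a READ of 8192 bytes at offset 100 is issued as an aligned read of
 * bytes 0 through 12287; rq_res.page_base is then set to 100 so that
 * only the originally requested bytes appear in the reply.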
 *
 * Note that a direct read can be done only when the xdr_buf containing
 * the NFS READ reply does not already have contents in its .pages array.
 * This is due to potentially restrictive alignment requirements on the
 * read buffer. When .page_len and @base are zero, the .pages array is
 * guaranteed to be page-aligned.
 */
static noinline_for_stack __be32
nfsd_direct_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		 struct nfsd_file *nf, loff_t offset, unsigned long *count,
		 u32 *eof)
{
	u64 dio_start, dio_end;
	unsigned long v, total;
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t host_err;
	size_t len;

	init_sync_kiocb(&kiocb, nf->nf_file);
	kiocb.ki_flags |= IOCB_DIRECT;

	/* Read a properly-aligned region of bytes into rq_bvec */
	dio_start = round_down(offset, nf->nf_dio_read_offset_align);
	dio_end = round_up((u64)offset + *count, nf->nf_dio_read_offset_align);

	kiocb.ki_pos = dio_start;

	v = 0;
	total = dio_end - dio_start;
	while (total && v < rqstp->rq_maxpages &&
	       rqstp->rq_next_page < rqstp->rq_page_end) {
		len = min_t(size_t, total, PAGE_SIZE);
		bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
			      len, 0);

		total -= len;
		++rqstp->rq_next_page;
		++v;
	}

	trace_nfsd_read_direct(rqstp, fhp, offset, *count - total);
	iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
		      dio_end - dio_start - total);

	host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
	if (host_err >= 0) {
		unsigned int pad = offset - dio_start;

		/* The returned payload starts after the pad */
		rqstp->rq_res.page_base = pad;

		/* Compute the count of bytes to be returned */
		if (host_err > pad + *count)
			host_err = *count;
		else if (host_err > pad)
			host_err -= pad;
		else
			host_err = 0;
	} else if (unlikely(host_err == -EINVAL)) {
		struct inode *inode = d_inode(fhp->fh_dentry);

		pr_info_ratelimited("nfsd: Direct I/O alignment failure on %s/%ld\n",
				    inode->i_sb->s_id, inode->i_ino);
		host_err = -ESERVERFAULT;
	}

	return nfsd_finish_read(rqstp, fhp, nf->nf_file, offset, count,
				eof, host_err);
}

/**
 * nfsd_iter_read - Perform a VFS read using an iterator
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @nf: opened struct nfsd_file of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @base: offset in first page of read buffer
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * Some filesystems or situations cannot use nfsd_splice_read. This
 * function is the slightly less-performant fallback for those cases.
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
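 *
 * When nfsd_io_cache_read is NFSD_IO_DIRECT, the file system reports a
 * direct I/O offset alignment, and the reply's .pages array is still
 * empty, the request is handed off to nfsd_direct_read() instead.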
 */
__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		      struct nfsd_file *nf, loff_t offset, unsigned long *count,
		      unsigned int base, u32 *eof)
{
	struct file *file = nf->nf_file;
	unsigned long v, total;
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t host_err;
	size_t len;

	init_sync_kiocb(&kiocb, file);

	switch (nfsd_io_cache_read) {
	case NFSD_IO_BUFFERED:
		break;
	case NFSD_IO_DIRECT:
		/* When dio_read_offset_align is zero, dio is not supported */
		if (nf->nf_dio_read_offset_align && !rqstp->rq_res.page_len)
			return nfsd_direct_read(rqstp, fhp, nf, offset,
						count, eof);
		fallthrough;
	case NFSD_IO_DONTCACHE:
		if (file->f_op->fop_flags & FOP_DONTCACHE)
			kiocb.ki_flags = IOCB_DONTCACHE;
		break;
	}

	kiocb.ki_pos = offset;

	v = 0;
	total = *count;
	while (total && v < rqstp->rq_maxpages &&
	       rqstp->rq_next_page < rqstp->rq_page_end) {
		len = min_t(size_t, total, PAGE_SIZE - base);
		bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
			      len, base);

		total -= len;
		++rqstp->rq_next_page;
		++v;
		base = 0;
	}

	trace_nfsd_read_vector(rqstp, fhp, offset, *count - total);
	iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count - total);
	host_err = vfs_iocb_iter_read(file, &kiocb, &iter);
	return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}

/*
 * Gathered writes: If another process is currently writing to the file,
 * there's a high chance this is another nfsd (triggered by a bulk write
 * from a client's biod). Rather than syncing the file with each write
 * request, we sleep for 10 msec.
 *
 * I don't know if this roughly approximates C. Juszak's idea of
 * gathered writes, but it's a nice and simple solution (IMHO), and it
 * seems to work:-)
 *
 * Note: we do this only in the NFSv2 case, since v3 and higher have a
 * better tool (separate unstable writes and commits) for solving this
 * problem.
 */
static int wait_for_concurrent_writes(struct file *file)
{
	struct inode *inode = file_inode(file);
	static ino_t last_ino;
	static dev_t last_dev;
	int err = 0;

	if (atomic_read(&inode->i_writecount) > 1
	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
		msleep(10);
		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
	}

	if (inode_state_read_once(inode) & I_DIRTY) {
		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
		err = vfs_fsync(file, 0);
	}
	last_ino = inode->i_ino;
	last_dev = inode->i_sb->s_dev;
	return err;
}

struct nfsd_write_dio_seg {
	struct iov_iter	iter;
	int		flags;
};

static unsigned long
iov_iter_bvec_offset(const struct iov_iter *iter)
{
	return (unsigned long)(iter->bvec->bv_offset + iter->iov_offset);
}

static void
nfsd_write_dio_seg_init(struct nfsd_write_dio_seg *segment,
			struct bio_vec *bvec, unsigned int nvecs,
			unsigned long total, size_t start, size_t len,
			struct kiocb *iocb)
{
	iov_iter_bvec(&segment->iter, ITER_SOURCE, bvec, nvecs, total);
	if (start)
		iov_iter_advance(&segment->iter, start);
	iov_iter_truncate(&segment->iter, len);
	segment->flags = iocb->ki_flags;
}

static unsigned int
nfsd_write_dio_iters_init(struct nfsd_file *nf, struct bio_vec *bvec,
			  unsigned int nvecs, struct kiocb *iocb,
			  unsigned long total,
			  struct nfsd_write_dio_seg segments[3])
{
	u32 offset_align = nf->nf_dio_offset_align;
	loff_t prefix_end, orig_end, middle_end;
	u32 mem_align = nf->nf_dio_mem_align;
	size_t prefix, middle, suffix;
	loff_t offset = iocb->ki_pos;
	unsigned int nsegs = 0;

	/*
	 * Check if direct I/O is feasible for this write request.
	 * If alignments are not available, the write is too small,
	 * or no alignment can be found, fall back to buffered I/O.
	 */
	if (unlikely(!mem_align || !offset_align) ||
	    unlikely(total < max(offset_align, mem_align)))
		goto no_dio;

	prefix_end = round_up(offset, offset_align);
	orig_end = offset + total;
	middle_end = round_down(orig_end, offset_align);

	prefix = prefix_end - offset;
	middle = middle_end - prefix_end;
	suffix = orig_end - middle_end;

	if (!middle)
		goto no_dio;

	if (prefix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec,
					nvecs, total, 0, prefix, iocb);

	nfsd_write_dio_seg_init(&segments[nsegs], bvec, nvecs,
				total, prefix, middle, iocb);

	/*
	 * Check if the bvec iterator is aligned for direct I/O.
	 *
	 * bvecs generated from RPC receive buffers are contiguous: After
	 * the first bvec, all subsequent bvecs start at bv_offset zero
	 * (page-aligned). Therefore, only the first bvec is checked.
	 */
	if (iov_iter_bvec_offset(&segments[nsegs].iter) & (mem_align - 1))
		goto no_dio;
	segments[nsegs].flags |= IOCB_DIRECT;
	nsegs++;

	if (suffix)
		nfsd_write_dio_seg_init(&segments[nsegs++], bvec, nvecs, total,
					prefix + middle, suffix, iocb);

	return nsegs;

no_dio:
	/* No DIO alignment possible - pack into single non-DIO segment.
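	 * The single segment keeps the caller's kiocb flags (no IOCB_DIRECT),
	 * so the whole write is submitted through the buffered path.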
	 */
	nfsd_write_dio_seg_init(&segments[0], bvec, nvecs, total, 0,
				total, iocb);
	return 1;
}

static noinline_for_stack int
nfsd_direct_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  struct nfsd_file *nf, unsigned int nvecs,
		  unsigned long *cnt, struct kiocb *kiocb)
{
	struct nfsd_write_dio_seg segments[3];
	struct file *file = nf->nf_file;
	unsigned int nsegs, i;
	ssize_t host_err;

	nsegs = nfsd_write_dio_iters_init(nf, rqstp->rq_bvec, nvecs,
					  kiocb, *cnt, segments);

	*cnt = 0;
	for (i = 0; i < nsegs; i++) {
		kiocb->ki_flags = segments[i].flags;
		if (kiocb->ki_flags & IOCB_DIRECT)
			trace_nfsd_write_direct(rqstp, fhp, kiocb->ki_pos,
						segments[i].iter.count);
		else {
			trace_nfsd_write_vector(rqstp, fhp, kiocb->ki_pos,
						segments[i].iter.count);
			/*
			 * Mark the I/O buffer as evict-able to reduce
			 * memory contention.
			 */
			if (nf->nf_file->f_op->fop_flags & FOP_DONTCACHE)
				kiocb->ki_flags |= IOCB_DONTCACHE;
		}

		host_err = vfs_iocb_iter_write(file, kiocb, &segments[i].iter);
		if (host_err < 0)
			return host_err;
		*cnt += host_err;
		if (host_err < segments[i].iter.count)
			break;	/* partial write */
	}

	return 0;
}

/**
 * nfsd_vfs_write - write data to an already-open file
 * @rqstp: RPC execution context
 * @fhp: File handle of file to write into
 * @nf: An open file matching @fhp
 * @offset: Byte offset of start
 * @payload: xdr_buf containing the write payload
 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
 * @stable: An NFS stable_how value
 * @verf: NFS WRITE verifier
 *
 * Upon return, caller must invoke fh_put on @fhp.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
	       struct nfsd_file *nf, loff_t offset,
	       const struct xdr_buf *payload, unsigned long *cnt,
	       int stable, __be32 *verf)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct file *file = nf->nf_file;
	struct super_block *sb = file_inode(file)->i_sb;
	struct kiocb kiocb;
	struct svc_export *exp;
	struct iov_iter iter;
	errseq_t since;
	__be32 nfserr;
	int host_err;
	unsigned long exp_op_flags = 0;
	unsigned int pflags = current->flags;
	bool restore_flags = false;
	unsigned int nvecs;

	trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);

	if (sb->s_export_op)
		exp_op_flags = sb->s_export_op->flags;

	if (test_bit(RQ_LOCAL, &rqstp->rq_flags) &&
	    !(exp_op_flags & EXPORT_OP_REMOTE_FS)) {
		/*
		 * We want throttling in balance_dirty_pages()
		 * and shrink_inactive_list() to only consider
		 * the backingdev we are writing to, so that nfs to
		 * localhost doesn't cause nfsd to lock up due to all
		 * the client's dirty pages or its congested queue.
		 */
		current->flags |= PF_LOCAL_THROTTLE;
		restore_flags = true;
	}

	exp = fhp->fh_export;

	if (!EX_ISSYNC(exp))
		stable = NFS_UNSTABLE;
	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = offset;
	if (likely(!fhp->fh_use_wgather)) {
		switch (stable) {
		case NFS_FILE_SYNC:
			/* persist data and timestamps */
			kiocb.ki_flags |= IOCB_DSYNC | IOCB_SYNC;
			break;
		case NFS_DATA_SYNC:
			/* persist data only */
			kiocb.ki_flags |= IOCB_DSYNC;
			break;
		}
	}

	nvecs = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, payload);

	since = READ_ONCE(file->f_wb_err);
	if (verf)
		nfsd_copy_write_verifier(verf, nn);

	switch (nfsd_io_cache_write) {
	case NFSD_IO_DIRECT:
		host_err = nfsd_direct_write(rqstp, fhp, nf, nvecs,
					     cnt, &kiocb);
		break;
	case NFSD_IO_DONTCACHE:
		if (file->f_op->fop_flags & FOP_DONTCACHE)
			kiocb.ki_flags |= IOCB_DONTCACHE;
		fallthrough;
	case NFSD_IO_BUFFERED:
		iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt);
		host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
		if (host_err < 0)
			break;
		*cnt = host_err;
		break;
	}
	if (host_err < 0) {
		commit_reset_write_verifier(nn, rqstp, host_err);
		goto out_nfserr;
	}
	nfsd_stats_io_write_add(nn, exp, *cnt);
	fsnotify_modify(file);
	host_err = filemap_check_wb_err(file->f_mapping, since);
	if (host_err < 0)
		goto out_nfserr;

	if (stable && fhp->fh_use_wgather) {
		host_err = wait_for_concurrent_writes(file);
		if (host_err < 0)
			commit_reset_write_verifier(nn, rqstp, host_err);
	}

out_nfserr:
	if (host_err >= 0) {
		trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt);
		nfserr = nfs_ok;
	} else {
		trace_nfsd_write_err(rqstp, fhp, offset, host_err);
		nfserr = nfserrno(host_err);
	}
	if (restore_flags)
		current_restore_flags(pflags, PF_LOCAL_THROTTLE);
	return nfserr;
}

/**
 * nfsd_read_splice_ok - check if spliced reading is supported
 * @rqstp: RPC transaction context
 *
 * Return values:
 *   %true: nfsd_splice_read() may be used
 *   %false: nfsd_splice_read() must not be used
 *
 * NFS READ normally uses splice to send data in-place. However the
 * data in cache can change after the reply's MIC is computed but
 * before the RPC reply is sent. To prevent the client from
 * rejecting the server-computed MIC in this somewhat rare case, do
 * not use splice with the GSS integrity and privacy services.
 */
bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
{
	if (nfsd_disable_splice_read)
		return false;
	switch (svc_auth_flavor(rqstp)) {
	case RPC_AUTH_GSS_KRB5I:
	case RPC_AUTH_GSS_KRB5P:
		return false;
	}
	return true;
}

/**
 * nfsd_read - Read data from a file
 * @rqstp: RPC transaction context
 * @fhp: file handle of file to be read
 * @offset: starting byte offset
 * @count: IN: requested number of bytes; OUT: number of bytes read
 * @eof: OUT: set non-zero if operation reached the end of the file
 *
 * The caller must verify that there is enough space in @rqstp.rq_res
 * to perform this operation.
 *
 * N.B. After this call fhp needs an fh_put
 *
 * Returns nfs_ok on success, otherwise an nfserr stat value is
 * returned.
 */
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
		 loff_t offset, unsigned long *count, u32 *eof)
{
	struct nfsd_file *nf;
	struct file *file;
	__be32 err;

	trace_nfsd_read_start(rqstp, fhp, offset, *count);
	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (err)
		return err;

	file = nf->nf_file;
	if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
		err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
	else
		err = nfsd_iter_read(rqstp, fhp, nf, offset, count, 0, eof);

	nfsd_file_put(nf);
	trace_nfsd_read_done(rqstp, fhp, offset, *count);
	return err;
}

/**
 * nfsd_write - open a file and write data to it
 * @rqstp: RPC execution context
 * @fhp: File handle of file to write into; nfsd_write() may modify it
 * @offset: Byte offset of start
 * @payload: xdr_buf containing the write payload
 * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
 * @stable: An NFS stable_how value
 * @verf: NFS WRITE verifier
 *
 * Upon return, caller must invoke fh_put on @fhp.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
	   const struct xdr_buf *payload, unsigned long *cnt, int stable,
	   __be32 *verf)
{
	struct nfsd_file *nf;
	__be32 err;

	trace_nfsd_write_start(rqstp, fhp, offset, *cnt);

	err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
	if (err)
		goto out;

	err = nfsd_vfs_write(rqstp, fhp, nf, offset, payload, cnt,
			     stable, verf);
	nfsd_file_put(nf);
out:
	trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
	return err;
}

/**
 * nfsd_commit - Commit pending writes to stable storage
 * @rqstp: RPC request being processed
 * @fhp: NFS filehandle
 * @nf: target file
 * @offset: raw offset from beginning of file
 * @count: raw count of bytes to sync
 * @verf: filled in with the server's current write verifier
 *
 * Note: we guarantee that data that lies within the range specified
 * by the 'offset' and 'count' parameters will be synced. The server
 * is permitted to sync data that lies outside this range at the
 * same time.
 *
 * Unfortunately we cannot lock the file to make sure we return full WCC
 * data to the client, as locking happens lower down in the filesystem.
 *
 * Return values:
 *   An nfsstat value in network byte order.
 */
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
	    u64 offset, u32 count, __be32 *verf)
{
	__be32		err = nfs_ok;
	u64		maxbytes;
	loff_t		start, end;
	struct nfsd_net	*nn;

	trace_nfsd_commit_start(rqstp, fhp, offset, count);

	/*
	 * Convert the client-provided (offset, count) range to a
	 * (start, end) range. If the client-provided range falls
	 * outside the maximum file size of the underlying FS,
	 * clamp the sync range appropriately.
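	 * For example, a COMMIT with offset 0 and count 0 syncs the entire
	 * file: start stays 0 and end stays LLONG_MAX.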
	 */
	start = 0;
	end = LLONG_MAX;
	maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
	if (offset < maxbytes) {
		start = offset;
		if (count && (offset + count - 1 < maxbytes))
			end = offset + count - 1;
	}

	nn = net_generic(nf->nf_net, nfsd_net_id);
	if (EX_ISSYNC(fhp->fh_export)) {
		errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
		int err2;

		err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
		switch (err2) {
		case 0:
			nfsd_copy_write_verifier(verf, nn);
			err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
						    since);
			err = nfserrno(err2);
			break;
		case -EINVAL:
			err = nfserr_notsupp;
			break;
		default:
			commit_reset_write_verifier(nn, rqstp, err2);
			err = nfserrno(err2);
		}
	} else
		nfsd_copy_write_verifier(verf, nn);

	trace_nfsd_commit_done(rqstp, fhp, offset, count);
	return err;
}

/**
 * nfsd_create_setattr - Set a created file's attributes
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of parent directory
 * @resfhp: NFS filehandle of new object
 * @attrs: requested attributes of new object
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
 */
__be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
		    struct svc_fh *resfhp, struct nfsd_attrs *attrs)
{
	struct iattr *iap = attrs->na_iattr;
	__be32 status;

	/*
	 * Mode has already been set by file creation.
	 */
	iap->ia_valid &= ~ATTR_MODE;

	/*
	 * Setting uid/gid works only for root. Irix appears to
	 * send along the gid on create when it tries to implement
	 * setgid directories via NFS:
	 */
	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);

	/*
	 * Callers expect new file metadata to be committed even
	 * if the attributes have not changed.
	 */
	if (nfsd_attrs_valid(attrs))
		status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
	else
		status = nfserrno(commit_metadata(resfhp));

	/*
	 * Transactional filesystems had a chance to commit changes
	 * for both parent and child simultaneously making the
	 * following commit_metadata a noop in many cases.
	 */
	if (!status)
		status = nfserrno(commit_metadata(fhp));

	/*
	 * Update the new filehandle to pick up the new attributes.
	 */
	if (!status)
		status = fh_update(resfhp);

	return status;
}

/* HPUX client sometimes creates a file in mode 000, and sets size to 0.
 * setting size to 0 may fail for some specific file systems by the permission
 * checking which requires WRITE permission but the mode is 000.
 * we ignore the resizing(to 0) on the just new created file, since the size is
 * 0 after file created.
 *
 * call this only after vfs_create() is called.
 * */
static void
nfsd_check_ignore_resizing(struct iattr *iap)
{
	if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
		iap->ia_valid &= ~ATTR_SIZE;
}

/* The parent directory should already be locked - we will unlock */
__be32
nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   struct nfsd_attrs *attrs,
		   int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dchild;
	struct inode	*dirp;
	struct iattr	*iap = attrs->na_iattr;
	__be32		err;
	int		host_err = 0;

	dentry = fhp->fh_dentry;
	dirp = d_inode(dentry);

	dchild = dget(resfhp->fh_dentry);
	err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
			      NFSD_MAY_CREATE);
	if (err)
		goto out;

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;

	if (!IS_POSIXACL(dirp))
		iap->ia_mode &= ~current_umask();

	err = 0;
	switch (type) {
	case S_IFREG:
		host_err = vfs_create(&nop_mnt_idmap, dchild, iap->ia_mode, NULL);
		if (!host_err)
			nfsd_check_ignore_resizing(iap);
		break;
	case S_IFDIR:
		dchild = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode, NULL);
		if (IS_ERR(dchild)) {
			host_err = PTR_ERR(dchild);
		} else if (d_is_negative(dchild)) {
			err = nfserr_serverfault;
			goto out;
		} else if (unlikely(dchild != resfhp->fh_dentry)) {
			dput(resfhp->fh_dentry);
			resfhp->fh_dentry = dget(dchild);
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
				     iap->ia_mode, rdev, NULL);
		break;
	default:
		printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
		       type);
		host_err = -EINVAL;
	}
	if (host_err < 0)
		goto out_nfserr;

	err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);

out:
	if (!err)
		fh_fill_post_attrs(fhp);
	end_creating(dchild);
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}

/*
 * Create a filesystem object (regular, directory, special).
 * Note that the parent directory is left locked.
 *
 * N.B.
 * Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
 */
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
	    char *fname, int flen, struct nfsd_attrs *attrs,
	    int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dchild = NULL;
	__be32		err;
	int		host_err;

	trace_nfsd_vfs_create(rqstp, fhp, type, fname, flen);

	if (isdotent(fname, flen))
		return nfserr_exist;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
	if (err)
		return err;

	dentry = fhp->fh_dentry;

	host_err = fh_want_write(fhp);
	if (host_err)
		return nfserrno(host_err);

	dchild = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen));
	host_err = PTR_ERR(dchild);
	if (IS_ERR(dchild))
		return nfserrno(host_err);

	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
	if (err)
		goto out_unlock;
	err = fh_fill_pre_attrs(fhp);
	if (err != nfs_ok)
		goto out_unlock;
	err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
	/* nfsd_create_locked() unlocked the parent */
	dput(dchild);
	return err;

out_unlock:
	end_creating(dchild);
	return err;
}

/*
 * Read a symlink. On entry, *lenp must contain the maximum path length that
 * fits into the buffer. On return, it contains the true length.
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
{
	__be32 err;
	const char *link;
	struct path path;
	DEFINE_DELAYED_CALL(done);
	int len;

	err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
	if (unlikely(err))
		return err;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;

	if (unlikely(!d_is_symlink(path.dentry)))
		return nfserr_inval;

	touch_atime(&path);

	link = vfs_get_link(path.dentry, &done);
	if (IS_ERR(link))
		return nfserrno(PTR_ERR(link));

	len = strlen(link);
	if (len < *lenp)
		*lenp = len;
	memcpy(buf, link, *lenp);
	do_delayed_call(&done);
	return 0;
}

/**
 * nfsd_symlink - Create a symlink and look up its inode
 * @rqstp: RPC transaction being executed
 * @fhp: NFS filehandle of parent directory
 * @fname: filename of the new symlink
 * @flen: length of @fname
 * @path: content of the new symlink (NUL-terminated)
 * @attrs: requested attributes of new object
 * @resfhp: NFS filehandle of new object
 *
 * N.B. After this call _both_ fhp and resfhp need an fh_put
 *
 * Returns nfs_ok on success, or an nfsstat in network byte order.
1955 */ 1956 __be32 1957 nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, 1958 char *fname, int flen, 1959 char *path, struct nfsd_attrs *attrs, 1960 struct svc_fh *resfhp) 1961 { 1962 struct dentry *dentry, *dnew; 1963 __be32 err, cerr; 1964 int host_err; 1965 1966 trace_nfsd_vfs_symlink(rqstp, fhp, fname, flen, path); 1967 1968 err = nfserr_noent; 1969 if (!flen || path[0] == '\0') 1970 goto out; 1971 err = nfserr_exist; 1972 if (isdotent(fname, flen)) 1973 goto out; 1974 1975 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); 1976 if (err) 1977 goto out; 1978 1979 host_err = fh_want_write(fhp); 1980 if (host_err) { 1981 err = nfserrno(host_err); 1982 goto out; 1983 } 1984 1985 dentry = fhp->fh_dentry; 1986 dnew = start_creating(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen)); 1987 if (IS_ERR(dnew)) { 1988 err = nfserrno(PTR_ERR(dnew)); 1989 goto out_drop_write; 1990 } 1991 err = fh_fill_pre_attrs(fhp); 1992 if (err != nfs_ok) 1993 goto out_unlock; 1994 host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path, NULL); 1995 err = nfserrno(host_err); 1996 cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp); 1997 if (!err) 1998 nfsd_create_setattr(rqstp, fhp, resfhp, attrs); 1999 fh_fill_post_attrs(fhp); 2000 out_unlock: 2001 end_creating(dnew); 2002 if (!err) 2003 err = nfserrno(commit_metadata(fhp)); 2004 if (!err) 2005 err = cerr; 2006 out_drop_write: 2007 fh_drop_write(fhp); 2008 out: 2009 return err; 2010 } 2011 2012 /** 2013 * nfsd_link - create a link 2014 * @rqstp: RPC transaction context 2015 * @ffhp: the file handle of the directory where the new link is to be created 2016 * @name: the filename of the new link 2017 * @len: the length of @name in octets 2018 * @tfhp: the file handle of an existing file object 2019 * 2020 * After this call _both_ ffhp and tfhp need an fh_put. 2021 * 2022 * Returns a generic NFS status code in network byte-order. 
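 *
 * An -EBUSY result from the VFS is mapped to a status that depends on the
 * type of the source object (nfserr_file_open for non-directories,
 * nfserr_acces otherwise), as required by RFC 8881 Section 18.9.4.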
2023 */
2024 __be32
2025 nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
2026 	  char *name, int len, struct svc_fh *tfhp)
2027 {
2028 	struct dentry *ddir, *dnew, *dold;
2029 	struct inode *dirp;
2030 	int type;
2031 	__be32 err;
2032 	int host_err;
2033
2034 	trace_nfsd_vfs_link(rqstp, ffhp, tfhp, name, len);
2035
2036 	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
2037 	if (err)
2038 		goto out;
2039 	err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
2040 	if (err)
2041 		goto out;
2042 	err = nfserr_isdir;
2043 	if (d_is_dir(tfhp->fh_dentry))
2044 		goto out;
2045 	err = nfserr_perm;
2046 	if (!len)
2047 		goto out;
2048 	err = nfserr_exist;
2049 	if (isdotent(name, len))
2050 		goto out;
2051
2052 	err = nfs_ok;
2053 	type = d_inode(tfhp->fh_dentry)->i_mode & S_IFMT;
2054 	host_err = fh_want_write(tfhp);
2055 	if (host_err)
2056 		goto out;
2057
2058 	ddir = ffhp->fh_dentry;
2059 	dirp = d_inode(ddir);
2060 	dnew = start_creating(&nop_mnt_idmap, ddir, &QSTR_LEN(name, len));
2061
2062 	if (IS_ERR(dnew)) {
2063 		host_err = PTR_ERR(dnew);
2064 		goto out_drop_write;
2065 	}
2066
2067 	dold = tfhp->fh_dentry;
2068
2069 	err = nfserr_noent;
2070 	if (d_really_is_negative(dold))
2071 		goto out_unlock;
2072 	err = fh_fill_pre_attrs(ffhp);
2073 	if (err != nfs_ok)
2074 		goto out_unlock;
2075 	host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
2076 	fh_fill_post_attrs(ffhp);
2077 out_unlock:
2078 	end_creating(dnew);
2079 	if (!host_err) {
2080 		host_err = commit_metadata(ffhp);
2081 		if (!host_err)
2082 			host_err = commit_metadata(tfhp);
2083 	}
2084
2085 out_drop_write:
2086 	fh_drop_write(tfhp);
2087 	if (host_err == -EBUSY) {
2088 		/*
2089 		 * See RFC 8881 Section 18.9.4 para 1-2: NFSv4 LINK
2090 		 * wants a status unique to the object type.
2091 		 */
2092 		if (type != S_IFDIR)
2093 			err = nfserr_file_open;
2094 		else
2095 			err = nfserr_acces;
2096 	}
2097 out:
2098 	return err != nfs_ok ? err : nfserrno(host_err);
2099 }
2100
2101 static void
2102 nfsd_close_cached_files(struct dentry *dentry)
2103 {
2104 	struct inode *inode = d_inode(dentry);
2105
2106 	if (inode && S_ISREG(inode->i_mode))
2107 		nfsd_file_close_inode_sync(inode);
2108 }
2109
2110 static bool
2111 nfsd_has_cached_files(struct dentry *dentry)
2112 {
2113 	bool ret = false;
2114 	struct inode *inode = d_inode(dentry);
2115
2116 	if (inode && S_ISREG(inode->i_mode))
2117 		ret = nfsd_file_is_cached(inode);
2118 	return ret;
2119 }
2120
2121 /**
2122  * nfsd_rename - rename a directory entry
2123  * @rqstp: RPC transaction context
2124  * @ffhp: the file handle of the parent directory containing the entry to be renamed
2125  * @fname: the filename of the directory entry to be renamed
2126  * @flen: the length of @fname in octets
2127  * @tfhp: the file handle of the parent directory to contain the renamed entry
2128  * @tname: the filename of the new entry
2129  * @tlen: the length of @tname in octets
2130  *
2131  * After this call _both_ ffhp and tfhp need an fh_put.
2132  *
2133  * Returns a generic NFS status code in network byte-order.
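 *
 * When the rename target is a regular file that is still open in the
 * nfsd file cache and the filesystem sets EXPORT_OP_CLOSE_BEFORE_UNLINK,
 * the cached files are closed first and the whole operation is retried.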
2134 */ 2135 __be32 2136 nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, 2137 struct svc_fh *tfhp, char *tname, int tlen) 2138 { 2139 struct dentry *fdentry, *tdentry; 2140 int type = S_IFDIR; 2141 struct renamedata rd = {}; 2142 __be32 err; 2143 int host_err; 2144 struct dentry *close_cached; 2145 2146 trace_nfsd_vfs_rename(rqstp, ffhp, tfhp, fname, flen, tname, tlen); 2147 2148 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE); 2149 if (err) 2150 goto out; 2151 err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE); 2152 if (err) 2153 goto out; 2154 2155 fdentry = ffhp->fh_dentry; 2156 2157 tdentry = tfhp->fh_dentry; 2158 2159 err = nfserr_perm; 2160 if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) 2161 goto out; 2162 2163 err = nfserr_xdev; 2164 if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) 2165 goto out; 2166 if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry) 2167 goto out; 2168 2169 retry: 2170 close_cached = NULL; 2171 host_err = fh_want_write(ffhp); 2172 if (host_err) { 2173 err = nfserrno(host_err); 2174 goto out; 2175 } 2176 2177 rd.mnt_idmap = &nop_mnt_idmap; 2178 rd.old_parent = fdentry; 2179 rd.new_parent = tdentry; 2180 2181 host_err = start_renaming(&rd, 0, &QSTR_LEN(fname, flen), 2182 &QSTR_LEN(tname, tlen)); 2183 2184 if (host_err) { 2185 err = nfserrno(host_err); 2186 goto out_want_write; 2187 } 2188 err = fh_fill_pre_attrs(ffhp); 2189 if (err != nfs_ok) 2190 goto out_unlock; 2191 err = fh_fill_pre_attrs(tfhp); 2192 if (err != nfs_ok) 2193 goto out_unlock; 2194 2195 type = d_inode(rd.old_dentry)->i_mode & S_IFMT; 2196 2197 if (d_inode(rd.new_dentry)) 2198 type = d_inode(rd.new_dentry)->i_mode & S_IFMT; 2199 2200 if ((rd.new_dentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) && 2201 nfsd_has_cached_files(rd.new_dentry)) { 2202 close_cached = dget(rd.new_dentry); 2203 goto out_unlock; 2204 } else { 2205 int retries; 2206 2207 for (retries = 1;;) { 2208 host_err = vfs_rename(&rd); 2209 if (host_err != -EAGAIN || !retries--) 2210 break; 2211 if (!nfsd_wait_for_delegreturn(rqstp, d_inode(rd.old_dentry))) 2212 break; 2213 } 2214 if (!host_err) { 2215 host_err = commit_metadata(tfhp); 2216 if (!host_err) 2217 host_err = commit_metadata(ffhp); 2218 } 2219 } 2220 if (host_err == -EBUSY) { 2221 /* 2222 * See RFC 8881 Section 18.26.4 para 1-3: NFSv4 RENAME 2223 * wants a status unique to the object type. 2224 */ 2225 if (type != S_IFDIR) 2226 err = nfserr_file_open; 2227 else 2228 err = nfserr_acces; 2229 } else { 2230 err = nfserrno(host_err); 2231 } 2232 2233 if (!close_cached) { 2234 fh_fill_post_attrs(ffhp); 2235 fh_fill_post_attrs(tfhp); 2236 } 2237 out_unlock: 2238 end_renaming(&rd); 2239 out_want_write: 2240 fh_drop_write(ffhp); 2241 2242 /* 2243 * If the target dentry has cached open files, then we need to 2244 * try to close them prior to doing the rename. Final fput 2245 * shouldn't be done with locks held however, so we delay it 2246 * until this point and then reattempt the whole shebang. 
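 *
 * nfsd_close_cached_files() flushes the cached nfsd opens for that
 * inode synchronously, so the retried rename should no longer find any.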
2247 */ 2248 if (close_cached) { 2249 nfsd_close_cached_files(close_cached); 2250 dput(close_cached); 2251 goto retry; 2252 } 2253 out: 2254 return err; 2255 } 2256 2257 /** 2258 * nfsd_unlink - remove a directory entry 2259 * @rqstp: RPC transaction context 2260 * @fhp: the file handle of the parent directory to be modified 2261 * @type: enforced file type of the object to be removed 2262 * @fname: the name of directory entry to be removed 2263 * @flen: length of @fname in octets 2264 * 2265 * After this call fhp needs an fh_put. 2266 * 2267 * Returns a generic NFS status code in network byte-order. 2268 */ 2269 __be32 2270 nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, 2271 char *fname, int flen) 2272 { 2273 struct dentry *dentry, *rdentry; 2274 struct inode *dirp; 2275 struct inode *rinode = NULL; 2276 __be32 err; 2277 int host_err; 2278 2279 trace_nfsd_vfs_unlink(rqstp, fhp, fname, flen); 2280 2281 err = nfserr_acces; 2282 if (!flen || isdotent(fname, flen)) 2283 goto out; 2284 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE); 2285 if (err) 2286 goto out; 2287 2288 host_err = fh_want_write(fhp); 2289 if (host_err) 2290 goto out_nfserr; 2291 2292 dentry = fhp->fh_dentry; 2293 dirp = d_inode(dentry); 2294 2295 rdentry = start_removing(&nop_mnt_idmap, dentry, &QSTR_LEN(fname, flen)); 2296 2297 host_err = PTR_ERR(rdentry); 2298 if (IS_ERR(rdentry)) 2299 goto out_drop_write; 2300 2301 err = fh_fill_pre_attrs(fhp); 2302 if (err != nfs_ok) 2303 goto out_unlock; 2304 2305 rinode = d_inode(rdentry); 2306 /* Prevent truncation until after locks dropped */ 2307 ihold(rinode); 2308 2309 if (!type) 2310 type = d_inode(rdentry)->i_mode & S_IFMT; 2311 2312 if (type != S_IFDIR) { 2313 int retries; 2314 2315 if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) 2316 nfsd_close_cached_files(rdentry); 2317 2318 for (retries = 1;;) { 2319 host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL); 2320 if (host_err != -EAGAIN || !retries--) 2321 break; 2322 if (!nfsd_wait_for_delegreturn(rqstp, rinode)) 2323 break; 2324 } 2325 } else { 2326 host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry, NULL); 2327 } 2328 fh_fill_post_attrs(fhp); 2329 2330 out_unlock: 2331 end_removing(rdentry); 2332 if (!err && !host_err) 2333 host_err = commit_metadata(fhp); 2334 iput(rinode); /* truncate the inode here */ 2335 2336 out_drop_write: 2337 fh_drop_write(fhp); 2338 out_nfserr: 2339 if (host_err == -EBUSY) { 2340 /* 2341 * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE 2342 * wants a status unique to the object type. 2343 */ 2344 if (type != S_IFDIR) 2345 err = nfserr_file_open; 2346 else 2347 err = nfserr_acces; 2348 } 2349 out: 2350 return err != nfs_ok ? err : nfserrno(host_err); 2351 } 2352 2353 /* 2354 * We do this buffering because we must not call back into the file 2355 * system's ->lookup() method from the filldir callback. That may well 2356 * deadlock a number of file systems. 2357 * 2358 * This is based heavily on the implementation of same in XFS. 
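 *
 * Entries are first copied into a page-sized buffer by
 * nfsd_buffered_filldir() and only replayed to the nfsd filldir callback
 * once iterate_dir() has returned, so ->lookup() is never called from
 * within the filldir context.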
2359 */
2360 struct buffered_dirent {
2361 	u64		ino;
2362 	loff_t		offset;
2363 	int		namlen;
2364 	unsigned int	d_type;
2365 	char		name[];
2366 };
2367
2368 struct readdir_data {
2369 	struct dir_context ctx;
2370 	char		*dirent;
2371 	size_t		used;
2372 	int		full;
2373 };
2374
2375 static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
2376 				  int namlen, loff_t offset, u64 ino,
2377 				  unsigned int d_type)
2378 {
2379 	struct readdir_data *buf =
2380 		container_of(ctx, struct readdir_data, ctx);
2381 	struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
2382 	unsigned int reclen;
2383
2384 	reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
2385 	if (buf->used + reclen > PAGE_SIZE) {
2386 		buf->full = 1;
2387 		return false;
2388 	}
2389
2390 	de->namlen = namlen;
2391 	de->offset = offset;
2392 	de->ino = ino;
2393 	de->d_type = d_type;
2394 	memcpy(de->name, name, namlen);
2395 	buf->used += reclen;
2396
2397 	return true;
2398 }
2399
2400 static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
2401 				    nfsd_filldir_t func, struct readdir_cd *cdp,
2402 				    loff_t *offsetp)
2403 {
2404 	struct buffered_dirent *de;
2405 	int host_err;
2406 	int size;
2407 	loff_t offset;
2408 	struct readdir_data buf = {
2409 		.ctx.actor = nfsd_buffered_filldir,
2410 		.dirent = (void *)__get_free_page(GFP_KERNEL)
2411 	};
2412
2413 	if (!buf.dirent)
2414 		return nfserrno(-ENOMEM);
2415
2416 	offset = *offsetp;
2417
2418 	while (1) {
2419 		unsigned int reclen;
2420
2421 		cdp->err = nfserr_eof; /* will be cleared on successful read */
2422 		buf.used = 0;
2423 		buf.full = 0;
2424
2425 		host_err = iterate_dir(file, &buf.ctx);
2426 		if (buf.full)
2427 			host_err = 0;
2428
2429 		if (host_err < 0)
2430 			break;
2431
2432 		size = buf.used;
2433
2434 		if (!size)
2435 			break;
2436
2437 		de = (struct buffered_dirent *)buf.dirent;
2438 		while (size > 0) {
2439 			offset = de->offset;
2440
2441 			if (func(cdp, de->name, de->namlen, de->offset,
2442 				 de->ino, de->d_type))
2443 				break;
2444
2445 			if (cdp->err != nfs_ok)
2446 				break;
2447
2448 			trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);
2449
2450 			reclen = ALIGN(sizeof(*de) + de->namlen,
2451 				       sizeof(u64));
2452 			size -= reclen;
2453 			de = (struct buffered_dirent *)((char *)de + reclen);
2454 		}
2455 		if (size > 0) /* We bailed out early */
2456 			break;
2457
2458 		offset = vfs_llseek(file, 0, SEEK_CUR);
2459 	}
2460
2461 	free_page((unsigned long)(buf.dirent));
2462
2463 	if (host_err)
2464 		return nfserrno(host_err);
2465
2466 	*offsetp = offset;
2467 	return cdp->err;
2468 }
2469
2470 /**
2471  * nfsd_readdir - Read entries from a directory
2472  * @rqstp: RPC transaction context
2473  * @fhp: NFS file handle of directory to be read
2474  * @offsetp: OUT: seek offset of final entry that was read
2475  * @cdp: OUT: an eof error value
2476  * @func: entry filler actor
2477  *
2478  * This implementation ignores the NFSv3/4 verifier cookie.
2479  *
2480  * NB: normal system calls hold file->f_pos_lock when calling
2481  * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
2482  * Because the struct file acquired here is not visible to other
2483  * threads, its internal state does not need mutex protection.
2484  *
2485  * Returns nfs_ok on success, otherwise an nfsstat code is
2486  * returned.
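 *
 * The directory is opened locally, FMODE_64BITHASH or FMODE_32BITHASH is
 * set according to fhp->fh_64bit_cookies, and the file is positioned at
 * the offset in @offsetp before being handed to nfsd_buffered_readdir().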
2487 */
2488 __be32
2489 nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
2490 	     struct readdir_cd *cdp, nfsd_filldir_t func)
2491 {
2492 	__be32 err;
2493 	struct file *file;
2494 	loff_t offset = *offsetp;
2495 	int may_flags = NFSD_MAY_READ;
2496
2497 	err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
2498 	if (err)
2499 		goto out;
2500
2501 	if (fhp->fh_64bit_cookies)
2502 		file->f_mode |= FMODE_64BITHASH;
2503 	else
2504 		file->f_mode |= FMODE_32BITHASH;
2505
2506 	offset = vfs_llseek(file, offset, SEEK_SET);
2507 	if (offset < 0) {
2508 		err = nfserrno((int)offset);
2509 		goto out_close;
2510 	}
2511
2512 	err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp);
2513
2514 	if (err == nfserr_eof || err == nfserr_toosmall)
2515 		err = nfs_ok; /* can still be found in ->err */
2516 out_close:
2517 	nfsd_filp_close(file);
2518 out:
2519 	return err;
2520 }
2521
2522 /**
2523  * nfsd_filp_close - close a file synchronously
2524  * @fp: the file to close
2525  *
2526  * nfsd_filp_close() is similar in behaviour to filp_close().
2527  * The difference is that if this is the final close on the
2528  * file, then the finalisation happens immediately, rather than
2529  * being handed over to a work_queue, as is the case for
2530  * filp_close().
2531  * When a user-space process closes a file (even when using
2532  * filp_close()) the finalisation happens before returning to
2533  * userspace, so it is effectively synchronous.  When a kernel thread
2534  * uses filp_close(), on the other hand, the handling is completely
2535  * asynchronous.  This means that any cost imposed by that finalisation
2536  * is not imposed on the nfsd thread, and nfsd could potentially
2537  * close files more quickly than the work queue finalises the close,
2538  * which would lead to unbounded growth in the queue.
2539  *
2540  * In some contexts it is not safe to synchronously wait for
2541  * close finalisation (see comment for __fput_sync()), but nfsd
2542  * does not match those contexts.  In particular it does not, at the
2543  * time that this function is called, hold any locks, and no finalisation
2544  * of any file, socket, or device driver would have any cause to wait
2545  * for nfsd to make progress.
2546  */
2547 void nfsd_filp_close(struct file *fp)
2548 {
2549 	get_file(fp);
2550 	filp_close(fp, NULL);
2551 	__fput_sync(fp);
2552 }
2553
2554 /*
2555  * Get file system stats
2556  * N.B. After this call fhp needs an fh_put
2557  */
2558 __be32
2559 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2560 {
2561 	__be32 err;
2562
2563 	trace_nfsd_vfs_statfs(rqstp, fhp);
2564
2565 	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2566 	if (!err) {
2567 		struct path path = {
2568 			.mnt	= fhp->fh_export->ex_path.mnt,
2569 			.dentry	= fhp->fh_dentry,
2570 		};
2571 		if (vfs_statfs(&path, stat))
2572 			err = nfserr_io;
2573 	}
2574 	return err;
2575 }
2576
2577 static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
2578 {
2579 	return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
2580 }
2581
2582 #ifdef CONFIG_NFSD_V4
2583 /*
2584  * Helper function to translate error numbers. In the case of xattr operations,
2585  * some error codes need to be translated outside of the standard translations.
2586  *
2587  * ENODATA needs to be translated to nfserr_noxattr.
2588  * E2BIG to nfserr_xattr2big.
2589  *
2590  * Additionally, vfs_listxattr can return -ERANGE. This means that the
2591  * file has too many extended attributes to retrieve inside an
2592  * XATTR_LIST_MAX sized buffer.
This is a bug in the xattr implementation: 2593 * filesystems will allow the adding of extended attributes until they hit 2594 * their own internal limit. This limit may be larger than XATTR_LIST_MAX. 2595 * So, at that point, the attributes are present and valid, but can't 2596 * be retrieved using listxattr, since the upper level xattr code enforces 2597 * the XATTR_LIST_MAX limit. 2598 * 2599 * This bug means that we need to deal with listxattr returning -ERANGE. The 2600 * best mapping is to return TOOSMALL. 2601 */ 2602 static __be32 2603 nfsd_xattr_errno(int err) 2604 { 2605 switch (err) { 2606 case -ENODATA: 2607 return nfserr_noxattr; 2608 case -E2BIG: 2609 return nfserr_xattr2big; 2610 case -ERANGE: 2611 return nfserr_toosmall; 2612 } 2613 return nfserrno(err); 2614 } 2615 2616 /* 2617 * Retrieve the specified user extended attribute. To avoid always 2618 * having to allocate the maximum size (since we are not getting 2619 * a maximum size from the RPC), do a probe + alloc. Hold a reader 2620 * lock on i_rwsem to prevent the extended attribute from changing 2621 * size while we're doing this. 2622 */ 2623 __be32 2624 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name, 2625 void **bufp, int *lenp) 2626 { 2627 ssize_t len; 2628 __be32 err; 2629 char *buf; 2630 struct inode *inode; 2631 struct dentry *dentry; 2632 2633 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ); 2634 if (err) 2635 return err; 2636 2637 err = nfs_ok; 2638 dentry = fhp->fh_dentry; 2639 inode = d_inode(dentry); 2640 2641 inode_lock_shared(inode); 2642 2643 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0); 2644 2645 /* 2646 * Zero-length attribute, just return. 2647 */ 2648 if (len == 0) { 2649 *bufp = NULL; 2650 *lenp = 0; 2651 goto out; 2652 } 2653 2654 if (len < 0) { 2655 err = nfsd_xattr_errno(len); 2656 goto out; 2657 } 2658 2659 if (len > *lenp) { 2660 err = nfserr_toosmall; 2661 goto out; 2662 } 2663 2664 buf = kvmalloc(len, GFP_KERNEL); 2665 if (buf == NULL) { 2666 err = nfserr_jukebox; 2667 goto out; 2668 } 2669 2670 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len); 2671 if (len <= 0) { 2672 kvfree(buf); 2673 buf = NULL; 2674 err = nfsd_xattr_errno(len); 2675 } 2676 2677 *lenp = len; 2678 *bufp = buf; 2679 2680 out: 2681 inode_unlock_shared(inode); 2682 2683 return err; 2684 } 2685 2686 /* 2687 * Retrieve the xattr names. Since we can't know how many are 2688 * user extended attributes, we must get all attributes here, 2689 * and have the XDR encode filter out the "user." ones. 2690 * 2691 * While this could always just allocate an XATTR_LIST_MAX 2692 * buffer, that's a waste, so do a probe + allocate. To 2693 * avoid any changes between the probe and allocate, wrap 2694 * this in inode_lock. 
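 *
 * The name list returned in *bufp is allocated with kvmalloc(), sized by
 * an initial vfs_listxattr() probe made under the same inode_lock_shared().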
2695 */
2696 __be32
2697 nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
2698 	       int *lenp)
2699 {
2700 	ssize_t len;
2701 	__be32 err;
2702 	char *buf;
2703 	struct inode *inode;
2704 	struct dentry *dentry;
2705
2706 	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
2707 	if (err)
2708 		return err;
2709
2710 	dentry = fhp->fh_dentry;
2711 	inode = d_inode(dentry);
2712 	*lenp = 0;
2713
2714 	inode_lock_shared(inode);
2715
2716 	len = vfs_listxattr(dentry, NULL, 0);
2717 	if (len <= 0) {
2718 		err = nfsd_xattr_errno(len);
2719 		goto out;
2720 	}
2721
2722 	if (len > XATTR_LIST_MAX) {
2723 		err = nfserr_xattr2big;
2724 		goto out;
2725 	}
2726
2727 	buf = kvmalloc(len, GFP_KERNEL);
2728 	if (buf == NULL) {
2729 		err = nfserr_jukebox;
2730 		goto out;
2731 	}
2732
2733 	len = vfs_listxattr(dentry, buf, len);
2734 	if (len <= 0) {
2735 		kvfree(buf);
2736 		err = nfsd_xattr_errno(len);
2737 		goto out;
2738 	}
2739
2740 	*lenp = len;
2741 	*bufp = buf;
2742
2743 	err = nfs_ok;
2744 out:
2745 	inode_unlock_shared(inode);
2746
2747 	return err;
2748 }
2749
2750 /**
2751  * nfsd_removexattr - Remove an extended attribute
2752  * @rqstp: RPC transaction being executed
2753  * @fhp: NFS filehandle of object with xattr to remove
2754  * @name: name of xattr to remove (NUL-terminated)
2755  *
2756  * Pass in a NULL pointer for delegated_inode, and let the client deal
2757  * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
2758  *
2759  * Returns nfs_ok on success, or an nfsstat in network byte order.
2760  */
2761 __be32
2762 nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
2763 {
2764 	__be32 err;
2765 	int ret;
2766
2767 	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2768 	if (err)
2769 		return err;
2770
2771 	ret = fh_want_write(fhp);
2772 	if (ret)
2773 		return nfserrno(ret);
2774
2775 	inode_lock(fhp->fh_dentry->d_inode);
2776 	err = fh_fill_pre_attrs(fhp);
2777 	if (err != nfs_ok)
2778 		goto out_unlock;
2779 	ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2780 				       name, NULL);
2781 	err = nfsd_xattr_errno(ret);
2782 	fh_fill_post_attrs(fhp);
2783 out_unlock:
2784 	inode_unlock(fhp->fh_dentry->d_inode);
2785 	fh_drop_write(fhp);
2786
2787 	return err;
2788 }
2789
2790 __be32
2791 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
2792 	      void *buf, u32 len, u32 flags)
2793 {
2794 	__be32 err;
2795 	int ret;
2796
2797 	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2798 	if (err)
2799 		return err;
2800
2801 	ret = fh_want_write(fhp);
2802 	if (ret)
2803 		return nfserrno(ret);
2804 	inode_lock(fhp->fh_dentry->d_inode);
2805 	err = fh_fill_pre_attrs(fhp);
2806 	if (err != nfs_ok)
2807 		goto out_unlock;
2808 	ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2809 				    name, buf, len, flags, NULL);
2810 	fh_fill_post_attrs(fhp);
2811 	err = nfsd_xattr_errno(ret);
2812 out_unlock:
2813 	inode_unlock(fhp->fh_dentry->d_inode);
2814 	fh_drop_write(fhp);
2815 	return err;
2816 }
2817 #endif
2818
2819 /*
2820  * Check for a user's access permissions to this inode.
2821  */
2822 __be32
2823 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
2824 		struct dentry *dentry, int acc)
2825 {
2826 	struct inode *inode = d_inode(dentry);
2827 	int err;
2828
2829 	if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
2830 		return 0;
2831 #if 0
2832 	dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
2833 		acc,
2834 		(acc & NFSD_MAY_READ)?	" read"  : "",
2835 		(acc & NFSD_MAY_WRITE)?	" write" : "",
2836 		(acc & NFSD_MAY_EXEC)?	" exec"  : "",
2837 		(acc & NFSD_MAY_SATTR)?	" sattr" : "",
2838 		(acc & NFSD_MAY_TRUNC)?
" trunc" : "", 2839 (acc & NFSD_MAY_NLM)? " nlm" : "", 2840 (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "", 2841 inode->i_mode, 2842 IS_IMMUTABLE(inode)? " immut" : "", 2843 IS_APPEND(inode)? " append" : "", 2844 __mnt_is_readonly(exp->ex_path.mnt)? " ro" : ""); 2845 dprintk(" owner %d/%d user %d/%d\n", 2846 inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid()); 2847 #endif 2848 2849 /* Normally we reject any write/sattr etc access on a read-only file 2850 * system. But if it is IRIX doing check on write-access for a 2851 * device special file, we ignore rofs. 2852 */ 2853 if (!(acc & NFSD_MAY_LOCAL_ACCESS)) 2854 if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) { 2855 if (exp_rdonly(cred, exp) || 2856 __mnt_is_readonly(exp->ex_path.mnt)) 2857 return nfserr_rofs; 2858 if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode)) 2859 return nfserr_perm; 2860 } 2861 if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode)) 2862 return nfserr_perm; 2863 2864 /* 2865 * The file owner always gets access permission for accesses that 2866 * would normally be checked at open time. This is to make 2867 * file access work even when the client has done a fchmod(fd, 0). 2868 * 2869 * However, `cp foo bar' should fail nevertheless when bar is 2870 * readonly. A sensible way to do this might be to reject all 2871 * attempts to truncate a read-only file, because a creat() call 2872 * always implies file truncation. 2873 * ... but this isn't really fair. A process may reasonably call 2874 * ftruncate on an open file descriptor on a file with perm 000. 2875 * We must trust the client to do permission checking - using "ACCESS" 2876 * with NFSv3. 2877 */ 2878 if ((acc & NFSD_MAY_OWNER_OVERRIDE) && 2879 uid_eq(inode->i_uid, current_fsuid())) 2880 return 0; 2881 2882 /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */ 2883 err = inode_permission(&nop_mnt_idmap, inode, 2884 acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); 2885 2886 /* Allow read access to binaries even when mode 111 */ 2887 if (err == -EACCES && S_ISREG(inode->i_mode) && 2888 (((acc & NFSD_MAY_MASK) == NFSD_MAY_READ) && 2889 (acc & (NFSD_MAY_OWNER_OVERRIDE | NFSD_MAY_READ_IF_EXEC)))) 2890 err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC); 2891 2892 return err? nfserrno(err) : 0; 2893 } 2894