/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * vnode op calls for Sun NFS version 2, 3 and 4
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

#include <nfs/nfs_lock.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_accesscache_flush_probe_func_t
		dtrace_nfscl_accesscache_flush_done_probe;
uint32_t nfscl_accesscache_flush_done_id;

dtrace_nfsclient_accesscache_get_probe_func_t
		dtrace_nfscl_accesscache_get_hit_probe,
		dtrace_nfscl_accesscache_get_miss_probe;
uint32_t nfscl_accesscache_get_hit_id;
uint32_t nfscl_accesscache_get_miss_id;

dtrace_nfsclient_accesscache_load_probe_func_t
		dtrace_nfscl_accesscache_load_done_probe;
uint32_t nfscl_accesscache_load_done_id;
#endif /* KDTRACE_HOOKS */

/* Defs */
#define	TRUE	1
#define	FALSE	0

extern struct nfsstatsv1 nfsstatsv1;
extern int nfsrv_useacl;
extern int nfscl_debuglevel;
MALLOC_DECLARE(M_NEWNFSREQ);

static vop_read_t	nfsfifo_read;
static vop_write_t	nfsfifo_write;
static vop_close_t	nfsfifo_close;
static int	nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
		    struct thread *);
static vop_lookup_t	nfs_lookup;
static vop_create_t	nfs_create;
static vop_mknod_t	nfs_mknod;
static vop_open_t	nfs_open;
static vop_pathconf_t	nfs_pathconf;
static vop_close_t	nfs_close;
static vop_access_t	nfs_access;
static vop_getattr_t	nfs_getattr;
static vop_setattr_t	nfs_setattr;
static vop_read_t	nfs_read;
static vop_fsync_t	nfs_fsync;
static vop_remove_t	nfs_remove;
static vop_link_t	nfs_link;
static vop_rename_t	nfs_rename;
static vop_mkdir_t	nfs_mkdir;
static vop_rmdir_t	nfs_rmdir;
static vop_symlink_t	nfs_symlink;
static vop_readdir_t	nfs_readdir;
static vop_strategy_t	nfs_strategy;
static int	nfs_lookitup(struct vnode *, char *, int,
		    struct ucred *, struct thread *, struct nfsnode **);
static int	nfs_sillyrename(struct vnode *, struct vnode *,
		    struct componentname *);
static vop_access_t	nfsspec_access;
static vop_readlink_t	nfs_readlink;
static vop_print_t	nfs_print;
static vop_advlock_t	nfs_advlock;
static vop_advlockasync_t	nfs_advlockasync;
static vop_getacl_t	nfs_getacl;
static vop_setacl_t	nfs_setacl;

/*
 * Global vfs data structures for nfs
 */

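/*
 * The "_nosig" vnode op vectors below hold the actual implementations.
 * They are reached through a bypass function that wraps each operation
 * in vop_sigdefer(), so the vnode ops (and the RPCs they issue) run
 * with stop signals deferred.
 */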
static struct vop_vector newnfs_vnodeops_nosig = {
	.vop_default =		&default_vnodeops,
	.vop_access =		nfs_access,
	.vop_advlock =		nfs_advlock,
	.vop_advlockasync =	nfs_advlockasync,
	.vop_close =		nfs_close,
	.vop_create =		nfs_create,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_getpages =		ncl_getpages,
	.vop_putpages =		ncl_putpages,
	.vop_inactive =		ncl_inactive,
	.vop_link =		nfs_link,
	.vop_lookup =		nfs_lookup,
	.vop_mkdir =		nfs_mkdir,
	.vop_mknod =		nfs_mknod,
	.vop_open =		nfs_open,
	.vop_pathconf =		nfs_pathconf,
	.vop_print =		nfs_print,
	.vop_read =		nfs_read,
	.vop_readdir =		nfs_readdir,
	.vop_readlink =		nfs_readlink,
	.vop_reclaim =		ncl_reclaim,
	.vop_remove =		nfs_remove,
	.vop_rename =		nfs_rename,
	.vop_rmdir =		nfs_rmdir,
	.vop_setattr =		nfs_setattr,
	.vop_strategy =		nfs_strategy,
	.vop_symlink =		nfs_symlink,
	.vop_write =		ncl_write,
	.vop_getacl =		nfs_getacl,
	.vop_setacl =		nfs_setacl,
};

static int
nfs_vnodeops_bypass(struct vop_generic_args *a)
{

	return (vop_sigdefer(&newnfs_vnodeops_nosig, a));
}

struct vop_vector newnfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_bypass =		nfs_vnodeops_bypass,
};

static struct vop_vector newnfs_fifoops_nosig = {
	.vop_default =		&fifo_specops,
	.vop_access =		nfsspec_access,
	.vop_close =		nfsfifo_close,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_inactive =		ncl_inactive,
	.vop_pathconf =		nfs_pathconf,
	.vop_print =		nfs_print,
	.vop_read =		nfsfifo_read,
	.vop_reclaim =		ncl_reclaim,
	.vop_setattr =		nfs_setattr,
	.vop_write =		nfsfifo_write,
};

static int
nfs_fifoops_bypass(struct vop_generic_args *a)
{

	return (vop_sigdefer(&newnfs_fifoops_nosig, a));
}

struct vop_vector newnfs_fifoops = {
	.vop_default =		&default_vnodeops,
	.vop_bypass =		nfs_fifoops_bypass,
};

static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct vattr *vap);
static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td);
static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
    char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
    char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
    struct componentname *scnp, struct sillyrename *sp);

/*
 * Global variables
 */
SYSCTL_DECL(_vfs_nfs);

static int	nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
    &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");

static int	nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
    &nfs_prime_access_cache, 0,
    "Prime NFS ACCESS cache when fetching attributes");

static int	newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
    &newnfs_commit_on_close, 0, "write+commit on close, else only write");

static int	nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
    &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");

int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
    &newnfs_directio_enable, 0, "Enable NFS directio");

int nfs_keep_dirty_on_error;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
    &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");

/*
 * This sysctl allows other processes to mmap a file that has been opened
 * O_DIRECT by a process.  In general, having processes mmap the file while
 * Direct IO is in progress can lead to data inconsistencies.  However, this
 * is allowed by default to prevent a denial of service: otherwise a
 * malicious user could open files O_DIRECT and thereby prevent other users
 * from mmap'ing them.  "Protected" environments where stricter consistency
 * guarantees are required can disable this knob.  The process that opened
 * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
 * O_DIRECT open() is not meaningful.
 */
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
    &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");

#define	NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY		\
			 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE		\
			 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)

/*
 * SMP Locking Note:
 * The list of locks after the description of the lock is the ordering
 * of other locks acquired with the lock held.
 * np->n_mtx : Protects the fields in the nfsnode.
 *       VM Object Lock
 *       VI_MTX (acquired indirectly)
 * nmp->nm_mtx : Protects the fields in the nfsmount.
 *       rep->r_mtx
 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
 *       nmp->nm_mtx
 *       rep->r_mtx
 * rep->r_mtx : Protects the fields in an nfsreq.
 */

static int
nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
    struct ucred *cred, u_int32_t *retmode)
{
	int error = 0, attrflag, i, lrupos;
	u_int32_t rmode;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;

	error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
	    &rmode, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
	if (!error) {
		lrupos = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (np->n_accesscache[i].uid == cred->cr_uid) {
				np->n_accesscache[i].mode = rmode;
				np->n_accesscache[i].stamp = time_second;
				break;
			}
			if (i > 0 && np->n_accesscache[i].stamp <
			    np->n_accesscache[lrupos].stamp)
				lrupos = i;
		}
		if (i == NFS_ACCESSCACHESIZE) {
			np->n_accesscache[lrupos].uid = cred->cr_uid;
			np->n_accesscache[lrupos].mode = rmode;
			np->n_accesscache[lrupos].stamp = time_second;
		}
		mtx_unlock(&np->n_mtx);
		if (retmode != NULL)
			*retmode = rmode;
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
#ifdef KDTRACE_HOOKS
	if (error != 0)
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
		    error);
#endif
	return (error);
}

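/*
 * The n_accesscache[] array filled in above holds the most recent
 * ACCESS results for up to NFS_ACCESSCACHESIZE uids per nfsnode.
 * Entries are replaced on an LRU basis and are considered stale after
 * nfsaccess_cache_timeout seconds (see nfs_access() below).
 */
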
/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
static int
nfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0, i, gotahit;
	u_int32_t mode, wmode, rmode;
	int v34 = NFS_ISV34(vp);
	struct nfsnode *np = VTONFS(vp);

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
	    VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
	    VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * For nfs v3 or v4, check to see if we have done this recently, and if
	 * so return our cached result instead of making an ACCESS call.
	 * If not, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about.
	 */
	if (v34) {
		if (ap->a_accmode & VREAD)
			mode = NFSACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_EXECUTE;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
		} else {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_LOOKUP;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
			if (ap->a_accmode & VDELETE_CHILD)
				mode |= NFSACCESS_MODIFY;
		}
		/* XXX safety belt, only make blanket request if caching */
		if (nfsaccess_cache_timeout > 0) {
			wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
			    NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
			    NFSACCESS_DELETE | NFSACCESS_LOOKUP;
		} else {
			wmode = mode;
		}

		/*
		 * Does our cached result allow us to give a definite yes to
		 * this request?
		 */
		gotahit = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
				if (time_second < (np->n_accesscache[i].stamp
				    + nfsaccess_cache_timeout) &&
				    (np->n_accesscache[i].mode & mode) == mode) {
					NFSINCRGLOBAL(nfsstatsv1.accesscache_hits);
					gotahit = 1;
				}
				break;
			}
		}
		mtx_unlock(&np->n_mtx);
#ifdef KDTRACE_HOOKS
		if (gotahit != 0)
			KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
			    ap->a_cred->cr_uid, mode);
		else
			KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
			    ap->a_cred->cr_uid, mode);
#endif
		if (gotahit == 0) {
			/*
			 * Either a no, or a don't know.  Go to the wire.
			 */
			NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
			error = nfs34_access_otw(vp, wmode, ap->a_td,
			    ap->a_cred, &rmode);
			if (!error &&
			    (rmode & mode) != mode)
				error = EACCES;
		}
		return (error);
	} else {
		if ((error = nfsspec_access(ap)) != 0) {
			return (error);
		}
		/*
		 * Attempt to prevent a mapped root from accessing a file
		 * which it shouldn't.  We try to read a byte from the file
		 * if the user is root and the file is not zero length.
		 * After calling nfsspec_access, we should have the correct
		 * file size cached.
		 */
		mtx_lock(&np->n_mtx);
		if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
		    && VTONFS(vp)->n_size > 0) {
			struct iovec aiov;
			struct uio auio;
			char buf[1];

			mtx_unlock(&np->n_mtx);
			aiov.iov_base = buf;
			aiov.iov_len = 1;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = 0;
			auio.uio_resid = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = ap->a_td;

			if (vp->v_type == VREG)
				error = ncl_readrpc(vp, &auio, ap->a_cred);
			else if (vp->v_type == VDIR) {
				char *bp;

				bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
				aiov.iov_base = bp;
				aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
				error = ncl_readdirrpc(vp, &auio, ap->a_cred,
				    ap->a_td);
				free(bp, M_TEMP);
			} else if (vp->v_type == VLNK)
				error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
			else
				error = EACCES;
		} else
			mtx_unlock(&np->n_mtx);
		return (error);
	}
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
static int
nfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;
	int fmode = ap->a_mode;
	struct ucred *cred;
	vm_object_t obj;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
		return (EOPNOTSUPP);

	/*
	 * For NFSv4, we need to do the Open Op before cache validation,
	 * so that we conform to RFC3530 Sec. 9.3.1.
	 */
	if (NFS_ISV4(vp)) {
		error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
		if (error) {
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
			return (error);
		}
	}

	/*
	 * Now, if this Open will be doing reading, re-validate/flush the
	 * cache, so that Close/Open coherency is maintained.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
		if (error == EINTR || error == EIO) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		if (vp->v_type == VDIR)
			np->n_direofoffset = 0;
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		if (NFS_ISV4(vp))
			np->n_change = vattr.va_filerev;
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
		    NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error == EINTR || error == EIO) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			if (NFS_ISV4(vp))
				np->n_change = vattr.va_filerev;
		}
	}

	/*
	 * If the object has >= 1 O_DIRECT active opens, we disable caching.
	 */
	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
	    (vp->v_type == VREG)) {
		if (np->n_directio_opens == 0) {
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_flag |= NNONCACHE;
		}
		np->n_directio_opens++;
	}

	/* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
	if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
		np->n_flag |= NWRITEOPENED;

	/*
	 * If this is an open for writing, capture a reference to the
	 * credentials, so they can be used by ncl_putpages(). Using
	 * these write credentials is preferable to the credentials of
	 * whatever thread happens to be doing the VOP_PUTPAGES() since
	 * the write RPCs are less likely to fail with EACCES.
	 */
	if ((fmode & FWRITE) != 0) {
		cred = np->n_writecred;
		np->n_writecred = crhold(ap->a_cred);
	} else
		cred = NULL;
	mtx_unlock(&np->n_mtx);

	if (cred != NULL)
		crfree(cred);
	vnode_create_vobject(vp, vattr.va_size, ap->a_td);

	/*
	 * If the text file has been mmap'd, flush any dirty pages to the
	 * buffer cache and then...
	 * Make sure all writes are pushed to the NFS server.  If this is not
	 * done, the modify time of the file can change while the text
	 * file is being executed.  This will cause the process that is
	 * executing the text file to be terminated.
	 */
	if (vp->v_writecount <= -1) {
		if ((obj = vp->v_object) != NULL &&
		    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
			VM_OBJECT_WLOCK(obj);
			vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(obj);
		}

		/* Now, flush the buffer cache. */
		ncl_flush(vp, MNT_WAIT, curthread, 0, 0);

		/* And, finally, make sure that n_mtime is up to date. */
		np = VTONFS(vp);
		mtx_lock(&np->n_mtx);
		np->n_mtime = np->n_vattr.na_mtime;
		mtx_unlock(&np->n_mtx);
	}
	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 *  should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough". Changing the last argument to ncl_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.
 * for NFS Version 4 - flush the dirty buffers and commit them, if
 *                     nfscl_mustflush() says this is necessary.
 *                     It is necessary if there is no write delegation held,
 *                     in order to satisfy open/close coherency.
 *                     If the file isn't cached on local stable storage,
 *                     it may be necessary in order to detect "out of space"
 *                     errors from the server, if the write delegation
 *                     issued by the server doesn't allow the file to grow.
 */
/* ARGSUSED */
static int
nfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;
	struct ucred *cred;
	int error = 0, ret, localcred = 0;
	int fmode = ap->a_fflag;

	if (NFSCL_FORCEDISM(vp->v_mount))
		return (0);
	/*
	 * During shutdown, a_cred isn't valid, so just use root.
	 */
	if (ap->a_cred == NOCRED) {
		cred = newnfs_getcred();
		localcred = 1;
	} else {
		cred = ap->a_cred;
	}
	if (vp->v_type == VREG) {
		/*
		 * Examine and clean dirty pages, regardless of NMODIFIED.
		 * This closes a major hole in close-to-open consistency.
		 * We want to push out all dirty pages (and buffers) on
		 * close, regardless of whether they were dirtied by
		 * mmap'ed writes or via write().
		 */
		if (nfs_clean_pages_on_close && vp->v_object) {
			VM_OBJECT_WLOCK(vp->v_object);
			vm_object_page_clean(vp->v_object, 0, 0, 0);
			VM_OBJECT_WUNLOCK(vp->v_object);
		}
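		/*
		 * If dirty buffers remain (NMODIFIED), flush them now
		 * using the version-specific policy described above.
		 */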
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
			if (NFS_ISV3(vp)) {
				/*
				 * Under NFSv3 we have dirty buffers to
				 * dispose of.  We must flush them to the
				 * NFS server.  We have the option of waiting
				 * all the way through the commit rpc or just
				 * waiting for the initial write.  The default
				 * is to only wait through the initial write
				 * so the data is in the server's cache,
				 * which is roughly similar to the state a
				 * standard disk subsystem leaves the file in
				 * on close().
				 *
				 * We cannot clear the NMODIFIED bit in
				 * np->n_flag due to potential races with
				 * other processes, and certainly cannot
				 * clear it if we don't commit.
				 * These races occur when there is no longer
				 * the old traditional vnode locking
				 * implemented for Vnode Ops.
				 */
				int cm = newnfs_commit_on_close ? 1 : 0;
				error = ncl_flush(vp, MNT_WAIT, ap->a_td, cm, 0);
				/* np->n_flag &= ~NMODIFIED; */
			} else if (NFS_ISV4(vp)) {
				if (nfscl_mustflush(vp) != 0) {
					int cm = newnfs_commit_on_close ? 1 : 0;
					error = ncl_flush(vp, MNT_WAIT, ap->a_td,
					    cm, 0);
					/*
					 * as above w.r.t races when clearing
					 * NMODIFIED.
					 * np->n_flag &= ~NMODIFIED;
					 */
				}
			} else {
				error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			}
			mtx_lock(&np->n_mtx);
		}
		/*
		 * Invalidate the attribute cache in all cases.
		 * An open is going to fetch fresh attrs any way, other procs
		 * on this node that have file open will be forced to do an
		 * otw attr fetch, but this is safe.
		 * --> A user found that their RPC count dropped by 20% when
		 * this was commented out and I can't see any requirement
		 * for it, so I've disabled it when negative lookups are
		 * enabled. (What does this have to do with negative lookup
		 * caching? Well nothing, except it was reported by the
		 * same user that needed negative lookup caching and I wanted
		 * there to be a way to disable it to see if it
		 * is the cause of some caching/coherency issue that might
		 * crop up.)
		 */
		if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
		mtx_unlock(&np->n_mtx);
	}

	if (NFS_ISV4(vp)) {
		/*
		 * Get attributes so "change" is up to date.
		 */
		if (error == 0 && nfscl_mustflush(vp) != 0 &&
		    vp->v_type == VREG &&
		    (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) {
			ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
			    NULL);
			if (!ret) {
				np->n_change = nfsva.na_filerev;
				(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
				    NULL, 0, 0);
			}
		}

		/*
		 * and do the close.
		 */
		ret = nfsrpc_close(vp, 0, ap->a_td);
		if (!error && ret)
			error = ret;
		if (error)
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
	}
	if (newnfs_directio_enable)
		KASSERT((np->n_directio_asyncwr == 0),
		    ("nfs_close: dirty unflushed (%d) directio buffers\n",
		     np->n_directio_asyncwr));
	if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
		mtx_lock(&np->n_mtx);
		KASSERT((np->n_directio_opens > 0),
		    ("nfs_close: unexpected value (0) of n_directio_opens\n"));
		np->n_directio_opens--;
		if (np->n_directio_opens == 0)
			np->n_flag &= ~NNONCACHE;
		mtx_unlock(&np->n_mtx);
	}
	if (localcred)
		NFSFREECRED(cred);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
static int
nfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct nfsvattr nfsva;
	struct vattr *vap = ap->a_vap;
	struct vattr vattr;

	/*
	 * Update local times for special files.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;
	mtx_unlock(&np->n_mtx);
	/*
	 * First look in the cache.
	 */
	if (ncl_getattrcache(vp, &vattr) == 0) {
		vap->va_type = vattr.va_type;
		vap->va_mode = vattr.va_mode;
		vap->va_nlink = vattr.va_nlink;
		vap->va_uid = vattr.va_uid;
		vap->va_gid = vattr.va_gid;
		vap->va_fsid = vattr.va_fsid;
		vap->va_fileid = vattr.va_fileid;
		vap->va_size = vattr.va_size;
		vap->va_blocksize = vattr.va_blocksize;
		vap->va_atime = vattr.va_atime;
		vap->va_mtime = vattr.va_mtime;
		vap->va_ctime = vattr.va_ctime;
		vap->va_gen = vattr.va_gen;
		vap->va_flags = vattr.va_flags;
		vap->va_rdev = vattr.va_rdev;
		vap->va_bytes = vattr.va_bytes;
		vap->va_filerev = vattr.va_filerev;
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
		return (0);
	}

	if (NFS_ISV34(vp) && nfs_prime_access_cache &&
	    nfsaccess_cache_timeout > 0) {
		NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
		nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
		if (ncl_getattrcache(vp, ap->a_vap) == 0) {
			nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
			return (0);
		}
	}
	error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
	if (!error)
		error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
	if (!error) {
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * nfs setattr call.
 */
static int
nfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct thread *td = curthread;	/* XXX */
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize;

#ifndef nolint
	tsize = (u_quad_t)0;
#endif

	/*
	 * Setting of flags and marking of atimes are not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			/*
			 * We run vnode_pager_setsize() early (why?);
			 * we must set np->n_size now to avoid vinvalbuf
			 * V_SAVE races that might setsize a lower
			 * value.
			 */
			mtx_lock(&np->n_mtx);
			tsize = np->n_size;
			mtx_unlock(&np->n_mtx);
			error = ncl_meta_setsize(vp, td, vap->va_size);
			mtx_lock(&np->n_mtx);
			if (np->n_flag & NMODIFIED) {
				tsize = np->n_size;
				mtx_unlock(&np->n_mtx);
				error = ncl_vinvalbuf(vp, vap->va_size == 0 ?
				    0 : V_SAVE, td, 1);
				if (error != 0) {
					vnode_pager_setsize(vp, tsize);
					return (error);
				}
				/*
				 * Call nfscl_delegmodtime() to set the modify
				 * time locally, as required.
				 */
				nfscl_delegmodtime(vp);
			} else
				mtx_unlock(&np->n_mtx);
			/*
			 * np->n_size has already been set to vap->va_size
			 * in ncl_meta_setsize(). We must set it again since
			 * nfs_loadattrcache() could be called through
			 * ncl_meta_setsize() and could modify np->n_size.
			 */
			mtx_lock(&np->n_mtx);
			np->n_vattr.na_size = np->n_size = vap->va_size;
			mtx_unlock(&np->n_mtx);
		}
	} else {
		mtx_lock(&np->n_mtx);
		if ((vap->va_mtime.tv_sec != VNOVAL ||
		    vap->va_atime.tv_sec != VNOVAL) &&
		    (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error == EINTR || error == EIO)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
	if (error && vap->va_size != VNOVAL) {
		mtx_lock(&np->n_mtx);
		np->n_size = np->n_vattr.na_size = tsize;
		vnode_pager_setsize(vp, tsize);
		mtx_unlock(&np->n_mtx);
	}
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
static int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	struct nfsnode *np = VTONFS(vp);
	int error, ret, attrflag, i;
	struct nfsvattr nfsva;

	if (NFS_ISV34(vp)) {
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
			np->n_accesscache[i].stamp = 0;
		np->n_flag |= NDELEGMOD;
		mtx_unlock(&np->n_mtx);
		KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	}
	error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
	    NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct mount *mp = dvp->v_mount;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	struct nfsmount *nmp;
	struct nfsnode *np, *newnp;
	int error = 0, attrflag, dattrflag, ltype, ncticks;
	struct thread *td = cnp->cn_thread;
	struct nfsfh *nfhp;
	struct nfsvattr dnfsva, nfsva;
	struct vattr vattr;
	struct timespec nctime;

	*vpp = NULLVP;
	if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	nmp = VFSTONFS(mp);
	np = VTONFS(dvp);

	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&np->n_mtx);
	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
		np->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
	}
	mtx_unlock(&np->n_mtx);

	if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
		return (error);
	error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
	if (error > 0 && error != ENOENT)
		return (error);
	if (error == -1) {
		/*
		 * Lookups of "." are special and always return the
		 * current directory.  cache_lookup() already handles
		 * associated locking bookkeeping, etc.
		 */
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
			/* XXX: Is this really correct? */
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}

		/*
		 * We only accept a positive hit in the cache if the
		 * change time of the file matches our cached copy.
		 * Otherwise, we discard the cache entry and fallback
		 * to doing a lookup RPC.  We also only trust cache
		 * entries for less than nm_nametimeo seconds.
		 *
		 * To better handle stale file handles and attributes,
		 * clear the attribute cache of this node if it is a
		 * leaf component, part of an open() call, and not
		 * locally modified before fetching the attributes.
		 * This should allow stale file handles to be detected
		 * here where we can fall back to a LOOKUP RPC to
		 * recover rather than having nfs_open() detect the
		 * stale file handle and failing open(2) with ESTALE.
		 */
		newvp = *vpp;
		newnp = VTONFS(newvp);
		if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
		    (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
		    !(newnp->n_flag & NMODIFIED)) {
			mtx_lock(&newnp->n_mtx);
			newnp->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
			mtx_unlock(&newnp->n_mtx);
		}
		if (nfscl_nodeleg(newvp, 0) == 0 ||
		    ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
		    VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
		    timespeccmp(&vattr.va_ctime, &nctime, ==))) {
			NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}
		cache_purge(newvp);
		if (dvp != newvp)
			vput(newvp);
		else
			vrele(newvp);
		*vpp = NULLVP;
	} else if (error == ENOENT) {
		if (dvp->v_iflag & VI_DOOMED)
			return (ENOENT);
		/*
		 * We only accept a negative hit in the cache if the
		 * modification time of the parent directory matches
		 * the cached copy in the name cache entry.
		 * Otherwise, we discard all of the negative cache
		 * entries for this directory.  We also only trust
		 * negative cache entries for up to nm_negnametimeo
		 * seconds.
		 */
		if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
		    VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
		    timespeccmp(&vattr.va_mtime, &nctime, ==)) {
			NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
			return (ENOENT);
		}
		cache_purge_negative(dvp);
	}

	newvp = NULLVP;
	NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
	error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
	    NULL);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (error) {
		if (newvp != NULLVP) {
			vput(newvp);
			*vpp = NULLVP;
		}

		if (error != ENOENT) {
			if (NFS_ISV4(dvp))
				error = nfscl_maperr(td, error, (uid_t)0,
				    (gid_t)0);
			return (error);
		}

		/* The requested file was not found. */
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN)) {
			/*
			 * XXX: UFS does a full VOP_ACCESS(dvp,
			 * VWRITE) here instead of just checking
			 * MNT_RDONLY.
			 */
			if (mp->mnt_flag & MNT_RDONLY)
				return (EROFS);
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}

		if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
			/*
			 * Cache the modification time of the parent
			 * directory from the post-op attributes in
			 * the name cache entry.  The negative cache
			 * entry will be ignored once the directory
			 * has changed.  Don't bother adding the entry
			 * if the directory has already changed.
			 */
			mtx_lock(&np->n_mtx);
			if (timespeccmp(&np->n_vattr.na_mtime,
			    &dnfsva.na_mtime, ==)) {
				mtx_unlock(&np->n_mtx);
				cache_enter_time(dvp, NULL, cnp,
				    &dnfsva.na_mtime, NULL);
			} else
				mtx_unlock(&np->n_mtx);
		}
		return (ENOENT);
	}

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
			free(nfhp, M_NFSFH);
			return (EISDIR);
		}
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    LK_EXCLUSIVE);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		*vpp = newvp;
		cnp->cn_flags |= SAVENAME;
		return (0);
	}

	if (flags & ISDOTDOT) {
		ltype = NFSVOPISLOCKED(dvp);
		error = vfs_busy(mp, MBF_NOWAIT);
		if (error != 0) {
			vfs_ref(mp);
			NFSVOPUNLOCK(dvp, 0);
			error = vfs_busy(mp, 0);
			NFSVOPLOCK(dvp, ltype | LK_RETRY);
			vfs_rel(mp);
			if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
				vfs_unbusy(mp);
				error = ENOENT;
			}
			if (error != 0)
				return (error);
		}
		NFSVOPUNLOCK(dvp, 0);
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    cnp->cn_lkflags);
		if (error == 0)
			newvp = NFSTOV(np);
		vfs_unbusy(mp);
		if (newvp != dvp)
			NFSVOPLOCK(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0) {
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
			}
			error = ENOENT;
		}
		if (error != 0)
			return (error);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
		free(nfhp, M_NFSFH);
		VREF(dvp);
		newvp = dvp;
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else {
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    cnp->cn_lkflags);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
		    !(np->n_flag & NMODIFIED)) {
			/*
			 * Flush the attribute cache when opening a
			 * leaf node to ensure that fresh attributes
			 * are fetched in nfs_open() since we did not
			 * fetch attributes from the LOOKUP reply.
			 */
			mtx_lock(&np->n_mtx);
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
			mtx_unlock(&np->n_mtx);
		}
	}
	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
		cnp->cn_flags |= SAVENAME;
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
	    attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
		cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
		    newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
	*vpp = newvp;
	return (0);
}

/*
 * nfs read call.
 * Just call ncl_bioread() to do the work.
 */
static int
nfs_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VREG:
		return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
	case VDIR:
		return (EISDIR);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * nfs readlink call
 */
static int
nfs_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EINVAL);
	return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
}

/*
 * Do a readlink rpc.
 * Called by ncl_doio() from below the buffer cache.
 */
int
ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	int error, ret, attrflag;
	struct nfsvattr nfsva;

	error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
	    &attrflag, NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	int error, ret, attrflag;
	struct nfsvattr nfsva;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	error = EIO;
	attrflag = 0;
	if (NFSHASPNFS(nmp))
		error = nfscl_doiods(vp, uiop, NULL, NULL,
		    NFSV4OPEN_ACCESSREAD, 0, cred, uiop->uio_td);
	NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error);
	if (error != 0)
		error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva,
		    &attrflag, NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs write call
 */
int
ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int *iomode, int *must_commit, int called_from_strategy)
{
	struct nfsvattr nfsva;
	int error, attrflag, ret;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vnode_mount(vp));
	error = EIO;
	attrflag = 0;
	if (NFSHASPNFS(nmp))
		error = nfscl_doiods(vp, uiop, iomode, must_commit,
		    NFSV4OPEN_ACCESSWRITE, 0, cred, uiop->uio_td);
	NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error);
	if (error != 0)
		error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
		    uiop->uio_td, &nfsva, &attrflag, NULL,
		    called_from_strategy);
	if (attrflag) {
		if (VTONFS(vp)->n_flag & ND_NFSV4)
			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
			    1);
		else
			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
			    1);
		if (ret && !error)
			error = ret;
	}
	if (DOINGASYNC(vp))
		*iomode = NFSWRITE_FILESYNC;
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
static int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct vattr *vap)
{
	struct nfsvattr nfsva, dnfsva;
	struct vnode *newvp = NULL;
	struct nfsnode *np = NULL, *dnp;
	struct nfsfh *nfhp;
	struct vattr vattr;
	int error = 0, attrflag, dattrflag;
	u_int32_t rdev;

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = vap->va_rdev;
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = 0xffffffff;
	else
		return (EOPNOTSUPP);
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
	    rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (!nfhp)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag != 0) {
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
			if (error != 0)
				vput(newvp);
		}
	}
	if (!error) {
		*vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag) {
		dnp->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(struct vop_mknod_args *ap)
{
	return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
}

static struct mtx nfs_cverf_mtx;
MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
    MTX_DEF);

static nfsquad_t
nfs_get_cverf(void)
{
	static nfsquad_t cverf;
	nfsquad_t ret;
	static int cverf_initialized = 0;

	mtx_lock(&nfs_cverf_mtx);
	if (cverf_initialized == 0) {
		cverf.lval[0] = arc4random();
		cverf.lval[1] = arc4random();
		cverf_initialized = 1;
	} else
		cverf.qval++;
	ret = cverf;
	mtx_unlock(&nfs_cverf_mtx);

	return (ret);
}

/*
 * nfs file create call
 */
static int
nfs_create(struct vop_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	struct nfsmount *nmp;
	struct nfsvattr dnfsva, nfsva;
	struct nfsfh *nfhp;
	nfsquad_t cverf;
	int error = 0, attrflag, dattrflag, fmode = 0;
	struct vattr vattr;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
	dnp = VTONFS(dvp);
	nmp = VFSTONFS(vnode_mount(dvp));
again:
	/* For NFSv4, wait until any remove is done. */
*/ 1596 mtx_lock(&dnp->n_mtx); 1597 while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) { 1598 dnp->n_flag |= NREMOVEWANT; 1599 (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0); 1600 } 1601 mtx_unlock(&dnp->n_mtx); 1602 1603 cverf = nfs_get_cverf(); 1604 error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen, 1605 vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, 1606 &nfhp, &attrflag, &dattrflag, NULL); 1607 if (!error) { 1608 if (nfhp == NULL) 1609 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr, 1610 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, 1611 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 1612 NULL); 1613 if (nfhp != NULL) 1614 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, 1615 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE); 1616 } 1617 if (dattrflag) 1618 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1619 if (!error) { 1620 newvp = NFSTOV(np); 1621 if (attrflag == 0) 1622 error = nfsrpc_getattr(newvp, cnp->cn_cred, 1623 cnp->cn_thread, &nfsva, NULL); 1624 if (error == 0) 1625 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1626 0, 1); 1627 } 1628 if (error) { 1629 if (newvp != NULL) { 1630 vput(newvp); 1631 newvp = NULL; 1632 } 1633 if (NFS_ISV34(dvp) && (fmode & O_EXCL) && 1634 error == NFSERR_NOTSUPP) { 1635 fmode &= ~O_EXCL; 1636 goto again; 1637 } 1638 } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) { 1639 if (nfscl_checksattr(vap, &nfsva)) { 1640 error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred, 1641 cnp->cn_thread, &nfsva, &attrflag, NULL); 1642 if (error && (vap->va_uid != (uid_t)VNOVAL || 1643 vap->va_gid != (gid_t)VNOVAL)) { 1644 /* try again without setting uid/gid */ 1645 vap->va_uid = (uid_t)VNOVAL; 1646 vap->va_gid = (uid_t)VNOVAL; 1647 error = nfsrpc_setattr(newvp, vap, NULL, 1648 cnp->cn_cred, cnp->cn_thread, &nfsva, 1649 &attrflag, NULL); 1650 } 1651 if (attrflag) 1652 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 1653 NULL, 0, 1); 1654 if (error != 0) 1655 vput(newvp); 1656 } 1657 } 1658 if (!error) { 1659 if ((cnp->cn_flags & MAKEENTRY) && attrflag) 1660 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 1661 NULL); 1662 *ap->a_vpp = newvp; 1663 } else if (NFS_ISV4(dvp)) { 1664 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid, 1665 vap->va_gid); 1666 } 1667 mtx_lock(&dnp->n_mtx); 1668 dnp->n_flag |= NMODIFIED; 1669 if (!dattrflag) { 1670 dnp->n_attrstamp = 0; 1671 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1672 } 1673 mtx_unlock(&dnp->n_mtx); 1674 return (error); 1675 } 1676 1677 /* 1678 * nfs file remove call 1679 * To try and make nfs semantics closer to ufs semantics, a file that has 1680 * other processes using the vnode is renamed instead of removed and then 1681 * removed later on the last close. 
1682 * - If v_usecount > 1 1683 * If a rename is not already in the works 1684 * call nfs_sillyrename() to set it up 1685 * else 1686 * do the remove rpc 1687 */ 1688 static int 1689 nfs_remove(struct vop_remove_args *ap) 1690 { 1691 struct vnode *vp = ap->a_vp; 1692 struct vnode *dvp = ap->a_dvp; 1693 struct componentname *cnp = ap->a_cnp; 1694 struct nfsnode *np = VTONFS(vp); 1695 int error = 0; 1696 struct vattr vattr; 1697 1698 KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name")); 1699 KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount")); 1700 if (vp->v_type == VDIR) 1701 error = EPERM; 1702 else if (vrefcnt(vp) == 1 || (np->n_sillyrename && 1703 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 && 1704 vattr.va_nlink > 1)) { 1705 /* 1706 * Purge the name cache so that the chance of a lookup for 1707 * the name succeeding while the remove is in progress is 1708 * minimized. Without node locking it can still happen, such 1709 * that an I/O op returns ESTALE, but since you get this if 1710 * another host removes the file.. 1711 */ 1712 cache_purge(vp); 1713 /* 1714 * throw away biocache buffers, mainly to avoid 1715 * unnecessary delayed writes later. 1716 */ 1717 error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1); 1718 if (error != EINTR && error != EIO) 1719 /* Do the rpc */ 1720 error = nfs_removerpc(dvp, vp, cnp->cn_nameptr, 1721 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread); 1722 /* 1723 * Kludge City: If the first reply to the remove rpc is lost.. 1724 * the reply to the retransmitted request will be ENOENT 1725 * since the file was in fact removed 1726 * Therefore, we cheat and return success. 1727 */ 1728 if (error == ENOENT) 1729 error = 0; 1730 } else if (!np->n_sillyrename) 1731 error = nfs_sillyrename(dvp, vp, cnp); 1732 mtx_lock(&np->n_mtx); 1733 np->n_attrstamp = 0; 1734 mtx_unlock(&np->n_mtx); 1735 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 1736 return (error); 1737 } 1738 1739 /* 1740 * nfs file remove rpc called from nfs_inactive 1741 */ 1742 int 1743 ncl_removeit(struct sillyrename *sp, struct vnode *vp) 1744 { 1745 /* 1746 * Make sure that the directory vnode is still valid. 1747 * XXX we should lock sp->s_dvp here. 1748 */ 1749 if (sp->s_dvp->v_type == VBAD) 1750 return (0); 1751 return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen, 1752 sp->s_cred, NULL)); 1753 } 1754 1755 /* 1756 * Nfs remove rpc, called from nfs_remove() and ncl_removeit(). 
1757 */ 1758 static int 1759 nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, 1760 int namelen, struct ucred *cred, struct thread *td) 1761 { 1762 struct nfsvattr dnfsva; 1763 struct nfsnode *dnp = VTONFS(dvp); 1764 int error = 0, dattrflag; 1765 1766 mtx_lock(&dnp->n_mtx); 1767 dnp->n_flag |= NREMOVEINPROG; 1768 mtx_unlock(&dnp->n_mtx); 1769 error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva, 1770 &dattrflag, NULL); 1771 mtx_lock(&dnp->n_mtx); 1772 if ((dnp->n_flag & NREMOVEWANT)) { 1773 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG); 1774 mtx_unlock(&dnp->n_mtx); 1775 wakeup((caddr_t)dnp); 1776 } else { 1777 dnp->n_flag &= ~NREMOVEINPROG; 1778 mtx_unlock(&dnp->n_mtx); 1779 } 1780 if (dattrflag) 1781 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1782 mtx_lock(&dnp->n_mtx); 1783 dnp->n_flag |= NMODIFIED; 1784 if (!dattrflag) { 1785 dnp->n_attrstamp = 0; 1786 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1787 } 1788 mtx_unlock(&dnp->n_mtx); 1789 if (error && NFS_ISV4(dvp)) 1790 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1791 return (error); 1792 } 1793 1794 /* 1795 * nfs file rename call 1796 */ 1797 static int 1798 nfs_rename(struct vop_rename_args *ap) 1799 { 1800 struct vnode *fvp = ap->a_fvp; 1801 struct vnode *tvp = ap->a_tvp; 1802 struct vnode *fdvp = ap->a_fdvp; 1803 struct vnode *tdvp = ap->a_tdvp; 1804 struct componentname *tcnp = ap->a_tcnp; 1805 struct componentname *fcnp = ap->a_fcnp; 1806 struct nfsnode *fnp = VTONFS(ap->a_fvp); 1807 struct nfsnode *tdnp = VTONFS(ap->a_tdvp); 1808 struct nfsv4node *newv4 = NULL; 1809 int error; 1810 1811 KASSERT((tcnp->cn_flags & HASBUF) != 0 && 1812 (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name")); 1813 /* Check for cross-device rename */ 1814 if ((fvp->v_mount != tdvp->v_mount) || 1815 (tvp && (fvp->v_mount != tvp->v_mount))) { 1816 error = EXDEV; 1817 goto out; 1818 } 1819 1820 if (fvp == tvp) { 1821 printf("nfs_rename: fvp == tvp (can't happen)\n"); 1822 error = 0; 1823 goto out; 1824 } 1825 if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0) 1826 goto out; 1827 1828 /* 1829 * We have to flush B_DELWRI data prior to renaming 1830 * the file. If we don't, the delayed-write buffers 1831 * can be flushed out later after the file has gone stale 1832 * under NFSV3. NFSV2 does not have this problem because 1833 * ( as far as I can tell ) it flushes dirty buffers more 1834 * often. 1835 * 1836 * Skip the rename operation if the fsync fails, this can happen 1837 * due to the server's volume being full, when we pushed out data 1838 * that was written back to our cache earlier. Not checking for 1839 * this condition can result in potential (silent) data loss. 1840 */ 1841 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread); 1842 NFSVOPUNLOCK(fvp, 0); 1843 if (!error && tvp) 1844 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread); 1845 if (error) 1846 goto out; 1847 1848 /* 1849 * If the tvp exists and is in use, sillyrename it before doing the 1850 * rename of the new file over it. 1851 * XXX Can't sillyrename a directory. 
/*
 * nfs file rename call
 */
static int
nfs_rename(struct vop_rename_args *ap)
{
    struct vnode *fvp = ap->a_fvp;
    struct vnode *tvp = ap->a_tvp;
    struct vnode *fdvp = ap->a_fdvp;
    struct vnode *tdvp = ap->a_tdvp;
    struct componentname *tcnp = ap->a_tcnp;
    struct componentname *fcnp = ap->a_fcnp;
    struct nfsnode *fnp = VTONFS(ap->a_fvp);
    struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
    struct nfsv4node *newv4 = NULL;
    int error;

    KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
        (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
    /* Check for cross-device rename */
    if ((fvp->v_mount != tdvp->v_mount) ||
        (tvp && (fvp->v_mount != tvp->v_mount))) {
        error = EXDEV;
        goto out;
    }

    if (fvp == tvp) {
        printf("nfs_rename: fvp == tvp (can't happen)\n");
        error = 0;
        goto out;
    }
    if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
        goto out;

    /*
     * We have to flush B_DELWRI data prior to renaming
     * the file.  If we don't, the delayed-write buffers
     * can be flushed out later after the file has gone stale
     * under NFSV3.  NFSV2 does not have this problem because
     * (as far as I can tell) it flushes dirty buffers more
     * often.
     *
     * Skip the rename operation if the fsync fails; this can happen
     * due to the server's volume being full when we pushed out data
     * that was written back to our cache earlier.  Not checking for
     * this condition can result in potential (silent) data loss.
     */
    error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
    NFSVOPUNLOCK(fvp, 0);
    if (!error && tvp)
        error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
    if (error)
        goto out;

    /*
     * If the tvp exists and is in use, sillyrename it before doing the
     * rename of the new file over it.
     * XXX Can't sillyrename a directory.
     */
    if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
        tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
        vput(tvp);
        tvp = NULL;
    }

    error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
        tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
        tcnp->cn_thread);

    if (error == 0 && NFS_ISV4(tdvp)) {
        /*
         * For NFSv4, check to see if it is the same name and
         * replace the name, if it is different.
         */
        newv4 = malloc(
            sizeof (struct nfsv4node) +
            tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
            M_NFSV4NODE, M_WAITOK);
        mtx_lock(&tdnp->n_mtx);
        mtx_lock(&fnp->n_mtx);
        if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
            (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
             NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
             tcnp->cn_namelen) ||
             tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
             NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
             tdnp->n_fhp->nfh_len))) {
#ifdef notdef
            { char nnn[100]; int nnnl;
            nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
            bcopy(tcnp->cn_nameptr, nnn, nnnl);
            nnn[nnnl] = '\0';
            printf("ren replace=%s\n",nnn);
            }
#endif
            free(fnp->n_v4, M_NFSV4NODE);
            fnp->n_v4 = newv4;
            newv4 = NULL;
            fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
            fnp->n_v4->n4_namelen = tcnp->cn_namelen;
            NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
                tdnp->n_fhp->nfh_len);
            NFSBCOPY(tcnp->cn_nameptr,
                NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
        }
        mtx_unlock(&tdnp->n_mtx);
        mtx_unlock(&fnp->n_mtx);
        if (newv4 != NULL)
            free(newv4, M_NFSV4NODE);
    }

    if (fvp->v_type == VDIR) {
        if (tvp != NULL && tvp->v_type == VDIR)
            cache_purge(tdvp);
        cache_purge(fdvp);
    }

out:
    if (tdvp == tvp)
        vrele(tdvp);
    else
        vput(tdvp);
    if (tvp)
        vput(tvp);
    vrele(fdvp);
    vrele(fvp);
    /*
     * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
     */
    if (error == ENOENT)
        error = 0;
    return (error);
}

/*
 * nfs file rename rpc called from nfs_sillyrename()
 */
static int
nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
    struct sillyrename *sp)
{

    return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
        sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
        scnp->cn_thread));
}
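
/*
 * The rename code above records a file's NFSv4 "name" as the parent
 * directory's file handle followed by the component name, allocated as
 * one variable-sized block: sizeof(struct nfsv4node) + fhlen + namelen
 * - 1, where the -1 accounts for the one-byte array member at the end
 * of the struct.  The sketch below is illustrative only; "v4node" and
 * its fields are hypothetical stand-ins for struct nfsv4node.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct v4node {
    short n4_fhlen;              /* length of directory file handle */
    short n4_namelen;            /* length of component name */
    unsigned char n4_data[1];    /* FH bytes followed by name bytes */
};
#define V4NODENAME(p) ((char *)&(p)->n4_data[(p)->n4_fhlen])

int
main(void)
{
    const unsigned char fh[4] = { 0xde, 0xad, 0xbe, 0xef };
    const char *name = "renamed.txt";
    struct v4node *n4;

    /* One block: header + FH + name; -1 for the n4_data[1] member. */
    n4 = malloc(sizeof(*n4) + sizeof(fh) + strlen(name) - 1);
    n4->n4_fhlen = sizeof(fh);
    n4->n4_namelen = strlen(name);
    memcpy(n4->n4_data, fh, sizeof(fh));
    memcpy(V4NODENAME(n4), name, strlen(name));
    printf("fhlen=%d name=%.*s\n", n4->n4_fhlen, n4->n4_namelen,
        V4NODENAME(n4));
    free(n4);
    return (0);
}
#endif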
/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
static int
nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
    int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
    int tnamelen, struct ucred *cred, struct thread *td)
{
    struct nfsvattr fnfsva, tnfsva;
    struct nfsnode *fdnp = VTONFS(fdvp);
    struct nfsnode *tdnp = VTONFS(tdvp);
    int error = 0, fattrflag, tattrflag;

    error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
        tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
        &tattrflag, NULL, NULL);
    mtx_lock(&fdnp->n_mtx);
    fdnp->n_flag |= NMODIFIED;
    if (fattrflag != 0) {
        mtx_unlock(&fdnp->n_mtx);
        (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
    } else {
        fdnp->n_attrstamp = 0;
        mtx_unlock(&fdnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
    }
    mtx_lock(&tdnp->n_mtx);
    tdnp->n_flag |= NMODIFIED;
    if (tattrflag != 0) {
        mtx_unlock(&tdnp->n_mtx);
        (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
    } else {
        tdnp->n_attrstamp = 0;
        mtx_unlock(&tdnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
    }
    if (error && NFS_ISV4(fdvp))
        error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
    return (error);
}
/*
 * nfs hard link create call
 */
static int
nfs_link(struct vop_link_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vnode *tdvp = ap->a_tdvp;
    struct componentname *cnp = ap->a_cnp;
    struct nfsnode *np, *tdnp;
    struct nfsvattr nfsva, dnfsva;
    int error = 0, attrflag, dattrflag;

    /*
     * Push all writes to the server, so that the attribute cache
     * doesn't get "out of sync" with the server.
     * XXX There should be a better way!
     */
    VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);

    error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
        cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
        &dattrflag, NULL);
    tdnp = VTONFS(tdvp);
    mtx_lock(&tdnp->n_mtx);
    tdnp->n_flag |= NMODIFIED;
    if (dattrflag != 0) {
        mtx_unlock(&tdnp->n_mtx);
        (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
    } else {
        tdnp->n_attrstamp = 0;
        mtx_unlock(&tdnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
    }
    if (attrflag)
        (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
    else {
        np = VTONFS(vp);
        mtx_lock(&np->n_mtx);
        np->n_attrstamp = 0;
        mtx_unlock(&np->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
    }
    /*
     * If negative lookup caching is enabled, I might as well
     * add an entry for this node. Not necessary for correctness,
     * but if negative caching is enabled, then the system
     * must care about lookup caching hit rate, so...
     */
    if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
        (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
        cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
    }
    if (error && NFS_ISV4(vp))
        error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
            (gid_t)0);
    return (error);
}

/*
 * nfs symbolic link create call
 */
static int
nfs_symlink(struct vop_symlink_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vattr *vap = ap->a_vap;
    struct componentname *cnp = ap->a_cnp;
    struct nfsvattr nfsva, dnfsva;
    struct nfsfh *nfhp;
    struct nfsnode *np = NULL, *dnp;
    struct vnode *newvp = NULL;
    int error = 0, attrflag, dattrflag, ret;

    vap->va_type = VLNK;
    error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
        ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
        &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
    if (nfhp) {
        ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
            &np, NULL, LK_EXCLUSIVE);
        if (!ret)
            newvp = NFSTOV(np);
        else if (!error)
            error = ret;
    }
    if (newvp != NULL) {
        if (attrflag)
            (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
                0, 1);
    } else if (!error) {
        /*
         * If we do not have an error and we could not extract the
         * newvp from the response due to the request being NFSv2, we
         * have to do a lookup in order to obtain a newvp to return.
         */
        error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_cred, cnp->cn_thread, &np);
        if (!error)
            newvp = NFSTOV(np);
    }
    if (error) {
        if (newvp)
            vput(newvp);
        if (NFS_ISV4(dvp))
            error = nfscl_maperr(cnp->cn_thread, error,
                vap->va_uid, vap->va_gid);
    } else {
        *ap->a_vpp = newvp;
    }

    dnp = VTONFS(dvp);
    mtx_lock(&dnp->n_mtx);
    dnp->n_flag |= NMODIFIED;
    if (dattrflag != 0) {
        mtx_unlock(&dnp->n_mtx);
        (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
    } else {
        dnp->n_attrstamp = 0;
        mtx_unlock(&dnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
    }
    /*
     * If negative lookup caching is enabled, I might as well
     * add an entry for this node. Not necessary for correctness,
     * but if negative caching is enabled, then the system
     * must care about lookup caching hit rate, so...
     */
    if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
        (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
        cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL);
    }
    return (error);
}
/*
 * nfs make dir call
 */
static int
nfs_mkdir(struct vop_mkdir_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vattr *vap = ap->a_vap;
    struct componentname *cnp = ap->a_cnp;
    struct nfsnode *np = NULL, *dnp;
    struct vnode *newvp = NULL;
    struct vattr vattr;
    struct nfsfh *nfhp;
    struct nfsvattr nfsva, dnfsva;
    int error = 0, attrflag, dattrflag, ret;

    if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
        return (error);
    vap->va_type = VDIR;
    error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
        vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
        &attrflag, &dattrflag, NULL);
    dnp = VTONFS(dvp);
    mtx_lock(&dnp->n_mtx);
    dnp->n_flag |= NMODIFIED;
    if (dattrflag != 0) {
        mtx_unlock(&dnp->n_mtx);
        (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
    } else {
        dnp->n_attrstamp = 0;
        mtx_unlock(&dnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
    }
    if (nfhp) {
        ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
            &np, NULL, LK_EXCLUSIVE);
        if (!ret) {
            newvp = NFSTOV(np);
            if (attrflag)
                (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
                    NULL, 0, 1);
        } else if (!error)
            error = ret;
    }
    if (!error && newvp == NULL) {
        error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_cred, cnp->cn_thread, &np);
        if (!error) {
            newvp = NFSTOV(np);
            if (newvp->v_type != VDIR)
                error = EEXIST;
        }
    }
    if (error) {
        if (newvp)
            vput(newvp);
        if (NFS_ISV4(dvp))
            error = nfscl_maperr(cnp->cn_thread, error,
                vap->va_uid, vap->va_gid);
    } else {
        /*
         * If negative lookup caching is enabled, I might as well
         * add an entry for this node. Not necessary for correctness,
         * but if negative caching is enabled, then the system
         * must care about lookup caching hit rate, so...
         */
        if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
            (cnp->cn_flags & MAKEENTRY) &&
            attrflag != 0 && dattrflag != 0)
            cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
                &dnfsva.na_ctime);
        *ap->a_vpp = newvp;
    }
    return (error);
}
/*
 * nfs remove directory call
 */
static int
nfs_rmdir(struct vop_rmdir_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vnode *dvp = ap->a_dvp;
    struct componentname *cnp = ap->a_cnp;
    struct nfsnode *dnp;
    struct nfsvattr dnfsva;
    int error, dattrflag;

    if (dvp == vp)
        return (EINVAL);
    error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
        cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
    dnp = VTONFS(dvp);
    mtx_lock(&dnp->n_mtx);
    dnp->n_flag |= NMODIFIED;
    if (dattrflag != 0) {
        mtx_unlock(&dnp->n_mtx);
        (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
    } else {
        dnp->n_attrstamp = 0;
        mtx_unlock(&dnp->n_mtx);
        KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
    }

    cache_purge(dvp);
    cache_purge(vp);
    if (error && NFS_ISV4(dvp))
        error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
            (gid_t)0);
    /*
     * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
     */
    if (error == ENOENT)
        error = 0;
    return (error);
}

/*
 * nfs readdir call
 */
static int
nfs_readdir(struct vop_readdir_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct nfsnode *np = VTONFS(vp);
    struct uio *uio = ap->a_uio;
    ssize_t tresid, left;
    int error = 0;
    struct vattr vattr;

    if (ap->a_eofflag != NULL)
        *ap->a_eofflag = 0;
    if (vp->v_type != VDIR)
        return (EPERM);

    /*
     * First, check for hit on the EOF offset cache
     */
    if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
        (np->n_flag & NMODIFIED) == 0) {
        if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
            mtx_lock(&np->n_mtx);
            if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
                !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
                mtx_unlock(&np->n_mtx);
                NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
                if (ap->a_eofflag != NULL)
                    *ap->a_eofflag = 1;
                return (0);
            } else
                mtx_unlock(&np->n_mtx);
        }
    }

    /*
     * NFS always guarantees that directory entries don't straddle
     * DIRBLKSIZ boundaries. As such, we need to limit the size
     * to an exact multiple of DIRBLKSIZ, to avoid copying a partial
     * directory entry.
     */
    left = uio->uio_resid % DIRBLKSIZ;
    if (left == uio->uio_resid)
        return (EINVAL);
    uio->uio_resid -= left;

    /*
     * Call ncl_bioread() to do the real work.
     */
    tresid = uio->uio_resid;
    error = ncl_bioread(vp, uio, 0, ap->a_cred);

    if (!error && uio->uio_resid == tresid) {
        NFSINCRGLOBAL(nfsstatsv1.direofcache_misses);
        if (ap->a_eofflag != NULL)
            *ap->a_eofflag = 1;
    }

    /* Add the partial DIRBLKSIZ (left) back in. */
    uio->uio_resid += left;
    return (error);
}
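
/*
 * The DIRBLKSIZ trimming done by nfs_readdir() above, as a minimal
 * standalone sketch (illustrative only).  DIRBLKSIZ is assumed to be
 * 512, its usual value in <sys/dirent.h>; the resid value is made up.
 */
#if 0
#include <stdio.h>

#define DIRBLKSIZ 512

int
main(void)
{
    long resid = 1300;                  /* caller's buffer size */
    long left = resid % DIRBLKSIZ;      /* partial trailing block */

    if (left == resid) {
        /* Buffer smaller than one block: nfs_readdir() returns EINVAL. */
        printf("EINVAL\n");
        return (1);
    }
    resid -= left;                      /* read whole blocks only */
    printf("read %ld bytes, %ld withheld\n", resid, left);
    /* ...after the read, 'left' is added back to uio_resid... */
    return (0);
}
#endif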
/*
 * Readdir rpc call.
 * Called from below the buffer cache by ncl_doio().
 */
int
ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
    struct nfsvattr nfsva;
    nfsuint64 *cookiep, cookie;
    struct nfsnode *dnp = VTONFS(vp);
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    int error = 0, eof, attrflag;

    KASSERT(uiop->uio_iovcnt == 1 &&
        (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
        (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
        ("nfs readdirrpc bad uio"));

    /*
     * If there is no cookie, assume directory was stale.
     */
    ncl_dircookie_lock(dnp);
    cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
    if (cookiep) {
        cookie = *cookiep;
        ncl_dircookie_unlock(dnp);
    } else {
        ncl_dircookie_unlock(dnp);
        return (NFSERR_BAD_COOKIE);
    }

    if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
        (void)ncl_fsinfo(nmp, vp, cred, td);

    error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
        &attrflag, &eof, NULL);
    if (attrflag)
        (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

    if (!error) {
        /*
         * We are now either at the end of the directory or have filled
         * the block.
         */
        if (eof)
            dnp->n_direofoffset = uiop->uio_offset;
        else {
            if (uiop->uio_resid > 0)
                printf("EEK! readdirrpc resid > 0\n");
            ncl_dircookie_lock(dnp);
            cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
            *cookiep = cookie;
            ncl_dircookie_unlock(dnp);
        }
    } else if (NFS_ISV4(vp)) {
        error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
    }
    return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
 */
int
ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
    struct nfsvattr nfsva;
    nfsuint64 *cookiep, cookie;
    struct nfsnode *dnp = VTONFS(vp);
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    int error = 0, attrflag, eof;

    KASSERT(uiop->uio_iovcnt == 1 &&
        (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
        (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
        ("nfs readdirplusrpc bad uio"));

    /*
     * If there is no cookie, assume directory was stale.
     */
    ncl_dircookie_lock(dnp);
    cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
    if (cookiep) {
        cookie = *cookiep;
        ncl_dircookie_unlock(dnp);
    } else {
        ncl_dircookie_unlock(dnp);
        return (NFSERR_BAD_COOKIE);
    }

    if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
        (void)ncl_fsinfo(nmp, vp, cred, td);
    error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
        &attrflag, &eof, NULL);
    if (attrflag)
        (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

    if (!error) {
        /*
         * We are now either at the end of the directory or have filled
         * the block.
         */
        if (eof)
            dnp->n_direofoffset = uiop->uio_offset;
        else {
            if (uiop->uio_resid > 0)
                printf("EEK! readdirplusrpc resid > 0\n");
            ncl_dircookie_lock(dnp);
            cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
            *cookiep = cookie;
            ncl_dircookie_unlock(dnp);
        }
    } else if (NFS_ISV4(vp)) {
        error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
    }
    return (error);
}
/*
 * Silly rename.  To make the NFS filesystem, which is stateless, look a
 * little more like "ufs", a remove of an active vnode is translated into a
 * rename to a funny looking filename that is removed by nfs_inactive on the
 * nfsnode.  There is the potential for another process on a different client
 * to create the same funny name between the time that nfs_lookitup() fails
 * and nfs_rename() completes, but...
 */
static int
nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
    struct sillyrename *sp;
    struct nfsnode *np;
    int error;
    short pid;
    unsigned int lticks;

    cache_purge(dvp);
    np = VTONFS(vp);
    KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
    sp = malloc(sizeof (struct sillyrename),
        M_NEWNFSREQ, M_WAITOK);
    sp->s_cred = crhold(cnp->cn_cred);
    sp->s_dvp = dvp;
    VREF(dvp);

    /*
     * Fudge together a funny name.
     * Changing the format of the funny name to accommodate more
     * sillynames per directory.
     * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is
     * CPU ticks since boot.
     */
    pid = cnp->cn_thread->td_proc->p_pid;
    lticks = (unsigned int)ticks;
    for ( ; ; ) {
        sp->s_namlen = sprintf(sp->s_name,
            ".nfs.%08x.%04x4.4", lticks,
            pid);
        if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
            cnp->cn_thread, NULL))
            break;
        lticks++;
    }
    error = nfs_renameit(dvp, vp, cnp, sp);
    if (error)
        goto bad;
    error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
        cnp->cn_thread, &np);
    np->n_sillyrename = sp;
    return (0);
bad:
    vrele(sp->s_dvp);
    crfree(sp->s_cred);
    free(sp, M_NEWNFSREQ);
    return (error);
}
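
/*
 * The silly name generated above, as a userland sketch (illustrative
 * only).  Note that the format string really does yield
 * ".nfs.<ticks>.<pid>4.4", with a literal '4' appended to the pid
 * field; uniqueness, not prettiness, is what matters, and on a
 * collision the kernel loop simply bumps lticks and retries.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    char name[32];
    unsigned int lticks = 0x0001c0de;           /* stand-in for 'ticks' */
    unsigned short pid = (unsigned short)getpid();
    int namlen;

    namlen = sprintf(name, ".nfs.%08x.%04x4.4", lticks, pid);
    printf("%s (len %d)\n", name, namlen);
    return (0);
}
#endif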
/*
 * Look up a file name and optionally either update the file handle or
 * allocate an nfsnode, depending on the value of npp.
 * npp == NULL  --> just do the lookup
 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 *                  handled too
 * *npp != NULL --> update the file handle in the vnode
 */
static int
nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
    struct thread *td, struct nfsnode **npp)
{
    struct vnode *newvp = NULL, *vp;
    struct nfsnode *np, *dnp = VTONFS(dvp);
    struct nfsfh *nfhp, *onfhp;
    struct nfsvattr nfsva, dnfsva;
    struct componentname cn;
    int error = 0, attrflag, dattrflag;
    u_int hash;

    error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
        &nfhp, &attrflag, &dattrflag, NULL);
    if (dattrflag)
        (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
    if (npp && !error) {
        if (*npp != NULL) {
            np = *npp;
            vp = NFSTOV(np);
            /*
             * For NFSv4, check to see if it is the same name and
             * replace the name, if it is different.
             */
            if (np->n_v4 != NULL && nfsva.na_type == VREG &&
                (np->n_v4->n4_namelen != len ||
                 NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
                 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
                 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
                 dnp->n_fhp->nfh_len))) {
#ifdef notdef
                { char nnn[100]; int nnnl;
                nnnl = (len < 100) ? len : 99;
                bcopy(name, nnn, nnnl);
                nnn[nnnl] = '\0';
                printf("replace=%s\n",nnn);
                }
#endif
                free(np->n_v4, M_NFSV4NODE);
                np->n_v4 = malloc(
                    sizeof (struct nfsv4node) +
                    dnp->n_fhp->nfh_len + len - 1,
                    M_NFSV4NODE, M_WAITOK);
                np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
                np->n_v4->n4_namelen = len;
                NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
                    dnp->n_fhp->nfh_len);
                NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
            }
            hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
                FNV1_32_INIT);
            onfhp = np->n_fhp;
            /*
             * Rehash node for new file handle.
             */
            vfs_hash_rehash(vp, hash);
            np->n_fhp = nfhp;
            if (onfhp != NULL)
                free(onfhp, M_NFSFH);
            newvp = NFSTOV(np);
        } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
            free(nfhp, M_NFSFH);
            VREF(dvp);
            newvp = dvp;
        } else {
            cn.cn_nameptr = name;
            cn.cn_namelen = len;
            error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
                &np, NULL, LK_EXCLUSIVE);
            if (error)
                return (error);
            newvp = NFSTOV(np);
        }
        if (!attrflag && *npp == NULL) {
            if (newvp == dvp)
                vrele(newvp);
            else
                vput(newvp);
            return (ENOENT);
        }
        if (attrflag)
            (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
                0, 1);
    }
    if (npp && *npp == NULL) {
        if (error) {
            if (newvp) {
                if (newvp == dvp)
                    vrele(newvp);
                else
                    vput(newvp);
            }
        } else
            *npp = np;
    }
    if (error && NFS_ISV4(dvp))
        error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
    return (error);
}
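
/*
 * The vfs_hash_rehash() call above re-files the vnode under the FNV-1
 * 32-bit hash of its new file handle.  Below is a standalone sketch of
 * that hash (illustrative only; cf. fnv_32_buf() in <sys/fnv_hash.h>).
 * The file handle bytes are made up.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FNV1_32_INIT 0x811c9dc5u        /* 32-bit FNV-1 offset basis */
#define FNV_32_PRIME 0x01000193u        /* 32-bit FNV-1 prime */

static uint32_t
fnv_32_buf(const void *buf, size_t len, uint32_t hval)
{
    const uint8_t *s = buf;

    while (len-- != 0) {
        hval *= FNV_32_PRIME;           /* FNV-1: multiply, then xor */
        hval ^= *s++;
    }
    return (hval);
}

int
main(void)
{
    const char fh[] = "example-file-handle";

    printf("hash = 0x%08x\n", fnv_32_buf(fh, strlen(fh), FNV1_32_INIT));
    return (0);
}
#endif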
/*
 * Nfs Version 3 and 4 commit rpc
 */
int
ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
    struct thread *td)
{
    struct nfsvattr nfsva;
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    struct nfsnode *np;
    struct uio uio;
    int error, attrflag;

    np = VTONFS(vp);
    error = EIO;
    attrflag = 0;
    if (NFSHASPNFS(nmp) && (np->n_flag & NDSCOMMIT) != 0) {
        uio.uio_offset = offset;
        uio.uio_resid = cnt;
        error = nfscl_doiods(vp, &uio, NULL, NULL,
            NFSV4OPEN_ACCESSWRITE, 1, cred, td);
        if (error != 0) {
            mtx_lock(&np->n_mtx);
            np->n_flag &= ~NDSCOMMIT;
            mtx_unlock(&np->n_mtx);
        }
    }
    if (error != 0) {
        mtx_lock(&nmp->nm_mtx);
        if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
            mtx_unlock(&nmp->nm_mtx);
            return (0);
        }
        mtx_unlock(&nmp->nm_mtx);
        error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
            &attrflag, NULL);
    }
    if (attrflag != 0)
        (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
            0, 1);
    if (error != 0 && NFS_ISV4(vp))
        error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
    return (error);
}

/*
 * Strategy routine.
 * For async requests when nfsiod(s) are running, queue the request by
 * calling ncl_asyncio(); otherwise just call ncl_doio() to do the
 * request.
 */
static int
nfs_strategy(struct vop_strategy_args *ap)
{
    struct buf *bp;
    struct vnode *vp;
    struct ucred *cr;

    bp = ap->a_bp;
    vp = ap->a_vp;
    KASSERT(bp->b_vp == vp, ("missing b_getvp"));
    KASSERT(!(bp->b_flags & B_DONE),
        ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
    BUF_ASSERT_HELD(bp);

    if (vp->v_type == VREG && bp->b_blkno == bp->b_lblkno)
        bp->b_blkno = bp->b_lblkno * (vp->v_bufobj.bo_bsize /
            DEV_BSIZE);
    if (bp->b_iocmd == BIO_READ)
        cr = bp->b_rcred;
    else
        cr = bp->b_wcred;

    /*
     * If the op is asynchronous and an i/o daemon is waiting,
     * queue the request and wake the daemon up; otherwise
     * just do it ourselves.
     */
    if ((bp->b_flags & B_ASYNC) == 0 ||
        ncl_asyncio(VFSTONFS(vp->v_mount), bp, NOCRED, curthread))
        (void) ncl_doio(vp, bp, cr, curthread, 1);
    return (0);
}

/*
 * fsync vnode op. Just call ncl_flush() with commit == 1.
 */
/* ARGSUSED */
static int
nfs_fsync(struct vop_fsync_args *ap)
{

    if (ap->a_vp->v_type != VREG) {
        /*
         * For NFS, metadata is changed synchronously on the server,
         * so there is nothing to flush. Also, ncl_flush() clears
         * the NMODIFIED flag and that shouldn't be done here for
         * directories.
         */
        return (0);
    }
    return (ncl_flush(ap->a_vp, ap->a_waitfor, ap->a_td, 1, 0));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 * If the called_from_renewthread argument is TRUE, it has been called
 * from the NFSv4 renew thread and, as such, cannot block indefinitely
 * waiting for a buffer write to complete.
 */
int
ncl_flush(struct vnode *vp, int waitfor, struct thread *td,
    int commit, int called_from_renewthread)
{
    struct nfsnode *np = VTONFS(vp);
    struct buf *bp;
    int i;
    struct buf *nbp;
    struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
    int passone = 1, trycnt = 0;
    u_quad_t off, endoff, toff;
    struct ucred *wcred = NULL;
    struct buf **bvec = NULL;
    struct bufobj *bo;
#ifndef NFS_COMMITBVECSIZ
#define NFS_COMMITBVECSIZ 20
#endif
    struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
    u_int bvecsize = 0, bveccount;

    if (called_from_renewthread != 0)
        slptimeo = hz;
    if (nmp->nm_flag & NFSMNT_INT)
        slpflag = PCATCH;
    if (!commit)
        passone = 0;
    bo = &vp->v_bufobj;
    /*
     * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
     * server, but has not been committed to stable storage on the server
     * yet. On the first pass, the byte range is worked out and the commit
     * rpc is done. On the second pass, ncl_writebp() is called to do the
     * job.
     */
again:
    off = (u_quad_t)-1;
    endoff = 0;
    bvecpos = 0;
    if (NFS_ISV34(vp) && commit) {
        if (bvec != NULL && bvec != bvec_on_stack)
            free(bvec, M_TEMP);
        /*
         * Count up how many buffers are waiting for a commit.
         */
        bveccount = 0;
        BO_LOCK(bo);
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
            if (!BUF_ISLOCKED(bp) &&
                (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
                == (B_DELWRI | B_NEEDCOMMIT))
                bveccount++;
        }
        /*
         * Allocate space to remember the list of bufs to commit. It is
         * important to use M_NOWAIT here to avoid a race with nfs_write.
         * If we can't get memory (for whatever reason), we will end up
         * committing the buffers one-by-one in the loop below.
         */
        if (bveccount > NFS_COMMITBVECSIZ) {
            /*
             * Release the vnode interlock to avoid a lock
             * order reversal.
             */
            BO_UNLOCK(bo);
            bvec = (struct buf **)
                malloc(bveccount * sizeof(struct buf *),
                M_TEMP, M_NOWAIT);
            BO_LOCK(bo);
            if (bvec == NULL) {
                bvec = bvec_on_stack;
                bvecsize = NFS_COMMITBVECSIZ;
            } else
                bvecsize = bveccount;
        } else {
            bvec = bvec_on_stack;
            bvecsize = NFS_COMMITBVECSIZ;
        }
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
            if (bvecpos >= bvecsize)
                break;
            if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
                nbp = TAILQ_NEXT(bp, b_bobufs);
                continue;
            }
            if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
                (B_DELWRI | B_NEEDCOMMIT)) {
                BUF_UNLOCK(bp);
                nbp = TAILQ_NEXT(bp, b_bobufs);
                continue;
            }
            BO_UNLOCK(bo);
            bremfree(bp);
            /*
             * Work out if all buffers are using the same cred
             * so we can deal with them all with one commit.
             *
             * NOTE: we are not clearing B_DONE here, so we have
             * to do it later on in this routine if we intend to
             * initiate I/O on the bp.
             *
             * Note: to avoid loopback deadlocks, we do not
             * assign b_runningbufspace.
             */
            if (wcred == NULL)
                wcred = bp->b_wcred;
            else if (wcred != bp->b_wcred)
                wcred = NOCRED;
            vfs_busy_pages(bp, 1);

            BO_LOCK(bo);
            /*
             * bp is protected by being locked, but nbp is not
             * and vfs_busy_pages() may sleep. We have to
             * recalculate nbp.
             */
            nbp = TAILQ_NEXT(bp, b_bobufs);

            /*
             * A list of these buffers is kept so that the
             * second loop knows which buffers have actually
             * been committed. This is necessary, since there
             * may be a race between the commit rpc and new
             * uncommitted writes on the file.
             */
            bvec[bvecpos++] = bp;
            toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
                bp->b_dirtyoff;
            if (toff < off)
                off = toff;
            toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
            if (toff > endoff)
                endoff = toff;
        }
        BO_UNLOCK(bo);
    }
    if (bvecpos > 0) {
        /*
         * Commit data on the server, as required.
         * If all bufs are using the same wcred, then use that with
         * one call for all of them, otherwise commit each one
         * separately.
         */
        if (wcred != NOCRED)
            retv = ncl_commit(vp, off, (int)(endoff - off),
                wcred, td);
        else {
            retv = 0;
            for (i = 0; i < bvecpos; i++) {
                off_t off, size;

                bp = bvec[i];
                off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
                    bp->b_dirtyoff;
                size = (u_quad_t)(bp->b_dirtyend
                    - bp->b_dirtyoff);
                retv = ncl_commit(vp, off, (int)size,
                    bp->b_wcred, td);
                if (retv)
                    break;
            }
        }

        if (retv == NFSERR_STALEWRITEVERF)
            ncl_clearcommit(vp->v_mount);

        /*
         * Now, either mark the blocks I/O done or mark the
         * blocks dirty, depending on whether the commit
         * succeeded.
         */
        for (i = 0; i < bvecpos; i++) {
            bp = bvec[i];
            bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
            if (retv) {
                /*
                 * Error, leave B_DELWRI intact
                 */
                vfs_unbusy_pages(bp);
                brelse(bp);
            } else {
                /*
                 * Success, remove B_DELWRI ( bundirty() ).
                 *
                 * b_dirtyoff/b_dirtyend seem to be NFS
                 * specific. We should probably move that
                 * into bundirty(). XXX
                 */
                bufobj_wref(bo);
                bp->b_flags |= B_ASYNC;
                bundirty(bp);
                bp->b_flags &= ~B_DONE;
                bp->b_ioflags &= ~BIO_ERROR;
                bp->b_dirtyoff = bp->b_dirtyend = 0;
                bufdone(bp);
            }
        }
    }

    /*
     * Start/do any write(s) that are required.
     */
loop:
    BO_LOCK(bo);
    TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
        if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
            if (waitfor != MNT_WAIT || passone)
                continue;

            error = BUF_TIMELOCK(bp,
                LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
                BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo);
            if (error == 0) {
                BUF_UNLOCK(bp);
                goto loop;
            }
            if (error == ENOLCK) {
                error = 0;
                goto loop;
            }
            if (called_from_renewthread != 0) {
                /*
                 * Return EIO so the flush will be retried
                 * later.
                 */
                error = EIO;
                goto done;
            }
            if (newnfs_sigintr(nmp, td)) {
                error = EINTR;
                goto done;
            }
            if (slpflag == PCATCH) {
                slpflag = 0;
                slptimeo = 2 * hz;
            }
            goto loop;
        }
        if ((bp->b_flags & B_DELWRI) == 0)
            panic("nfs_fsync: not dirty");
        if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
            BUF_UNLOCK(bp);
            continue;
        }
        BO_UNLOCK(bo);
        bremfree(bp);
        bp->b_flags |= B_ASYNC;
        bwrite(bp);
        if (newnfs_sigintr(nmp, td)) {
            error = EINTR;
            goto done;
        }
        goto loop;
    }
    if (passone) {
        passone = 0;
        BO_UNLOCK(bo);
        goto again;
    }
    if (waitfor == MNT_WAIT) {
        while (bo->bo_numoutput) {
            error = bufobj_wwait(bo, slpflag, slptimeo);
            if (error) {
                BO_UNLOCK(bo);
                if (called_from_renewthread != 0) {
                    /*
                     * Return EIO so that the flush will be
                     * retried later.
                     */
                    error = EIO;
                    goto done;
                }
                error = newnfs_sigintr(nmp, td);
                if (error)
                    goto done;
                if (slpflag == PCATCH) {
                    slpflag = 0;
                    slptimeo = 2 * hz;
                }
                BO_LOCK(bo);
            }
        }
        if (bo->bo_dirty.bv_cnt != 0 && commit) {
            BO_UNLOCK(bo);
            goto loop;
        }
        /*
         * Wait for all the async IO requests to drain
         */
        BO_UNLOCK(bo);
        mtx_lock(&np->n_mtx);
        while (np->n_directio_asyncwr > 0) {
            np->n_flag |= NFSYNCWAIT;
            error = newnfs_msleep(td, &np->n_directio_asyncwr,
                &np->n_mtx, slpflag | (PRIBIO + 1),
                "nfsfsync", 0);
            if (error) {
                if (newnfs_sigintr(nmp, td)) {
                    mtx_unlock(&np->n_mtx);
                    error = EINTR;
                    goto done;
                }
            }
        }
        mtx_unlock(&np->n_mtx);
    } else
        BO_UNLOCK(bo);
    if (NFSHASPNFS(nmp)) {
        nfscl_layoutcommit(vp, td);
        /*
         * Invalidate the attribute cache, since writes to a DS
         * won't update the size attribute.
         */
        mtx_lock(&np->n_mtx);
        np->n_attrstamp = 0;
    } else
        mtx_lock(&np->n_mtx);
    if (np->n_flag & NWRITEERR) {
        error = np->n_error;
        np->n_flag &= ~NWRITEERR;
    }
    if (commit && bo->bo_dirty.bv_cnt == 0 &&
        bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
        np->n_flag &= ~NMODIFIED;
    mtx_unlock(&np->n_mtx);
done:
    if (bvec != NULL && bvec != bvec_on_stack)
        free(bvec, M_TEMP);
    if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
        (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
         np->n_directio_asyncwr != 0)) {
        if (trycnt++ < 5) {
            /* try, try again... */
            passone = 1;
            wcred = NULL;
            bvec = NULL;
            bvecsize = 0;
            goto again;
        }
        vn_printf(vp, "ncl_flush failed");
        error = called_from_renewthread != 0 ? EIO : EBUSY;
    }
    return (error);
}
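
/*
 * When all dirty buffers share one write cred, ncl_flush() issues a
 * single commit over the smallest byte range covering every dirty
 * extent: off is the minimum start and endoff the maximum end over all
 * (b_blkno * DEV_BSIZE + b_dirtyoff, b_dirtyend) pairs.  A minimal
 * standalone sketch of that range computation follows (illustrative
 * only; the extents are made up).
 */
#if 0
#include <stdio.h>

#define DEV_BSIZE 512

struct extent {
    long blkno;         /* cf. bp->b_blkno */
    int dirtyoff;       /* cf. bp->b_dirtyoff */
    int dirtyend;       /* cf. bp->b_dirtyend */
};

int
main(void)
{
    struct extent ext[3] = { { 0, 100, 512 }, { 8, 0, 4096 },
        { 64, 256, 300 } };
    unsigned long long off = (unsigned long long)-1, endoff = 0, toff;
    int i;

    for (i = 0; i < 3; i++) {
        toff = (unsigned long long)ext[i].blkno * DEV_BSIZE +
            ext[i].dirtyoff;
        if (toff < off)
            off = toff;
        toff += ext[i].dirtyend - ext[i].dirtyoff;
        if (toff > endoff)
            endoff = toff;
    }
    printf("commit range: off=%llu len=%llu\n", off, endoff - off);
    return (0);
}
#endif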
/*
 * NFS advisory byte-level locks.
 */
static int
nfs_advlock(struct vop_advlock_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct ucred *cred;
    struct nfsnode *np = VTONFS(ap->a_vp);
    struct proc *p = (struct proc *)ap->a_id;
    struct thread *td = curthread;      /* XXX */
    struct vattr va;
    int ret, error;
    u_quad_t size;

    error = NFSVOPLOCK(vp, LK_SHARED);
    if (error != 0)
        return (EBADF);
    if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
        if (vp->v_type != VREG) {
            error = EINVAL;
            goto out;
        }
        if ((ap->a_flags & F_POSIX) != 0)
            cred = p->p_ucred;
        else
            cred = td->td_ucred;
        NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
        if (vp->v_iflag & VI_DOOMED) {
            error = EBADF;
            goto out;
        }

        /*
         * If this is unlocking a write locked region, flush and
         * commit them before unlocking. This is required by
         * RFC3530 Sec. 9.3.2.
         */
        if (ap->a_op == F_UNLCK &&
            nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
            ap->a_flags))
            (void) ncl_flush(vp, MNT_WAIT, td, 1, 0);

        /*
         * Loop around doing the lock op, while a blocking lock
         * must wait for the lock op to succeed.
         */
        do {
            ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
                ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
            if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
                ap->a_op == F_SETLK) {
                NFSVOPUNLOCK(vp, 0);
                error = nfs_catnap(PZERO | PCATCH, ret,
                    "ncladvl");
                if (error)
                    return (EINTR);
                NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
                if (vp->v_iflag & VI_DOOMED) {
                    error = EBADF;
                    goto out;
                }
            }
        } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
            ap->a_op == F_SETLK);
        if (ret == NFSERR_DENIED) {
            error = EAGAIN;
            goto out;
        } else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
            error = ret;
            goto out;
        } else if (ret != 0) {
            error = EACCES;
            goto out;
        }

        /*
         * Now, if we just got a lock, invalidate data in the buffer
         * cache, as required, so that the coherency conforms with
         * RFC3530 Sec. 9.3.2.
         */
        if (ap->a_op == F_SETLK) {
            if ((np->n_flag & NMODIFIED) == 0) {
                np->n_attrstamp = 0;
                KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
                ret = VOP_GETATTR(vp, &va, cred);
            }
            if ((np->n_flag & NMODIFIED) || ret ||
                np->n_change != va.va_filerev) {
                (void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
                np->n_attrstamp = 0;
                KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
                ret = VOP_GETATTR(vp, &va, cred);
                if (!ret) {
                    np->n_mtime = va.va_mtime;
                    np->n_change = va.va_filerev;
                }
            }
            /* Mark that a file lock has been acquired. */
            mtx_lock(&np->n_mtx);
            np->n_flag |= NHASBEENLOCKED;
            mtx_unlock(&np->n_mtx);
        }
    } else if (!NFS_ISV4(vp)) {
        if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
            size = VTONFS(vp)->n_size;
            NFSVOPUNLOCK(vp, 0);
            error = lf_advlock(ap, &(vp->v_lockf), size);
        } else {
            if (nfs_advlock_p != NULL)
                error = nfs_advlock_p(ap);
            else {
                NFSVOPUNLOCK(vp, 0);
                error = ENOLCK;
            }
        }
        if (error == 0 && ap->a_op == F_SETLK) {
            error = NFSVOPLOCK(vp, LK_SHARED);
            if (error == 0) {
                /* Mark that a file lock has been acquired. */
                mtx_lock(&np->n_mtx);
                np->n_flag |= NHASBEENLOCKED;
                mtx_unlock(&np->n_mtx);
                NFSVOPUNLOCK(vp, 0);
            }
        }
        return (error);
    } else
        error = EOPNOTSUPP;
out:
    NFSVOPUNLOCK(vp, 0);
    return (error);
}
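
/*
 * Seen from userland, the blocking-lock loop above behaves like
 * polling a non-blocking lock with a nap between NFSERR_DENIED
 * replies.  A rough userland analogue (illustrative only; the file
 * name and timing are made up, and the kernel's nfs_catnap() is
 * approximated by sleep()):
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
    int fd = open("/mnt/nfs/lockfile", O_RDWR | O_CREAT, 0644);

    if (fd < 0)
        return (1);
    while (fcntl(fd, F_SETLK, &fl) == -1) {
        perror("lock denied, retrying");
        sleep(1);               /* cf. nfs_catnap() in the kernel loop */
    }
    printf("write lock acquired\n");
    fl.l_type = F_UNLCK;
    fcntl(fd, F_SETLK, &fl);
    close(fd);
    return (0);
}
#endif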
/*
 * NFS advisory byte-level locks.
 */
static int
nfs_advlockasync(struct vop_advlockasync_args *ap)
{
    struct vnode *vp = ap->a_vp;
    u_quad_t size;
    int error;

    if (NFS_ISV4(vp))
        return (EOPNOTSUPP);
    error = NFSVOPLOCK(vp, LK_SHARED);
    if (error)
        return (error);
    if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
        size = VTONFS(vp)->n_size;
        NFSVOPUNLOCK(vp, 0);
        error = lf_advlockasync(ap, &(vp->v_lockf), size);
    } else {
        NFSVOPUNLOCK(vp, 0);
        error = EOPNOTSUPP;
    }
    return (error);
}

/*
 * Print out the contents of an nfsnode.
 */
static int
nfs_print(struct vop_print_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct nfsnode *np = VTONFS(vp);

    printf("\tfileid %jd fsid 0x%jx", (uintmax_t)np->n_vattr.na_fileid,
        (uintmax_t)np->n_vattr.na_fsid);
    if (vp->v_type == VFIFO)
        fifo_printinfo(vp);
    printf("\n");
    return (0);
}

/*
 * This is the "real" nfs::bwrite(struct buf*).
 * We set B_CACHE if this is a VMIO buffer.
 */
int
ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
{
    int oldflags, rtval;

    BUF_ASSERT_HELD(bp);

    if (bp->b_flags & B_INVAL) {
        brelse(bp);
        return (0);
    }

    oldflags = bp->b_flags;
    bp->b_flags |= B_CACHE;

    /*
     * Undirty the bp. We will redirty it later if the I/O fails.
     */
    bundirty(bp);
    bp->b_flags &= ~B_DONE;
    bp->b_ioflags &= ~BIO_ERROR;
    bp->b_iocmd = BIO_WRITE;

    bufobj_wref(bp->b_bufobj);
    curthread->td_ru.ru_oublock++;

    /*
     * Note: to avoid loopback deadlocks, we do not
     * assign b_runningbufspace.
     */
    vfs_busy_pages(bp, 1);

    BUF_KERNPROC(bp);
    bp->b_iooffset = dbtob(bp->b_blkno);
    bstrategy(bp);

    if ((oldflags & B_ASYNC) != 0)
        return (0);

    rtval = bufwait(bp);
    if (oldflags & B_DELWRI)
        reassignbuf(bp);
    brelse(bp);
    return (rtval);
}

/*
 * nfs special file access vnode op.
 * Essentially just get vattr and then imitate iaccess() since the device is
 * local to the client.
 */
static int
nfsspec_access(struct vop_access_args *ap)
{
    struct vattr *vap;
    struct ucred *cred = ap->a_cred;
    struct vnode *vp = ap->a_vp;
    accmode_t accmode = ap->a_accmode;
    struct vattr vattr;
    int error;

    /*
     * Disallow write attempts on filesystems mounted read-only;
     * unless the file is a socket, fifo, or a block or character
     * device resident on the filesystem.
     */
    if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
        switch (vp->v_type) {
        case VREG:
        case VDIR:
        case VLNK:
            return (EROFS);
        default:
            break;
        }
    }
    vap = &vattr;
    error = VOP_GETATTR(vp, vap, cred);
    if (error)
        goto out;
    error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
        accmode, cred, NULL);
out:
    return (error);
}
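
/*
 * The vaccess() call above boils down to the classic owner/group/other
 * mode-bit test.  A reduced userland sketch of that core test follows
 * (illustrative only; it omits the superuser and ACL refinements that
 * the real vaccess() performs, and all values are made up).
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int
mode_ok(mode_t mode, uid_t fuid, gid_t fgid, uid_t uid, gid_t gid,
    mode_t want)                /* want: some mix of the R/W/X bits 04/02/01 */
{
    if (uid == fuid)
        mode >>= 6;             /* owner class */
    else if (gid == fgid)
        mode >>= 3;             /* group class */
    return ((mode & want) == want);
}

int
main(void)
{
    /* rw-r----- owned by 1001:1001, read requested by uid 1002, gid 1001 */
    printf("%s\n", mode_ok(0640, 1001, 1001, 1002, 1001, 04) ?
        "granted" : "denied");
    return (0);
}
#endif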
/*
 * Read wrapper for fifos.
 */
static int
nfsfifo_read(struct vop_read_args *ap)
{
    struct nfsnode *np = VTONFS(ap->a_vp);
    int error;

    /*
     * Set access flag.
     */
    mtx_lock(&np->n_mtx);
    np->n_flag |= NACC;
    vfs_timestamp(&np->n_atim);
    mtx_unlock(&np->n_mtx);
    error = fifo_specops.vop_read(ap);
    return (error);
}

/*
 * Write wrapper for fifos.
 */
static int
nfsfifo_write(struct vop_write_args *ap)
{
    struct nfsnode *np = VTONFS(ap->a_vp);

    /*
     * Set update flag.
     */
    mtx_lock(&np->n_mtx);
    np->n_flag |= NUPD;
    vfs_timestamp(&np->n_mtim);
    mtx_unlock(&np->n_mtx);
    return (fifo_specops.vop_write(ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the nfsnode then do fifo close.
 */
static int
nfsfifo_close(struct vop_close_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct nfsnode *np = VTONFS(vp);
    struct vattr vattr;
    struct timespec ts;

    mtx_lock(&np->n_mtx);
    if (np->n_flag & (NACC | NUPD)) {
        vfs_timestamp(&ts);
        if (np->n_flag & NACC)
            np->n_atim = ts;
        if (np->n_flag & NUPD)
            np->n_mtim = ts;
        np->n_flag |= NCHG;
        if (vrefcnt(vp) == 1 &&
            (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
            VATTR_NULL(&vattr);
            if (np->n_flag & NACC)
                vattr.va_atime = np->n_atim;
            if (np->n_flag & NUPD)
                vattr.va_mtime = np->n_mtim;
            mtx_unlock(&np->n_mtx);
            (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
            goto out;
        }
    }
    mtx_unlock(&np->n_mtx);
out:
    return (fifo_specops.vop_close(ap));
}

/*
 * Just call ncl_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs_bwrite(struct buf *bp)
{

    return (ncl_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_newnfs = {
    .bop_name       = "buf_ops_nfs",
    .bop_write      = nfs_bwrite,
    .bop_strategy   = bufstrategy,
    .bop_sync       = bufsync,
    .bop_bdflush    = bufbdflush,
};
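
/*
 * buf_ops_newnfs above is a function-pointer operations table: generic
 * buffer-cache code calls through it, which is how a plain bwrite()
 * on an NFS buffer ends up in nfs_bwrite().  A minimal standalone
 * sketch of that dispatch pattern follows (illustrative only; the
 * "demo_" structure and names are hypothetical stand-ins, not the real
 * struct buf_ops machinery).
 */
#if 0
#include <stdio.h>

struct demo_buf;
struct demo_ops {
    const char *name;
    int (*op_write)(struct demo_buf *);
};
struct demo_buf {
    struct demo_ops *ops;       /* cf. the buffer's associated buf_ops */
};

static int
demo_nfs_write(struct demo_buf *bp)
{
    printf("nfs-specific write path\n");
    return (0);
}

static struct demo_ops demo_ops_nfs = {
    .name = "demo_ops_nfs",
    .op_write = demo_nfs_write,
};

int
main(void)
{
    struct demo_buf b = { .ops = &demo_ops_nfs };

    /* The generic layer calls through the table, cf. bwrite(). */
    return (b.ops->op_write(&b));
}
#endif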
static int
nfs_getacl(struct vop_getacl_args *ap)
{
    int error;

    if (ap->a_type != ACL_TYPE_NFS4)
        return (EOPNOTSUPP);
    error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
        NULL);
    if (error > NFSERR_STALE) {
        (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
        error = EPERM;
    }
    return (error);
}

static int
nfs_setacl(struct vop_setacl_args *ap)
{
    int error;

    if (ap->a_type != ACL_TYPE_NFS4)
        return (EOPNOTSUPP);
    error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
        NULL);
    if (error > NFSERR_STALE) {
        (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
        error = EPERM;
    }
    return (error);
}

/*
 * Return POSIX pathconf information applicable to nfs filesystems.
 */
static int
nfs_pathconf(struct vop_pathconf_args *ap)
{
    struct nfsv3_pathconf pc;
    struct nfsvattr nfsva;
    struct vnode *vp = ap->a_vp;
    struct thread *td = curthread;
    int attrflag, error;

    if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
        ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
        ap->a_name == _PC_NO_TRUNC)) ||
        (NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) {
        /*
         * Since only the above 4 a_names are returned by the NFSv3
         * Pathconf RPC, there is no point in doing it for others.
         * For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
         * be used for _PC_NFS4_ACL as well.
         */
        error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
            &attrflag, NULL);
        if (attrflag != 0)
            (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
                1);
        if (error != 0)
            return (error);
    } else {
        /*
         * For NFSv2 (or NFSv3 when not one of the above 4 a_names),
         * just fake them.
         */
        pc.pc_linkmax = NFS_LINK_MAX;
        pc.pc_namemax = NFS_MAXNAMLEN;
        pc.pc_notrunc = 1;
        pc.pc_chownrestricted = 1;
        pc.pc_caseinsensitive = 0;
        pc.pc_casepreserving = 1;
        error = 0;
    }
    switch (ap->a_name) {
    case _PC_LINK_MAX:
#ifdef _LP64
        *ap->a_retval = pc.pc_linkmax;
#else
        *ap->a_retval = MIN(LONG_MAX, pc.pc_linkmax);
#endif
        break;
    case _PC_NAME_MAX:
        *ap->a_retval = pc.pc_namemax;
        break;
    case _PC_PIPE_BUF:
        if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO)
            *ap->a_retval = PIPE_BUF;
        else
            error = EINVAL;
        break;
    case _PC_CHOWN_RESTRICTED:
        *ap->a_retval = pc.pc_chownrestricted;
        break;
    case _PC_NO_TRUNC:
        *ap->a_retval = pc.pc_notrunc;
        break;
    case _PC_ACL_NFS4:
        if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
            NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
            *ap->a_retval = 1;
        else
            *ap->a_retval = 0;
        break;
    case _PC_ACL_PATH_MAX:
        if (NFS_ISV4(vp))
            *ap->a_retval = ACL_MAX_ENTRIES;
        else
            *ap->a_retval = 3;
        break;
    case _PC_PRIO_IO:
        *ap->a_retval = 0;
        break;
    case _PC_SYNC_IO:
        *ap->a_retval = 0;
        break;
    case _PC_ALLOC_SIZE_MIN:
        *ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
        break;
    case _PC_FILESIZEBITS:
        if (NFS_ISV34(vp))
            *ap->a_retval = 64;
        else
            *ap->a_retval = 32;
        break;
    case _PC_REC_INCR_XFER_SIZE:
        *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
        break;
    case _PC_REC_MAX_XFER_SIZE:
        *ap->a_retval = -1;     /* means ``unlimited'' */
        break;
    case _PC_REC_MIN_XFER_SIZE:
        *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
        break;
    case _PC_REC_XFER_ALIGN:
        *ap->a_retval = PAGE_SIZE;
        break;
    case _PC_SYMLINK_MAX:
        *ap->a_retval = NFS_MAXPATHLEN;
        break;

    default:
        error = vop_stdpathconf(ap);
        break;
    }
    return (error);
}
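
/*
 * The values nfs_pathconf() reports can be observed from userland via
 * pathconf(2).  A minimal sketch (illustrative only; the mount point
 * path is made up):
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    const char *path = "/mnt/nfs";

    printf("LINK_MAX = %ld\n", pathconf(path, _PC_LINK_MAX));
    printf("NAME_MAX = %ld\n", pathconf(path, _PC_NAME_MAX));
    printf("NO_TRUNC = %ld\n", pathconf(path, _PC_NO_TRUNC));
    return (0);
}
#endif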