/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * vnode op calls for Sun NFS version 2, 3 and 4
 */

#include "opt_kdtrace.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

#include <nfs/nfs_lock.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_accesscache_flush_probe_func_t
		dtrace_nfscl_accesscache_flush_done_probe;
uint32_t	nfscl_accesscache_flush_done_id;

dtrace_nfsclient_accesscache_get_probe_func_t
		dtrace_nfscl_accesscache_get_hit_probe,
		dtrace_nfscl_accesscache_get_miss_probe;
uint32_t	nfscl_accesscache_get_hit_id;
uint32_t	nfscl_accesscache_get_miss_id;

dtrace_nfsclient_accesscache_load_probe_func_t
		dtrace_nfscl_accesscache_load_done_probe;
uint32_t	nfscl_accesscache_load_done_id;
#endif /* KDTRACE_HOOKS */
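
/*
 * Note: the probe function pointers above stay NULL until a DTrace
 * provider module registers them; the KDTRACE_NFS_* macros used below
 * are expected to test the hook for NULL before firing, so the probes
 * cost almost nothing when DTrace is not active.
 */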

/* Defs */
#define	TRUE	1
#define	FALSE	0

extern struct nfsstats newnfsstats;
extern int nfsrv_useacl;
extern int nfscl_debuglevel;
MALLOC_DECLARE(M_NEWNFSREQ);

/*
 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
 * calls are not in getblk() and brelse() so that they would not be necessary
 * here.
 */
#ifndef B_VMIO
#define	vfs_busy_pages(bp, f)
#endif

static vop_read_t	nfsfifo_read;
static vop_write_t	nfsfifo_write;
static vop_close_t	nfsfifo_close;
static int	nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
		    struct thread *);
static vop_lookup_t	nfs_lookup;
static vop_create_t	nfs_create;
static vop_mknod_t	nfs_mknod;
static vop_open_t	nfs_open;
static vop_pathconf_t	nfs_pathconf;
static vop_close_t	nfs_close;
static vop_access_t	nfs_access;
static vop_getattr_t	nfs_getattr;
static vop_setattr_t	nfs_setattr;
static vop_read_t	nfs_read;
static vop_fsync_t	nfs_fsync;
static vop_remove_t	nfs_remove;
static vop_link_t	nfs_link;
static vop_rename_t	nfs_rename;
static vop_mkdir_t	nfs_mkdir;
static vop_rmdir_t	nfs_rmdir;
static vop_symlink_t	nfs_symlink;
static vop_readdir_t	nfs_readdir;
static vop_strategy_t	nfs_strategy;
static vop_lock1_t	nfs_lock1;
static int	nfs_lookitup(struct vnode *, char *, int,
		    struct ucred *, struct thread *, struct nfsnode **);
static int	nfs_sillyrename(struct vnode *, struct vnode *,
		    struct componentname *);
static vop_access_t	nfsspec_access;
static vop_readlink_t	nfs_readlink;
static vop_print_t	nfs_print;
static vop_advlock_t	nfs_advlock;
static vop_advlockasync_t	nfs_advlockasync;
static vop_getacl_t	nfs_getacl;
static vop_setacl_t	nfs_setacl;

/*
 * Global vfs data structures for nfs
 */
struct vop_vector newnfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		nfs_access,
	.vop_advlock =		nfs_advlock,
	.vop_advlockasync =	nfs_advlockasync,
	.vop_close =		nfs_close,
	.vop_create =		nfs_create,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_getpages =		ncl_getpages,
	.vop_putpages =		ncl_putpages,
	.vop_inactive =		ncl_inactive,
	.vop_link =		nfs_link,
	.vop_lock1 =		nfs_lock1,
	.vop_lookup =		nfs_lookup,
	.vop_mkdir =		nfs_mkdir,
	.vop_mknod =		nfs_mknod,
	.vop_open =		nfs_open,
	.vop_pathconf =		nfs_pathconf,
	.vop_print =		nfs_print,
	.vop_read =		nfs_read,
	.vop_readdir =		nfs_readdir,
	.vop_readlink =		nfs_readlink,
	.vop_reclaim =		ncl_reclaim,
	.vop_remove =		nfs_remove,
	.vop_rename =		nfs_rename,
	.vop_rmdir =		nfs_rmdir,
	.vop_setattr =		nfs_setattr,
	.vop_strategy =		nfs_strategy,
	.vop_symlink =		nfs_symlink,
	.vop_write =		ncl_write,
	.vop_getacl =		nfs_getacl,
	.vop_setacl =		nfs_setacl,
};

struct vop_vector newnfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_access =		nfsspec_access,
	.vop_close =		nfsfifo_close,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_inactive =		ncl_inactive,
	.vop_print =		nfs_print,
	.vop_read =		nfsfifo_read,
	.vop_reclaim =		ncl_reclaim,
	.vop_setattr =		nfs_setattr,
	.vop_write =		nfsfifo_write,
};
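
/*
 * Any vnode op not filled in above falls through to the .vop_default
 * vector (default_vnodeops for regular NFS vnodes, fifo_specops for
 * fifos), so only the ops NFS must handle specially appear here.
 */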

static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct vattr *vap);
static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td);
static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
    char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
    char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
    struct componentname *scnp, struct sillyrename *sp);

/*
 * Global variables
 */
#define	DIRHDSIZ	(sizeof (struct dirent) - (MAXNAMLEN + 1))

SYSCTL_DECL(_vfs_nfs);

static int	nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
    &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");

static int	nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
    &nfs_prime_access_cache, 0,
    "Prime NFS ACCESS cache when fetching attributes");

static int	newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
    &newnfs_commit_on_close, 0, "write+commit on close, else only write");

static int	nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
    &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");

int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
    &newnfs_directio_enable, 0, "Enable NFS directio");

int nfs_keep_dirty_on_error;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
    &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");

/*
 * This sysctl allows other processes to mmap a file that has been opened
 * O_DIRECT by a process.  In general, having processes mmap the file while
 * Direct IO is in progress can lead to data inconsistencies.  But we allow
 * it by default to prevent a DoS attack: without it, a malicious user could
 * open files O_DIRECT and thereby prevent other users from mmap'ing them.
 * "Protected" environments where stricter consistency guarantees are
 * required can disable this knob.  The process that opened the file O_DIRECT
 * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
 * meaningful.
 */
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
    &newnfs_directio_allow_mmap, 0, "Enable mmap'ed IO on a file with O_DIRECT opens");

#if 0
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
    &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");

SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
    &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

#define	NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY	\
			 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE	\
			 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)

/*
 * SMP Locking Note :
 * The list of locks after the description of the lock is the ordering
 * of other locks acquired with the lock held.
 * np->n_mtx : Protects the fields in the nfsnode.
 *	VM Object Lock
 *	VI_MTX (acquired indirectly)
 * nmp->nm_mtx : Protects the fields in the nfsmount.
 *	rep->r_mtx
 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
 *	nmp->nm_mtx
 *	rep->r_mtx
 * rep->r_mtx : Protects the fields in an nfsreq.
 */
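
/*
 * A minimal sketch (not compiled in) of what the ordering above implies:
 * a thread that needs both the nfsnode mutex and the VM object lock must
 * take n_mtx first and the object lock inside it.
 */
#if 0
	mtx_lock(&np->n_mtx);			/* nfsnode fields */
	VM_OBJECT_WLOCK(vp->v_object);		/* nests inside n_mtx */
	/* ... examine/update np and the VM object ... */
	VM_OBJECT_WUNLOCK(vp->v_object);
	mtx_unlock(&np->n_mtx);
#endif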

static int
nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
    struct ucred *cred, u_int32_t *retmode)
{
	int error = 0, attrflag, i, lrupos;
	u_int32_t rmode;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;

	error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
	    &rmode, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
	if (!error) {
		lrupos = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (np->n_accesscache[i].uid == cred->cr_uid) {
				np->n_accesscache[i].mode = rmode;
				np->n_accesscache[i].stamp = time_second;
				break;
			}
			if (i > 0 && np->n_accesscache[i].stamp <
			    np->n_accesscache[lrupos].stamp)
				lrupos = i;
		}
		if (i == NFS_ACCESSCACHESIZE) {
			np->n_accesscache[lrupos].uid = cred->cr_uid;
			np->n_accesscache[lrupos].mode = rmode;
			np->n_accesscache[lrupos].stamp = time_second;
		}
		mtx_unlock(&np->n_mtx);
		if (retmode != NULL)
			*retmode = rmode;
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
#ifdef KDTRACE_HOOKS
	if (error != 0)
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
		    error);
#endif
	return (error);
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
static int
nfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0, i, gotahit;
	u_int32_t mode, wmode, rmode;
	int v34 = NFS_ISV34(vp);
	struct nfsnode *np = VTONFS(vp);

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
	    VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
	    VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * For nfs v3 or v4, check to see if we have done this recently, and if
	 * so return our cached result instead of making an ACCESS call.
	 * If not, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about.
377 */ 378 if (v34) { 379 if (ap->a_accmode & VREAD) 380 mode = NFSACCESS_READ; 381 else 382 mode = 0; 383 if (vp->v_type != VDIR) { 384 if (ap->a_accmode & VWRITE) 385 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); 386 if (ap->a_accmode & VAPPEND) 387 mode |= NFSACCESS_EXTEND; 388 if (ap->a_accmode & VEXEC) 389 mode |= NFSACCESS_EXECUTE; 390 if (ap->a_accmode & VDELETE) 391 mode |= NFSACCESS_DELETE; 392 } else { 393 if (ap->a_accmode & VWRITE) 394 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); 395 if (ap->a_accmode & VAPPEND) 396 mode |= NFSACCESS_EXTEND; 397 if (ap->a_accmode & VEXEC) 398 mode |= NFSACCESS_LOOKUP; 399 if (ap->a_accmode & VDELETE) 400 mode |= NFSACCESS_DELETE; 401 if (ap->a_accmode & VDELETE_CHILD) 402 mode |= NFSACCESS_MODIFY; 403 } 404 /* XXX safety belt, only make blanket request if caching */ 405 if (nfsaccess_cache_timeout > 0) { 406 wmode = NFSACCESS_READ | NFSACCESS_MODIFY | 407 NFSACCESS_EXTEND | NFSACCESS_EXECUTE | 408 NFSACCESS_DELETE | NFSACCESS_LOOKUP; 409 } else { 410 wmode = mode; 411 } 412 413 /* 414 * Does our cached result allow us to give a definite yes to 415 * this request? 416 */ 417 gotahit = 0; 418 mtx_lock(&np->n_mtx); 419 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) { 420 if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) { 421 if (time_second < (np->n_accesscache[i].stamp 422 + nfsaccess_cache_timeout) && 423 (np->n_accesscache[i].mode & mode) == mode) { 424 NFSINCRGLOBAL(newnfsstats.accesscache_hits); 425 gotahit = 1; 426 } 427 break; 428 } 429 } 430 mtx_unlock(&np->n_mtx); 431 #ifdef KDTRACE_HOOKS 432 if (gotahit != 0) 433 KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp, 434 ap->a_cred->cr_uid, mode); 435 else 436 KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp, 437 ap->a_cred->cr_uid, mode); 438 #endif 439 if (gotahit == 0) { 440 /* 441 * Either a no, or a don't know. Go to the wire. 442 */ 443 NFSINCRGLOBAL(newnfsstats.accesscache_misses); 444 error = nfs34_access_otw(vp, wmode, ap->a_td, 445 ap->a_cred, &rmode); 446 if (!error && 447 (rmode & mode) != mode) 448 error = EACCES; 449 } 450 return (error); 451 } else { 452 if ((error = nfsspec_access(ap)) != 0) { 453 return (error); 454 } 455 /* 456 * Attempt to prevent a mapped root from accessing a file 457 * which it shouldn't. We try to read a byte from the file 458 * if the user is root and the file is not zero length. 459 * After calling nfsspec_access, we should have the correct 460 * file size cached. 461 */ 462 mtx_lock(&np->n_mtx); 463 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD) 464 && VTONFS(vp)->n_size > 0) { 465 struct iovec aiov; 466 struct uio auio; 467 char buf[1]; 468 469 mtx_unlock(&np->n_mtx); 470 aiov.iov_base = buf; 471 aiov.iov_len = 1; 472 auio.uio_iov = &aiov; 473 auio.uio_iovcnt = 1; 474 auio.uio_offset = 0; 475 auio.uio_resid = 1; 476 auio.uio_segflg = UIO_SYSSPACE; 477 auio.uio_rw = UIO_READ; 478 auio.uio_td = ap->a_td; 479 480 if (vp->v_type == VREG) 481 error = ncl_readrpc(vp, &auio, ap->a_cred); 482 else if (vp->v_type == VDIR) { 483 char* bp; 484 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 485 aiov.iov_base = bp; 486 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ; 487 error = ncl_readdirrpc(vp, &auio, ap->a_cred, 488 ap->a_td); 489 free(bp, M_TEMP); 490 } else if (vp->v_type == VLNK) 491 error = ncl_readlinkrpc(vp, &auio, ap->a_cred); 492 else 493 error = EACCES; 494 } else 495 mtx_unlock(&np->n_mtx); 496 return (error); 497 } 498 } 499 500 501 /* 502 * nfs open vnode op 503 * Check to see if the type is ok 504 * and that deletion is not in progress. 
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
static int
nfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;
	int fmode = ap->a_mode;
	struct ucred *cred;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
		return (EOPNOTSUPP);

	/*
	 * For NFSv4, we need to do the Open Op before cache validation,
	 * so that we conform to RFC3530 Sec. 9.3.1.
	 */
	if (NFS_ISV4(vp)) {
		error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
		if (error) {
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
			return (error);
		}
	}

	/*
	 * Now, if this Open will be doing reading, re-validate/flush the
	 * cache, so that Close/Open coherency is maintained.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
		if (error == EINTR || error == EIO) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		if (vp->v_type == VDIR)
			np->n_direofoffset = 0;
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		if (NFS_ISV4(vp))
			np->n_change = vattr.va_filerev;
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
		    NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error == EINTR || error == EIO) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			if (NFS_ISV4(vp))
				np->n_change = vattr.va_filerev;
		}
	}

	/*
	 * If the object has >= 1 O_DIRECT active opens, we disable caching.
	 */
	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
	    (vp->v_type == VREG)) {
		if (np->n_directio_opens == 0) {
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_flag |= NNONCACHE;
		}
		np->n_directio_opens++;
	}

	/* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
	if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
		np->n_flag |= NWRITEOPENED;

	/*
	 * If this is an open for writing, capture a reference to the
	 * credentials, so they can be used by ncl_putpages(). Using
	 * these write credentials is preferable to the credentials of
	 * whatever thread happens to be doing the VOP_PUTPAGES() since
	 * the write RPCs are less likely to fail with EACCES.
620 */ 621 if ((fmode & FWRITE) != 0) { 622 cred = np->n_writecred; 623 np->n_writecred = crhold(ap->a_cred); 624 } else 625 cred = NULL; 626 mtx_unlock(&np->n_mtx); 627 628 if (cred != NULL) 629 crfree(cred); 630 vnode_create_vobject(vp, vattr.va_size, ap->a_td); 631 return (0); 632 } 633 634 /* 635 * nfs close vnode op 636 * What an NFS client should do upon close after writing is a debatable issue. 637 * Most NFS clients push delayed writes to the server upon close, basically for 638 * two reasons: 639 * 1 - So that any write errors may be reported back to the client process 640 * doing the close system call. By far the two most likely errors are 641 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 642 * 2 - To put a worst case upper bound on cache inconsistency between 643 * multiple clients for the file. 644 * There is also a consistency problem for Version 2 of the protocol w.r.t. 645 * not being able to tell if other clients are writing a file concurrently, 646 * since there is no way of knowing if the changed modify time in the reply 647 * is only due to the write for this client. 648 * (NFS Version 3 provides weak cache consistency data in the reply that 649 * should be sufficient to detect and handle this case.) 650 * 651 * The current code does the following: 652 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 653 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 654 * or commit them (this satisfies 1 and 2 except for the 655 * case where the server crashes after this close but 656 * before the commit RPC, which is felt to be "good 657 * enough". Changing the last argument to ncl_flush() to 658 * a 1 would force a commit operation, if it is felt a 659 * commit is necessary now. 660 * for NFS Version 4 - flush the dirty buffers and commit them, if 661 * nfscl_mustflush() says this is necessary. 662 * It is necessary if there is no write delegation held, 663 * in order to satisfy open/close coherency. 664 * If the file isn't cached on local stable storage, 665 * it may be necessary in order to detect "out of space" 666 * errors from the server, if the write delegation 667 * issued by the server doesn't allow the file to grow. 668 */ 669 /* ARGSUSED */ 670 static int 671 nfs_close(struct vop_close_args *ap) 672 { 673 struct vnode *vp = ap->a_vp; 674 struct nfsnode *np = VTONFS(vp); 675 struct nfsvattr nfsva; 676 struct ucred *cred; 677 int error = 0, ret, localcred = 0; 678 int fmode = ap->a_fflag; 679 680 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)) 681 return (0); 682 /* 683 * During shutdown, a_cred isn't valid, so just use root. 684 */ 685 if (ap->a_cred == NOCRED) { 686 cred = newnfs_getcred(); 687 localcred = 1; 688 } else { 689 cred = ap->a_cred; 690 } 691 if (vp->v_type == VREG) { 692 /* 693 * Examine and clean dirty pages, regardless of NMODIFIED. 694 * This closes a major hole in close-to-open consistency. 695 * We want to push out all dirty pages (and buffers) on 696 * close, regardless of whether they were dirtied by 697 * mmap'ed writes or via write(). 698 */ 699 if (nfs_clean_pages_on_close && vp->v_object) { 700 VM_OBJECT_WLOCK(vp->v_object); 701 vm_object_page_clean(vp->v_object, 0, 0, 0); 702 VM_OBJECT_WUNLOCK(vp->v_object); 703 } 704 mtx_lock(&np->n_mtx); 705 if (np->n_flag & NMODIFIED) { 706 mtx_unlock(&np->n_mtx); 707 if (NFS_ISV3(vp)) { 708 /* 709 * Under NFSv3 we have dirty buffers to dispose of. We 710 * must flush them to the NFS server. 
				 * We have the option of waiting all the way
				 * through the commit rpc or just waiting for
				 * the initial write. The default is to only
				 * wait through the initial write so the data
				 * is in the server's cache, which is roughly
				 * similar to the state a standard disk
				 * subsystem leaves the file in on close().
				 *
				 * We cannot clear the NMODIFIED bit in
				 * np->n_flag due to potential races with
				 * other processes, and certainly cannot
				 * clear it if we don't commit.
				 * These races occur when there is no longer
				 * the old traditional vnode locking
				 * implemented for Vnode Ops.
				 */
				int cm = newnfs_commit_on_close ? 1 : 0;
				error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td,
				    cm, 0);
				/* np->n_flag &= ~NMODIFIED; */
			} else if (NFS_ISV4(vp)) {
				if (nfscl_mustflush(vp) != 0) {
					int cm = newnfs_commit_on_close ? 1 : 0;
					error = ncl_flush(vp, MNT_WAIT, cred,
					    ap->a_td, cm, 0);
					/*
					 * as above w.r.t races when clearing
					 * NMODIFIED.
					 * np->n_flag &= ~NMODIFIED;
					 */
				}
			} else
				error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			mtx_lock(&np->n_mtx);
		}
		/*
		 * Invalidate the attribute cache in all cases.
		 * An open is going to fetch fresh attrs anyway, other procs
		 * on this node that have file open will be forced to do an
		 * otw attr fetch, but this is safe.
		 * --> A user found that their RPC count dropped by 20% when
		 *     this was commented out and I can't see any requirement
		 *     for it, so I've disabled it when negative lookups are
		 *     enabled. (What does this have to do with negative
		 *     lookup caching? Well nothing, except it was reported
		 *     by the same user that needed negative lookup caching
		 *     and I wanted there to be a way to disable it to see if
		 *     it is the cause of some caching/coherency issue that
		 *     might crop up.)
		 */
		if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
		mtx_unlock(&np->n_mtx);
	}

	if (NFS_ISV4(vp)) {
		/*
		 * Get attributes so "change" is up to date.
		 */
		if (error == 0 && nfscl_mustflush(vp) != 0) {
			ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
			    NULL);
			if (!ret) {
				np->n_change = nfsva.na_filerev;
				(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
				    NULL, 0, 0);
			}
		}

		/*
		 * and do the close.
		 */
		ret = nfsrpc_close(vp, 0, ap->a_td);
		if (!error && ret)
			error = ret;
		if (error)
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
	}
	if (newnfs_directio_enable)
		KASSERT((np->n_directio_asyncwr == 0),
		    ("nfs_close: dirty unflushed (%d) directio buffers\n",
		    np->n_directio_asyncwr));
	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
	    (vp->v_type == VREG)) {
		mtx_lock(&np->n_mtx);
		KASSERT((np->n_directio_opens > 0),
		    ("nfs_close: unexpected value (0) of n_directio_opens\n"));
		np->n_directio_opens--;
		if (np->n_directio_opens == 0)
			np->n_flag &= ~NNONCACHE;
		mtx_unlock(&np->n_mtx);
	}
	if (localcred)
		NFSFREECRED(cred);
	return (error);
}
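
/*
 * As the comment above nfs_close() notes, a commit could be forced on
 * close regardless of the vfs.nfs.commit_on_close sysctl by passing 1 as
 * the commit argument to ncl_flush(); a sketch of that variant:
 */
#if 0
	error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, 1, 0);
#endif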

/*
 * nfs getattr call from vfs.
 */
static int
nfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct nfsvattr nfsva;
	struct vattr *vap = ap->a_vap;
	struct vattr vattr;

	/*
	 * Update local times for special files.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;
	mtx_unlock(&np->n_mtx);
	/*
	 * First look in the cache.
	 */
	if (ncl_getattrcache(vp, &vattr) == 0) {
		vap->va_type = vattr.va_type;
		vap->va_mode = vattr.va_mode;
		vap->va_nlink = vattr.va_nlink;
		vap->va_uid = vattr.va_uid;
		vap->va_gid = vattr.va_gid;
		vap->va_fsid = vattr.va_fsid;
		vap->va_fileid = vattr.va_fileid;
		vap->va_size = vattr.va_size;
		vap->va_blocksize = vattr.va_blocksize;
		vap->va_atime = vattr.va_atime;
		vap->va_mtime = vattr.va_mtime;
		vap->va_ctime = vattr.va_ctime;
		vap->va_gen = vattr.va_gen;
		vap->va_flags = vattr.va_flags;
		vap->va_rdev = vattr.va_rdev;
		vap->va_bytes = vattr.va_bytes;
		vap->va_filerev = vattr.va_filerev;
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
		return (0);
	}

	if (NFS_ISV34(vp) && nfs_prime_access_cache &&
	    nfsaccess_cache_timeout > 0) {
		NFSINCRGLOBAL(newnfsstats.accesscache_misses);
		nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
		if (ncl_getattrcache(vp, ap->a_vap) == 0) {
			nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
			return (0);
		}
	}
	error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
	if (!error)
		error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
	if (!error) {
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * nfs setattr call.
 */
static int
nfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct thread *td = curthread;	/* XXX */
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize;

#ifndef nolint
	tsize = (u_quad_t)0;
#endif

	/*
	 * Setting of flags and marking of atimes are not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
934 */ 935 if (vp->v_mount->mnt_flag & MNT_RDONLY) 936 return (EROFS); 937 /* 938 * We run vnode_pager_setsize() early (why?), 939 * we must set np->n_size now to avoid vinvalbuf 940 * V_SAVE races that might setsize a lower 941 * value. 942 */ 943 mtx_lock(&np->n_mtx); 944 tsize = np->n_size; 945 mtx_unlock(&np->n_mtx); 946 error = ncl_meta_setsize(vp, ap->a_cred, td, 947 vap->va_size); 948 mtx_lock(&np->n_mtx); 949 if (np->n_flag & NMODIFIED) { 950 tsize = np->n_size; 951 mtx_unlock(&np->n_mtx); 952 if (vap->va_size == 0) 953 error = ncl_vinvalbuf(vp, 0, td, 1); 954 else 955 error = ncl_vinvalbuf(vp, V_SAVE, td, 1); 956 if (error) { 957 vnode_pager_setsize(vp, tsize); 958 return (error); 959 } 960 /* 961 * Call nfscl_delegmodtime() to set the modify time 962 * locally, as required. 963 */ 964 nfscl_delegmodtime(vp); 965 } else 966 mtx_unlock(&np->n_mtx); 967 /* 968 * np->n_size has already been set to vap->va_size 969 * in ncl_meta_setsize(). We must set it again since 970 * nfs_loadattrcache() could be called through 971 * ncl_meta_setsize() and could modify np->n_size. 972 */ 973 mtx_lock(&np->n_mtx); 974 np->n_vattr.na_size = np->n_size = vap->va_size; 975 mtx_unlock(&np->n_mtx); 976 }; 977 } else { 978 mtx_lock(&np->n_mtx); 979 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && 980 (np->n_flag & NMODIFIED) && vp->v_type == VREG) { 981 mtx_unlock(&np->n_mtx); 982 if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 && 983 (error == EINTR || error == EIO)) 984 return (error); 985 } else 986 mtx_unlock(&np->n_mtx); 987 } 988 error = nfs_setattrrpc(vp, vap, ap->a_cred, td); 989 if (error && vap->va_size != VNOVAL) { 990 mtx_lock(&np->n_mtx); 991 np->n_size = np->n_vattr.na_size = tsize; 992 vnode_pager_setsize(vp, tsize); 993 mtx_unlock(&np->n_mtx); 994 } 995 return (error); 996 } 997 998 /* 999 * Do an nfs setattr rpc. 1000 */ 1001 static int 1002 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 1003 struct thread *td) 1004 { 1005 struct nfsnode *np = VTONFS(vp); 1006 int error, ret, attrflag, i; 1007 struct nfsvattr nfsva; 1008 1009 if (NFS_ISV34(vp)) { 1010 mtx_lock(&np->n_mtx); 1011 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) 1012 np->n_accesscache[i].stamp = 0; 1013 np->n_flag |= NDELEGMOD; 1014 mtx_unlock(&np->n_mtx); 1015 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp); 1016 } 1017 error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag, 1018 NULL); 1019 if (attrflag) { 1020 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1021 if (ret && !error) 1022 error = ret; 1023 } 1024 if (error && NFS_ISV4(vp)) 1025 error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid); 1026 return (error); 1027 } 1028 1029 /* 1030 * nfs lookup call, one step at a time... 
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct mount *mp = dvp->v_mount;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	struct nfsmount *nmp;
	struct nfsnode *np, *newnp;
	int error = 0, attrflag, dattrflag, ltype, ncticks;
	struct thread *td = cnp->cn_thread;
	struct nfsfh *nfhp;
	struct nfsvattr dnfsva, nfsva;
	struct vattr vattr;
	struct timespec nctime;

	*vpp = NULLVP;
	if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	nmp = VFSTONFS(mp);
	np = VTONFS(dvp);

	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&np->n_mtx);
	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
		np->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
	}
	mtx_unlock(&np->n_mtx);

	if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
		return (error);
	error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
	if (error > 0 && error != ENOENT)
		return (error);
	if (error == -1) {
		/*
		 * Lookups of "." are special and always return the
		 * current directory. cache_lookup() already handles
		 * associated locking bookkeeping, etc.
		 */
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
			/* XXX: Is this really correct? */
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}

		/*
		 * We only accept a positive hit in the cache if the
		 * change time of the file matches our cached copy.
		 * Otherwise, we discard the cache entry and fall back
		 * to doing a lookup RPC. We also only trust cache
		 * entries for less than nm_nametimeo seconds.
		 *
		 * To better handle stale file handles and attributes,
		 * clear the attribute cache of this node if it is a
		 * leaf component, part of an open() call, and not
		 * locally modified before fetching the attributes.
		 * This should allow stale file handles to be detected
		 * here where we can fall back to a LOOKUP RPC to
		 * recover rather than having nfs_open() detect the
		 * stale file handle and failing open(2) with ESTALE.
		 */
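		/*
		 * Concretely: a hit younger than nm_nametimeo seconds is
		 * still trusted only if the file's current va_ctime equals
		 * the ctime recorded when the entry was cached (nctime);
		 * holding a delegation (nfscl_nodeleg() == 0) lets us skip
		 * even that GETATTR check.
		 */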
1103 */ 1104 newvp = *vpp; 1105 newnp = VTONFS(newvp); 1106 if (!(nmp->nm_flag & NFSMNT_NOCTO) && 1107 (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) && 1108 !(newnp->n_flag & NMODIFIED)) { 1109 mtx_lock(&newnp->n_mtx); 1110 newnp->n_attrstamp = 0; 1111 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp); 1112 mtx_unlock(&newnp->n_mtx); 1113 } 1114 if (nfscl_nodeleg(newvp, 0) == 0 || 1115 ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) && 1116 VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 && 1117 timespeccmp(&vattr.va_ctime, &nctime, ==))) { 1118 NFSINCRGLOBAL(newnfsstats.lookupcache_hits); 1119 if (cnp->cn_nameiop != LOOKUP && 1120 (flags & ISLASTCN)) 1121 cnp->cn_flags |= SAVENAME; 1122 return (0); 1123 } 1124 cache_purge(newvp); 1125 if (dvp != newvp) 1126 vput(newvp); 1127 else 1128 vrele(newvp); 1129 *vpp = NULLVP; 1130 } else if (error == ENOENT) { 1131 if (dvp->v_iflag & VI_DOOMED) 1132 return (ENOENT); 1133 /* 1134 * We only accept a negative hit in the cache if the 1135 * modification time of the parent directory matches 1136 * the cached copy in the name cache entry. 1137 * Otherwise, we discard all of the negative cache 1138 * entries for this directory. We also only trust 1139 * negative cache entries for up to nm_negnametimeo 1140 * seconds. 1141 */ 1142 if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) && 1143 VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 && 1144 timespeccmp(&vattr.va_mtime, &nctime, ==)) { 1145 NFSINCRGLOBAL(newnfsstats.lookupcache_hits); 1146 return (ENOENT); 1147 } 1148 cache_purge_negative(dvp); 1149 } 1150 1151 error = 0; 1152 newvp = NULLVP; 1153 NFSINCRGLOBAL(newnfsstats.lookupcache_misses); 1154 error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 1155 cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, 1156 NULL); 1157 if (dattrflag) 1158 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1159 if (error) { 1160 if (newvp != NULLVP) { 1161 vput(newvp); 1162 *vpp = NULLVP; 1163 } 1164 1165 if (error != ENOENT) { 1166 if (NFS_ISV4(dvp)) 1167 error = nfscl_maperr(td, error, (uid_t)0, 1168 (gid_t)0); 1169 return (error); 1170 } 1171 1172 /* The requested file was not found. */ 1173 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 1174 (flags & ISLASTCN)) { 1175 /* 1176 * XXX: UFS does a full VOP_ACCESS(dvp, 1177 * VWRITE) here instead of just checking 1178 * MNT_RDONLY. 1179 */ 1180 if (mp->mnt_flag & MNT_RDONLY) 1181 return (EROFS); 1182 cnp->cn_flags |= SAVENAME; 1183 return (EJUSTRETURN); 1184 } 1185 1186 if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE && 1187 dattrflag) { 1188 /* 1189 * Cache the modification time of the parent 1190 * directory from the post-op attributes in 1191 * the name cache entry. The negative cache 1192 * entry will be ignored once the directory 1193 * has changed. Don't bother adding the entry 1194 * if the directory has already changed. 1195 */ 1196 mtx_lock(&np->n_mtx); 1197 if (timespeccmp(&np->n_vattr.na_mtime, 1198 &dnfsva.na_mtime, ==)) { 1199 mtx_unlock(&np->n_mtx); 1200 cache_enter_time(dvp, NULL, cnp, 1201 &dnfsva.na_mtime, NULL); 1202 } else 1203 mtx_unlock(&np->n_mtx); 1204 } 1205 return (ENOENT); 1206 } 1207 1208 /* 1209 * Handle RENAME case... 
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
			FREE((caddr_t)nfhp, M_NFSFH);
			return (EISDIR);
		}
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    LK_EXCLUSIVE);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		*vpp = newvp;
		cnp->cn_flags |= SAVENAME;
		return (0);
	}

	if (flags & ISDOTDOT) {
		ltype = NFSVOPISLOCKED(dvp);
		error = vfs_busy(mp, MBF_NOWAIT);
		if (error != 0) {
			vfs_ref(mp);
			NFSVOPUNLOCK(dvp, 0);
			error = vfs_busy(mp, 0);
			NFSVOPLOCK(dvp, ltype | LK_RETRY);
			vfs_rel(mp);
			if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
				vfs_unbusy(mp);
				error = ENOENT;
			}
			if (error != 0)
				return (error);
		}
		NFSVOPUNLOCK(dvp, 0);
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    cnp->cn_lkflags);
		if (error == 0)
			newvp = NFSTOV(np);
		vfs_unbusy(mp);
		if (newvp != dvp)
			NFSVOPLOCK(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0) {
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
			}
			error = ENOENT;
		}
		if (error != 0)
			return (error);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
		FREE((caddr_t)nfhp, M_NFSFH);
		VREF(dvp);
		newvp = dvp;
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else {
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    cnp->cn_lkflags);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		else if ((flags & (ISLASTCN | ISOPEN)) ==
		    (ISLASTCN | ISOPEN) && !(np->n_flag & NMODIFIED)) {
			/*
			 * Flush the attribute cache when opening a
			 * leaf node to ensure that fresh attributes
			 * are fetched in nfs_open() since we did not
			 * fetch attributes from the LOOKUP reply.
			 */
			mtx_lock(&np->n_mtx);
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
			mtx_unlock(&np->n_mtx);
		}
	}
	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
		cnp->cn_flags |= SAVENAME;
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
	    attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
		cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
		    newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
	*vpp = newvp;
	return (0);
}

/*
 * nfs read call.
 * Just call ncl_bioread() to do the work.
 */
static int
nfs_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VREG:
		return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
	case VDIR:
		return (EISDIR);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * nfs readlink call
 */
static int
nfs_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EINVAL);
	return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
}

/*
 * Do a readlink rpc.
 * Called by ncl_doio() from below the buffer cache.
1343 */ 1344 int 1345 ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1346 { 1347 int error, ret, attrflag; 1348 struct nfsvattr nfsva; 1349 1350 error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva, 1351 &attrflag, NULL); 1352 if (attrflag) { 1353 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1354 if (ret && !error) 1355 error = ret; 1356 } 1357 if (error && NFS_ISV4(vp)) 1358 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1359 return (error); 1360 } 1361 1362 /* 1363 * nfs read rpc call 1364 * Ditto above 1365 */ 1366 int 1367 ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1368 { 1369 int error, ret, attrflag; 1370 struct nfsvattr nfsva; 1371 struct nfsmount *nmp; 1372 1373 nmp = VFSTONFS(vnode_mount(vp)); 1374 error = EIO; 1375 attrflag = 0; 1376 if (NFSHASPNFS(nmp)) 1377 error = nfscl_doiods(vp, uiop, NULL, NULL, 1378 NFSV4OPEN_ACCESSREAD, cred, uiop->uio_td); 1379 NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error); 1380 if (error != 0) 1381 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, 1382 &attrflag, NULL); 1383 if (attrflag) { 1384 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1385 if (ret && !error) 1386 error = ret; 1387 } 1388 if (error && NFS_ISV4(vp)) 1389 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1390 return (error); 1391 } 1392 1393 /* 1394 * nfs write call 1395 */ 1396 int 1397 ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 1398 int *iomode, int *must_commit, int called_from_strategy) 1399 { 1400 struct nfsvattr nfsva; 1401 int error, attrflag, ret; 1402 struct nfsmount *nmp; 1403 1404 nmp = VFSTONFS(vnode_mount(vp)); 1405 error = EIO; 1406 attrflag = 0; 1407 if (NFSHASPNFS(nmp)) 1408 error = nfscl_doiods(vp, uiop, iomode, must_commit, 1409 NFSV4OPEN_ACCESSWRITE, cred, uiop->uio_td); 1410 NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error); 1411 if (error != 0) 1412 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred, 1413 uiop->uio_td, &nfsva, &attrflag, NULL, 1414 called_from_strategy); 1415 if (attrflag) { 1416 if (VTONFS(vp)->n_flag & ND_NFSV4) 1417 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1, 1418 1); 1419 else 1420 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1421 1); 1422 if (ret && !error) 1423 error = ret; 1424 } 1425 if (DOINGASYNC(vp)) 1426 *iomode = NFSWRITE_FILESYNC; 1427 if (error && NFS_ISV4(vp)) 1428 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1429 return (error); 1430 } 1431 1432 /* 1433 * nfs mknod rpc 1434 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1435 * mode set to specify the file type and the size field for rdev. 
static int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct vattr *vap)
{
	struct nfsvattr nfsva, dnfsva;
	struct vnode *newvp = NULL;
	struct nfsnode *np = NULL, *dnp;
	struct nfsfh *nfhp;
	struct vattr vattr;
	int error = 0, attrflag, dattrflag;
	u_int32_t rdev;

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = vap->va_rdev;
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = 0xffffffff;
	else
		return (EOPNOTSUPP);
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
	    rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (!nfhp)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag != 0) {
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
			if (error != 0)
				vput(newvp);
		}
	}
	if (!error) {
		*vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag) {
		dnp->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(struct vop_mknod_args *ap)
{
	return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
}

static struct mtx nfs_cverf_mtx;
MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
    MTX_DEF);

static nfsquad_t
nfs_get_cverf(void)
{
	static nfsquad_t cverf;
	nfsquad_t ret;
	static int cverf_initialized = 0;

	mtx_lock(&nfs_cverf_mtx);
	if (cverf_initialized == 0) {
		cverf.lval[0] = arc4random();
		cverf.lval[1] = arc4random();
		cverf_initialized = 1;
	} else
		cverf.qval++;
	ret = cverf;
	mtx_unlock(&nfs_cverf_mtx);

	return (ret);
}

/*
 * nfs file create call
 */
static int
nfs_create(struct vop_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	struct nfsmount *nmp;
	struct nfsvattr dnfsva, nfsva;
	struct nfsfh *nfhp;
	nfsquad_t cverf;
	int error = 0, attrflag, dattrflag, fmode = 0;
	struct vattr vattr;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
	dnp = VTONFS(dvp);
	nmp = VFSTONFS(vnode_mount(dvp));
again:
	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&dnp->n_mtx);
	while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
		dnp->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
	}
	mtx_unlock(&dnp->n_mtx);

	cverf = nfs_get_cverf();
	error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
	    &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (nfhp == NULL)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp != NULL)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag == 0)
			error = nfsrpc_getattr(newvp, cnp->cn_cred,
			    cnp->cn_thread, &nfsva, NULL);
		if (error == 0)
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (error) {
		if (newvp != NULL) {
			vput(newvp);
			newvp = NULL;
		}
		if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
		    error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
	} else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
		if (nfscl_checksattr(vap, &nfsva)) {
			/*
			 * We are normally called with only a partially
			 * initialized VAP. Since the NFSv3 spec says that
			 * the server may use the file attributes to
			 * store the verifier, the spec requires us to do a
			 * SETATTR RPC. FreeBSD servers store the verifier in
			 * atime, but we can't really assume that all servers
			 * will so we ensure that our SETATTR sets both atime
			 * and mtime.
			 */
			if (vap->va_mtime.tv_sec == VNOVAL)
				vfs_timestamp(&vap->va_mtime);
			if (vap->va_atime.tv_sec == VNOVAL)
				vap->va_atime = vap->va_mtime;
			error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
			    cnp->cn_thread, &nfsva, &attrflag, NULL);
			if (error && (vap->va_uid != (uid_t)VNOVAL ||
			    vap->va_gid != (gid_t)VNOVAL)) {
				/* try again without setting uid/gid */
				vap->va_uid = (uid_t)VNOVAL;
				vap->va_gid = (gid_t)VNOVAL;
				error = nfsrpc_setattr(newvp, vap, NULL,
				    cnp->cn_cred, cnp->cn_thread, &nfsva,
				    &attrflag, NULL);
			}
			if (attrflag)
				(void) nfscl_loadattrcache(&newvp, &nfsva,
				    NULL, NULL, 0, 1);
			if (error != 0)
				vput(newvp);
		}
	}
	if (!error) {
		if ((cnp->cn_flags & MAKEENTRY) && attrflag)
			cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
			    NULL);
		*ap->a_vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag) {
		dnp->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	If a rename is not already in the works
 *	   call nfs_sillyrename() to set it up
 *	else
 *	   do the remove rpc
 */
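/*
 * (The rename performed here is the classic NFS "silly rename": the file
 * is renamed to a hidden ".nfs..."-style name in the same directory and
 * the real remove is deferred until the last close.)
 */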
static int
nfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

	KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
	KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
		/* Do the rpc */
		if (error != EINTR && error != EIO)
			error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
		/*
		 * Kludge City: If the first reply to the remove rpc is lost..
		 *   the reply to the retransmitted request will be ENOENT
		 *   since the file was in fact removed
		 *   Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp);
	mtx_lock(&np->n_mtx);
	np->n_attrstamp = 0;
	mtx_unlock(&np->n_mtx);
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
ncl_removeit(struct sillyrename *sp, struct vnode *vp)
{
	/*
	 * Make sure that the directory vnode is still valid.
	 * XXX we should lock sp->s_dvp here.
	 */
	if (sp->s_dvp->v_type == VBAD)
		return (0);
	return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
	    sp->s_cred, NULL));
}

/*
 * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
 */
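/*
 * The NREMOVEINPROG/NREMOVEWANT handshake below pairs with the msleep()
 * loops in nfs_lookup() and nfs_create() above, which wait for a pending
 * NFSv4 remove to finish before proceeding.
 */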
static int
nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td)
{
	struct nfsvattr dnfsva;
	struct nfsnode *dnp = VTONFS(dvp);
	int error = 0, dattrflag;

	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NREMOVEINPROG;
	mtx_unlock(&dnp->n_mtx);
	error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
	    &dattrflag, NULL);
	mtx_lock(&dnp->n_mtx);
	if ((dnp->n_flag & NREMOVEWANT)) {
		dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
		mtx_unlock(&dnp->n_mtx);
		wakeup((caddr_t)dnp);
	} else {
		dnp->n_flag &= ~NREMOVEINPROG;
		mtx_unlock(&dnp->n_mtx);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag) {
		dnp->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}
	mtx_unlock(&dnp->n_mtx);
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs file rename call
 */
static int
nfs_rename(struct vop_rename_args *ap)
{
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct nfsnode *fnp = VTONFS(ap->a_fvp);
	struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
	struct nfsv4node *newv4 = NULL;
	int error;

	KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
	    (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	if (fvp == tvp) {
		ncl_printf("nfs_rename: fvp == tvp (can't happen)\n");
		error = 0;
		goto out;
	}
	if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
		goto out;

	/*
	 * We have to flush B_DELWRI data prior to renaming
	 * the file. If we don't, the delayed-write buffers
	 * can be flushed out later after the file has gone stale
	 * under NFSV3. NFSV2 does not have this problem because
	 * (as far as I can tell) it flushes dirty buffers more
	 * often.
	 *
	 * Skip the rename operation if the fsync fails, this can happen
	 * due to the server's volume being full, when we pushed out data
	 * that was written back to our cache earlier. Not checking for
	 * this condition can result in potential (silent) data loss.
	 */
	error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
	NFSVOPUNLOCK(fvp, 0);
	if (!error && tvp)
		error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
	if (error)
		goto out;

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 * XXX Can't sillyrename a directory.
	 */
	 */
	if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
	    tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
	    tcnp->cn_thread);

	if (error == 0 && NFS_ISV4(tdvp)) {
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		MALLOC(newv4, struct nfsv4node *,
		    sizeof (struct nfsv4node) +
		    tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
		    M_NFSV4NODE, M_WAITOK);
		mtx_lock(&tdnp->n_mtx);
		mtx_lock(&fnp->n_mtx);
		if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
		    (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
		     NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
		       tcnp->cn_namelen) ||
		     tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
		     NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
		       tdnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
bcopy(tcnp->cn_nameptr, nnn, nnnl);
nnn[nnnl] = '\0';
printf("ren replace=%s\n",nnn);
}
#endif
			FREE((caddr_t)fnp->n_v4, M_NFSV4NODE);
			fnp->n_v4 = newv4;
			newv4 = NULL;
			fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
			fnp->n_v4->n4_namelen = tcnp->cn_namelen;
			NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
			    tdnp->n_fhp->nfh_len);
			NFSBCOPY(tcnp->cn_nameptr,
			    NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
		}
		mtx_unlock(&tdnp->n_mtx);
		mtx_unlock(&fnp->n_mtx);
		if (newv4 != NULL)
			FREE((caddr_t)newv4, M_NFSV4NODE);
	}

	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}

out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename rpc called from nfs_sillyrename() below
 */
static int
nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
    struct sillyrename *sp)
{

	return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
	    sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
	    scnp->cn_thread));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
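 * Both directories are marked NMODIFIED, and each one's attribute cache
 * is reloaded from the returned post-op attributes, or invalidated when
 * the server supplies none.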
1926 */ 1927 static int 1928 nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr, 1929 int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr, 1930 int tnamelen, struct ucred *cred, struct thread *td) 1931 { 1932 struct nfsvattr fnfsva, tnfsva; 1933 struct nfsnode *fdnp = VTONFS(fdvp); 1934 struct nfsnode *tdnp = VTONFS(tdvp); 1935 int error = 0, fattrflag, tattrflag; 1936 1937 error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp, 1938 tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag, 1939 &tattrflag, NULL, NULL); 1940 mtx_lock(&fdnp->n_mtx); 1941 fdnp->n_flag |= NMODIFIED; 1942 if (fattrflag != 0) { 1943 mtx_unlock(&fdnp->n_mtx); 1944 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1); 1945 } else { 1946 fdnp->n_attrstamp = 0; 1947 mtx_unlock(&fdnp->n_mtx); 1948 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp); 1949 } 1950 mtx_lock(&tdnp->n_mtx); 1951 tdnp->n_flag |= NMODIFIED; 1952 if (tattrflag != 0) { 1953 mtx_unlock(&tdnp->n_mtx); 1954 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1); 1955 } else { 1956 tdnp->n_attrstamp = 0; 1957 mtx_unlock(&tdnp->n_mtx); 1958 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 1959 } 1960 if (error && NFS_ISV4(fdvp)) 1961 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1962 return (error); 1963 } 1964 1965 /* 1966 * nfs hard link create call 1967 */ 1968 static int 1969 nfs_link(struct vop_link_args *ap) 1970 { 1971 struct vnode *vp = ap->a_vp; 1972 struct vnode *tdvp = ap->a_tdvp; 1973 struct componentname *cnp = ap->a_cnp; 1974 struct nfsnode *np, *tdnp; 1975 struct nfsvattr nfsva, dnfsva; 1976 int error = 0, attrflag, dattrflag; 1977 1978 if (vp->v_mount != tdvp->v_mount) { 1979 return (EXDEV); 1980 } 1981 1982 /* 1983 * Push all writes to the server, so that the attribute cache 1984 * doesn't get "out of sync" with the server. 1985 * XXX There should be a better way! 1986 */ 1987 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread); 1988 1989 error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 1990 cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag, 1991 &dattrflag, NULL); 1992 tdnp = VTONFS(tdvp); 1993 mtx_lock(&tdnp->n_mtx); 1994 tdnp->n_flag |= NMODIFIED; 1995 if (dattrflag != 0) { 1996 mtx_unlock(&tdnp->n_mtx); 1997 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1); 1998 } else { 1999 tdnp->n_attrstamp = 0; 2000 mtx_unlock(&tdnp->n_mtx); 2001 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 2002 } 2003 if (attrflag) 2004 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 2005 else { 2006 np = VTONFS(vp); 2007 mtx_lock(&np->n_mtx); 2008 np->n_attrstamp = 0; 2009 mtx_unlock(&np->n_mtx); 2010 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 2011 } 2012 /* 2013 * If negative lookup caching is enabled, I might as well 2014 * add an entry for this node. Not necessary for correctness, 2015 * but if negative caching is enabled, then the system 2016 * must care about lookup caching hit rate, so... 
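	 * The entry is stamped with the file's ctime (cache_enter_time())
	 * so that a later lookup can judge whether the cached name is
	 * still believable.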
2017 */ 2018 if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 && 2019 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 2020 cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL); 2021 } 2022 if (error && NFS_ISV4(vp)) 2023 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, 2024 (gid_t)0); 2025 return (error); 2026 } 2027 2028 /* 2029 * nfs symbolic link create call 2030 */ 2031 static int 2032 nfs_symlink(struct vop_symlink_args *ap) 2033 { 2034 struct vnode *dvp = ap->a_dvp; 2035 struct vattr *vap = ap->a_vap; 2036 struct componentname *cnp = ap->a_cnp; 2037 struct nfsvattr nfsva, dnfsva; 2038 struct nfsfh *nfhp; 2039 struct nfsnode *np = NULL, *dnp; 2040 struct vnode *newvp = NULL; 2041 int error = 0, attrflag, dattrflag, ret; 2042 2043 vap->va_type = VLNK; 2044 error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2045 ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, 2046 &nfsva, &nfhp, &attrflag, &dattrflag, NULL); 2047 if (nfhp) { 2048 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2049 &np, NULL, LK_EXCLUSIVE); 2050 if (!ret) 2051 newvp = NFSTOV(np); 2052 else if (!error) 2053 error = ret; 2054 } 2055 if (newvp != NULL) { 2056 if (attrflag) 2057 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 2058 0, 1); 2059 } else if (!error) { 2060 /* 2061 * If we do not have an error and we could not extract the 2062 * newvp from the response due to the request being NFSv2, we 2063 * have to do a lookup in order to obtain a newvp to return. 2064 */ 2065 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2066 cnp->cn_cred, cnp->cn_thread, &np); 2067 if (!error) 2068 newvp = NFSTOV(np); 2069 } 2070 if (error) { 2071 if (newvp) 2072 vput(newvp); 2073 if (NFS_ISV4(dvp)) 2074 error = nfscl_maperr(cnp->cn_thread, error, 2075 vap->va_uid, vap->va_gid); 2076 } else { 2077 *ap->a_vpp = newvp; 2078 } 2079 2080 dnp = VTONFS(dvp); 2081 mtx_lock(&dnp->n_mtx); 2082 dnp->n_flag |= NMODIFIED; 2083 if (dattrflag != 0) { 2084 mtx_unlock(&dnp->n_mtx); 2085 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2086 } else { 2087 dnp->n_attrstamp = 0; 2088 mtx_unlock(&dnp->n_mtx); 2089 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2090 } 2091 /* 2092 * If negative lookup caching is enabled, I might as well 2093 * add an entry for this node. Not necessary for correctness, 2094 * but if negative caching is enabled, then the system 2095 * must care about lookup caching hit rate, so... 
2096 */ 2097 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 && 2098 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 2099 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL); 2100 } 2101 return (error); 2102 } 2103 2104 /* 2105 * nfs make dir call 2106 */ 2107 static int 2108 nfs_mkdir(struct vop_mkdir_args *ap) 2109 { 2110 struct vnode *dvp = ap->a_dvp; 2111 struct vattr *vap = ap->a_vap; 2112 struct componentname *cnp = ap->a_cnp; 2113 struct nfsnode *np = NULL, *dnp; 2114 struct vnode *newvp = NULL; 2115 struct vattr vattr; 2116 struct nfsfh *nfhp; 2117 struct nfsvattr nfsva, dnfsva; 2118 int error = 0, attrflag, dattrflag, ret; 2119 2120 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0) 2121 return (error); 2122 vap->va_type = VDIR; 2123 error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2124 vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp, 2125 &attrflag, &dattrflag, NULL); 2126 dnp = VTONFS(dvp); 2127 mtx_lock(&dnp->n_mtx); 2128 dnp->n_flag |= NMODIFIED; 2129 if (dattrflag != 0) { 2130 mtx_unlock(&dnp->n_mtx); 2131 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2132 } else { 2133 dnp->n_attrstamp = 0; 2134 mtx_unlock(&dnp->n_mtx); 2135 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2136 } 2137 if (nfhp) { 2138 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2139 &np, NULL, LK_EXCLUSIVE); 2140 if (!ret) { 2141 newvp = NFSTOV(np); 2142 if (attrflag) 2143 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 2144 NULL, 0, 1); 2145 } else if (!error) 2146 error = ret; 2147 } 2148 if (!error && newvp == NULL) { 2149 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2150 cnp->cn_cred, cnp->cn_thread, &np); 2151 if (!error) { 2152 newvp = NFSTOV(np); 2153 if (newvp->v_type != VDIR) 2154 error = EEXIST; 2155 } 2156 } 2157 if (error) { 2158 if (newvp) 2159 vput(newvp); 2160 if (NFS_ISV4(dvp)) 2161 error = nfscl_maperr(cnp->cn_thread, error, 2162 vap->va_uid, vap->va_gid); 2163 } else { 2164 /* 2165 * If negative lookup caching is enabled, I might as well 2166 * add an entry for this node. Not necessary for correctness, 2167 * but if negative caching is enabled, then the system 2168 * must care about lookup caching hit rate, so... 
		 */
		if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
		    (cnp->cn_flags & MAKEENTRY) &&
		    attrflag != 0 && dattrflag != 0)
			cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
			    &dnfsva.na_ctime);
		*ap->a_vpp = newvp;
	}
	return (error);
}

/*
 * nfs remove directory call
 */
static int
nfs_rmdir(struct vop_rmdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *dnp;
	struct nfsvattr dnfsva;
	int error, dattrflag;

	if (dvp == vp)
		return (EINVAL);
	error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (dattrflag != 0) {
		mtx_unlock(&dnp->n_mtx);
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	} else {
		dnp->n_attrstamp = 0;
		mtx_unlock(&dnp->n_mtx);
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}

	cache_purge(dvp);
	cache_purge(vp);
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
		    (gid_t)0);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs readdir call
 */
static int
nfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct uio *uio = ap->a_uio;
	ssize_t tresid;
	int error = 0;
	struct vattr vattr;

	if (ap->a_eofflag != NULL)
		*ap->a_eofflag = 0;
	if (vp->v_type != VDIR)
		return (EPERM);

	/*
	 * First, check for hit on the EOF offset cache
	 */
	if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
	    (np->n_flag & NMODIFIED) == 0) {
		if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
			mtx_lock(&np->n_mtx);
			if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
			    !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
				mtx_unlock(&np->n_mtx);
				NFSINCRGLOBAL(newnfsstats.direofcache_hits);
				if (ap->a_eofflag != NULL)
					*ap->a_eofflag = 1;
				return (0);
			} else
				mtx_unlock(&np->n_mtx);
		}
	}

	/*
	 * Call ncl_bioread() to do the real work.
	 */
	tresid = uio->uio_resid;
	error = ncl_bioread(vp, uio, 0, ap->a_cred);

	if (!error && uio->uio_resid == tresid) {
		NFSINCRGLOBAL(newnfsstats.direofcache_misses);
		if (ap->a_eofflag != NULL)
			*ap->a_eofflag = 1;
	}
	return (error);
}

/*
 * Readdir rpc call.
 * Called from below the buffer cache by ncl_doio().
 */
int
ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	nfsuint64 *cookiep, cookie;
	struct nfsnode *dnp = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, eof, attrflag;

	KASSERT(uiop->uio_iovcnt == 1 &&
	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
	    ("nfs readdirrpc bad uio"));

	/*
	 * If there is no cookie, assume directory was stale.
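	 * The cookie for this offset should have been saved when the
	 * preceding block was read; when ncl_getcookie() cannot find one,
	 * NFSERR_BAD_COOKIE is returned and the caller (ncl_doio()) is
	 * expected to invalidate the directory and reread it from the start.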
	 */
	ncl_dircookie_lock(dnp);
	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep) {
		cookie = *cookiep;
		ncl_dircookie_unlock(dnp);
	} else {
		ncl_dircookie_unlock(dnp);
		return (NFSERR_BAD_COOKIE);
	}

	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
		(void)ncl_fsinfo(nmp, vp, cred, td);

	error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
	    &attrflag, &eof, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

	if (!error) {
		/*
		 * We are now either at the end of the directory or have filled
		 * the block.
		 */
		if (eof)
			dnp->n_direofoffset = uiop->uio_offset;
		else {
			if (uiop->uio_resid > 0)
				ncl_printf("EEK! readdirrpc resid > 0\n");
			ncl_dircookie_lock(dnp);
			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
			*cookiep = cookie;
			ncl_dircookie_unlock(dnp);
		}
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
 */
int
ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	nfsuint64 *cookiep, cookie;
	struct nfsnode *dnp = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, attrflag, eof;

	KASSERT(uiop->uio_iovcnt == 1 &&
	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
	    ("nfs readdirplusrpc bad uio"));

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	ncl_dircookie_lock(dnp);
	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep) {
		cookie = *cookiep;
		ncl_dircookie_unlock(dnp);
	} else {
		ncl_dircookie_unlock(dnp);
		return (NFSERR_BAD_COOKIE);
	}

	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
		(void)ncl_fsinfo(nmp, vp, cred, td);
	error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
	    &attrflag, &eof, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

	if (!error) {
		/*
		 * We are now either at the end of the directory or have
		 * filled the block.
		 */
		if (eof)
			dnp->n_direofoffset = uiop->uio_offset;
		else {
			if (uiop->uio_resid > 0)
				ncl_printf("EEK! readdirplusrpc resid > 0\n");
			ncl_dircookie_lock(dnp);
			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
			*cookiep = cookie;
			ncl_dircookie_unlock(dnp);
		}
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * Silly rename. To make the stateless NFS filesystem look a little more
 * like "ufs", a remove of an active vnode is translated to a rename to a
 * funny looking filename that is removed by nfs_inactive on the nfsnode.
 * There is the potential for another process on a different client to
 * create the same funny name between the time that nfs_lookitup() sees
 * the name as unused and the nfs_rename() completes, but...
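 * For example, a remove of a busy file might rename it to something like
 * ".nfs.0004f2a1.01a44.4" (see nfs_sillyrename() below for the exact
 * format); nfs_inactive() then unlinks that name on last close.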
 */
static int
nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct sillyrename *sp;
	struct nfsnode *np;
	int error;
	short pid;
	unsigned int lticks;

	cache_purge(dvp);
	np = VTONFS(vp);
	KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
	MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
	    M_NEWNFSREQ, M_WAITOK);
	sp->s_cred = crhold(cnp->cn_cred);
	sp->s_dvp = dvp;
	VREF(dvp);

	/*
	 * Fudge together a funny name.
	 * The format was changed to accommodate more sillynames per
	 * directory.
	 * The name is now ".nfs.<ticks>.<pid>4.4" (both fields in hex),
	 * where ticks is the CPU tick count since boot; ticks is bumped
	 * until an unused name is found.
	 */
	pid = cnp->cn_thread->td_proc->p_pid;
	lticks = (unsigned int)ticks;
	for ( ; ; ) {
		sp->s_namlen = sprintf(sp->s_name,
		    ".nfs.%08x.%04x4.4", lticks,
		    pid);
		if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		    cnp->cn_thread, NULL))
			break;
		lticks++;
	}
	error = nfs_renameit(dvp, vp, cnp, sp);
	if (error)
		goto bad;
	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
	    cnp->cn_thread, &np);
	np->n_sillyrename = sp;
	return (0);
bad:
	vrele(sp->s_dvp);
	crfree(sp->s_cred);
	free((caddr_t)sp, M_NEWNFSREQ);
	return (error);
}

/*
 * Look up a file name and optionally either update the file handle or
 * allocate an nfsnode, depending on the value of npp.
 * npp == NULL	--> just do the lookup
 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 *		    handled too
 * *npp != NULL --> update the file handle in the vnode
 */
static int
nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
    struct thread *td, struct nfsnode **npp)
{
	struct vnode *newvp = NULL, *vp;
	struct nfsnode *np, *dnp = VTONFS(dvp);
	struct nfsfh *nfhp, *onfhp;
	struct nfsvattr nfsva, dnfsva;
	struct componentname cn;
	int error = 0, attrflag, dattrflag;
	u_int hash;

	error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
	    &nfhp, &attrflag, &dattrflag, NULL);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (npp && !error) {
		if (*npp != NULL) {
			np = *npp;
			vp = NFSTOV(np);
			/*
			 * For NFSv4, check to see if it is the same name and
			 * replace the name, if it is different.
			 */
			if (np->n_v4 != NULL && nfsva.na_type == VREG &&
			    (np->n_v4->n4_namelen != len ||
			     NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
			     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			      dnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (len < 100) ?
len : 99;
bcopy(name, nnn, nnnl);
nnn[nnnl] = '\0';
printf("replace=%s\n",nnn);
}
#endif
				FREE((caddr_t)np->n_v4, M_NFSV4NODE);
				MALLOC(np->n_v4, struct nfsv4node *,
				    sizeof (struct nfsv4node) +
				    dnp->n_fhp->nfh_len + len - 1,
				    M_NFSV4NODE, M_WAITOK);
				np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
				np->n_v4->n4_namelen = len;
				NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
				    dnp->n_fhp->nfh_len);
				NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
			}
			hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
			    FNV1_32_INIT);
			onfhp = np->n_fhp;
			/*
			 * Rehash node for new file handle.
			 */
			vfs_hash_rehash(vp, hash);
			np->n_fhp = nfhp;
			if (onfhp != NULL)
				FREE((caddr_t)onfhp, M_NFSFH);
			newvp = NFSTOV(np);
		} else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
			FREE((caddr_t)nfhp, M_NFSFH);
			VREF(dvp);
			newvp = dvp;
		} else {
			cn.cn_nameptr = name;
			cn.cn_namelen = len;
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
			    &np, NULL, LK_EXCLUSIVE);
			if (error)
				return (error);
			newvp = NFSTOV(np);
		}
		if (!attrflag && *npp == NULL) {
			if (newvp == dvp)
				vrele(newvp);
			else
				vput(newvp);
			return (ENOENT);
		}
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (npp && *npp == NULL) {
		if (error) {
			if (newvp) {
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
			}
		} else
			*npp = np;
	}
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * Nfs Version 3 and 4 commit rpc
 */
int
ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error, attrflag;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		return (0);
	}
	mtx_unlock(&nmp->nm_mtx);
	error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
	    &attrflag, NULL);
	if (attrflag != 0)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
		    0, 1);
	if (error != 0 && NFS_ISV4(vp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * Strategy routine.
 * For async requests when nfsiod(s) are running, queue the request by
 * calling ncl_asyncio(), otherwise just call ncl_doio() to do the
 * request.
 */
static int
nfs_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp = ap->a_bp;
	struct ucred *cr;

	KASSERT(!(bp->b_flags & B_DONE),
	    ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
	BUF_ASSERT_HELD(bp);

	if (bp->b_iocmd == BIO_READ)
		cr = bp->b_rcred;
	else
		cr = bp->b_wcred;

	/*
	 * If the op is asynchronous and an i/o daemon is waiting,
	 * queue the request, wake it up and wait for completion;
	 * otherwise just do it ourselves.
	 */
	if ((bp->b_flags & B_ASYNC) == 0 ||
	    ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
		(void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
	return (0);
}

/*
 * fsync vnode op. Just call ncl_flush() with commit == 1.
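 * With commit == 1, ncl_flush() will also issue Commit RPCs for any
 * buffers the server is holding unstably (B_NEEDCOMMIT) rather than
 * rewriting them.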
 */
/* ARGSUSED */
static int
nfs_fsync(struct vop_fsync_args *ap)
{

	if (ap->a_vp->v_type != VREG) {
		/*
		 * For NFS, metadata is changed synchronously on the server,
		 * so there is nothing to flush. Also, ncl_flush() clears
		 * the NMODIFIED flag and that shouldn't be done here for
		 * directories.
		 */
		return (0);
	}
	return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 * If the called_from_renewthread argument is TRUE, it has been called
 * from the NFSv4 renew thread and, as such, cannot block indefinitely
 * waiting for a buffer write to complete.
 */
int
ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td,
    int commit, int called_from_renewthread)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp;
	int i;
	struct buf *nbp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
	int passone = 1, trycnt = 0;
	u_quad_t off, endoff, toff;
	struct ucred *wcred = NULL;
	struct buf **bvec = NULL;
	struct bufobj *bo;
#ifndef NFS_COMMITBVECSIZ
#define	NFS_COMMITBVECSIZ	20
#endif
	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
	int bvecsize = 0, bveccount;

	if (called_from_renewthread != 0)
		slptimeo = hz;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	if (!commit)
		passone = 0;
	bo = &vp->v_bufobj;
	/*
	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. On the first pass, the byte range is worked out and the commit
	 * rpc is done. On the second pass, ncl_writebp() is called to do the
	 * job.
	 */
again:
	off = (u_quad_t)-1;
	endoff = 0;
	bvecpos = 0;
	if (NFS_ISV34(vp) && commit) {
		if (bvec != NULL && bvec != bvec_on_stack)
			free(bvec, M_TEMP);
		/*
		 * Count up how many buffers are waiting for a commit.
		 */
		bveccount = 0;
		BO_LOCK(bo);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bveccount++;
		}
		/*
		 * Allocate space to remember the list of bufs to commit. It is
		 * important to use M_NOWAIT here to avoid a race with
		 * ncl_write(). If we can't get memory (for whatever reason),
		 * we will end up committing the buffers one-by-one in the
		 * loop below.
		 */
		if (bveccount > NFS_COMMITBVECSIZ) {
			/*
			 * Release the bufobj lock to avoid a lock
			 * order reversal.
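			 * (The M_NOWAIT malloc() may itself acquire locks
			 * that are ordered before the bufobj lock, hence
			 * the drop.)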
2709 */ 2710 BO_UNLOCK(bo); 2711 bvec = (struct buf **) 2712 malloc(bveccount * sizeof(struct buf *), 2713 M_TEMP, M_NOWAIT); 2714 BO_LOCK(bo); 2715 if (bvec == NULL) { 2716 bvec = bvec_on_stack; 2717 bvecsize = NFS_COMMITBVECSIZ; 2718 } else 2719 bvecsize = bveccount; 2720 } else { 2721 bvec = bvec_on_stack; 2722 bvecsize = NFS_COMMITBVECSIZ; 2723 } 2724 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2725 if (bvecpos >= bvecsize) 2726 break; 2727 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2728 nbp = TAILQ_NEXT(bp, b_bobufs); 2729 continue; 2730 } 2731 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) != 2732 (B_DELWRI | B_NEEDCOMMIT)) { 2733 BUF_UNLOCK(bp); 2734 nbp = TAILQ_NEXT(bp, b_bobufs); 2735 continue; 2736 } 2737 BO_UNLOCK(bo); 2738 bremfree(bp); 2739 /* 2740 * Work out if all buffers are using the same cred 2741 * so we can deal with them all with one commit. 2742 * 2743 * NOTE: we are not clearing B_DONE here, so we have 2744 * to do it later on in this routine if we intend to 2745 * initiate I/O on the bp. 2746 * 2747 * Note: to avoid loopback deadlocks, we do not 2748 * assign b_runningbufspace. 2749 */ 2750 if (wcred == NULL) 2751 wcred = bp->b_wcred; 2752 else if (wcred != bp->b_wcred) 2753 wcred = NOCRED; 2754 vfs_busy_pages(bp, 1); 2755 2756 BO_LOCK(bo); 2757 /* 2758 * bp is protected by being locked, but nbp is not 2759 * and vfs_busy_pages() may sleep. We have to 2760 * recalculate nbp. 2761 */ 2762 nbp = TAILQ_NEXT(bp, b_bobufs); 2763 2764 /* 2765 * A list of these buffers is kept so that the 2766 * second loop knows which buffers have actually 2767 * been committed. This is necessary, since there 2768 * may be a race between the commit rpc and new 2769 * uncommitted writes on the file. 2770 */ 2771 bvec[bvecpos++] = bp; 2772 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2773 bp->b_dirtyoff; 2774 if (toff < off) 2775 off = toff; 2776 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2777 if (toff > endoff) 2778 endoff = toff; 2779 } 2780 BO_UNLOCK(bo); 2781 } 2782 if (bvecpos > 0) { 2783 /* 2784 * Commit data on the server, as required. 2785 * If all bufs are using the same wcred, then use that with 2786 * one call for all of them, otherwise commit each one 2787 * separately. 2788 */ 2789 if (wcred != NOCRED) 2790 retv = ncl_commit(vp, off, (int)(endoff - off), 2791 wcred, td); 2792 else { 2793 retv = 0; 2794 for (i = 0; i < bvecpos; i++) { 2795 off_t off, size; 2796 bp = bvec[i]; 2797 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2798 bp->b_dirtyoff; 2799 size = (u_quad_t)(bp->b_dirtyend 2800 - bp->b_dirtyoff); 2801 retv = ncl_commit(vp, off, (int)size, 2802 bp->b_wcred, td); 2803 if (retv) break; 2804 } 2805 } 2806 2807 if (retv == NFSERR_STALEWRITEVERF) 2808 ncl_clearcommit(vp->v_mount); 2809 2810 /* 2811 * Now, either mark the blocks I/O done or mark the 2812 * blocks dirty, depending on whether the commit 2813 * succeeded. 2814 */ 2815 for (i = 0; i < bvecpos; i++) { 2816 bp = bvec[i]; 2817 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 2818 if (retv) { 2819 /* 2820 * Error, leave B_DELWRI intact 2821 */ 2822 vfs_unbusy_pages(bp); 2823 brelse(bp); 2824 } else { 2825 /* 2826 * Success, remove B_DELWRI ( bundirty() ). 2827 * 2828 * b_dirtyoff/b_dirtyend seem to be NFS 2829 * specific. We should probably move that 2830 * into bundirty(). 
XXX 2831 */ 2832 bufobj_wref(bo); 2833 bp->b_flags |= B_ASYNC; 2834 bundirty(bp); 2835 bp->b_flags &= ~B_DONE; 2836 bp->b_ioflags &= ~BIO_ERROR; 2837 bp->b_dirtyoff = bp->b_dirtyend = 0; 2838 bufdone(bp); 2839 } 2840 } 2841 } 2842 2843 /* 2844 * Start/do any write(s) that are required. 2845 */ 2846 loop: 2847 BO_LOCK(bo); 2848 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2849 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2850 if (waitfor != MNT_WAIT || passone) 2851 continue; 2852 2853 error = BUF_TIMELOCK(bp, 2854 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2855 BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo); 2856 if (error == 0) { 2857 BUF_UNLOCK(bp); 2858 goto loop; 2859 } 2860 if (error == ENOLCK) { 2861 error = 0; 2862 goto loop; 2863 } 2864 if (called_from_renewthread != 0) { 2865 /* 2866 * Return EIO so the flush will be retried 2867 * later. 2868 */ 2869 error = EIO; 2870 goto done; 2871 } 2872 if (newnfs_sigintr(nmp, td)) { 2873 error = EINTR; 2874 goto done; 2875 } 2876 if (slpflag == PCATCH) { 2877 slpflag = 0; 2878 slptimeo = 2 * hz; 2879 } 2880 goto loop; 2881 } 2882 if ((bp->b_flags & B_DELWRI) == 0) 2883 panic("nfs_fsync: not dirty"); 2884 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) { 2885 BUF_UNLOCK(bp); 2886 continue; 2887 } 2888 BO_UNLOCK(bo); 2889 bremfree(bp); 2890 if (passone || !commit) 2891 bp->b_flags |= B_ASYNC; 2892 else 2893 bp->b_flags |= B_ASYNC; 2894 bwrite(bp); 2895 if (newnfs_sigintr(nmp, td)) { 2896 error = EINTR; 2897 goto done; 2898 } 2899 goto loop; 2900 } 2901 if (passone) { 2902 passone = 0; 2903 BO_UNLOCK(bo); 2904 goto again; 2905 } 2906 if (waitfor == MNT_WAIT) { 2907 while (bo->bo_numoutput) { 2908 error = bufobj_wwait(bo, slpflag, slptimeo); 2909 if (error) { 2910 BO_UNLOCK(bo); 2911 if (called_from_renewthread != 0) { 2912 /* 2913 * Return EIO so that the flush will be 2914 * retried later. 2915 */ 2916 error = EIO; 2917 goto done; 2918 } 2919 error = newnfs_sigintr(nmp, td); 2920 if (error) 2921 goto done; 2922 if (slpflag == PCATCH) { 2923 slpflag = 0; 2924 slptimeo = 2 * hz; 2925 } 2926 BO_LOCK(bo); 2927 } 2928 } 2929 if (bo->bo_dirty.bv_cnt != 0 && commit) { 2930 BO_UNLOCK(bo); 2931 goto loop; 2932 } 2933 /* 2934 * Wait for all the async IO requests to drain 2935 */ 2936 BO_UNLOCK(bo); 2937 mtx_lock(&np->n_mtx); 2938 while (np->n_directio_asyncwr > 0) { 2939 np->n_flag |= NFSYNCWAIT; 2940 error = newnfs_msleep(td, &np->n_directio_asyncwr, 2941 &np->n_mtx, slpflag | (PRIBIO + 1), 2942 "nfsfsync", 0); 2943 if (error) { 2944 if (newnfs_sigintr(nmp, td)) { 2945 mtx_unlock(&np->n_mtx); 2946 error = EINTR; 2947 goto done; 2948 } 2949 } 2950 } 2951 mtx_unlock(&np->n_mtx); 2952 } else 2953 BO_UNLOCK(bo); 2954 if (NFSHASPNFS(nmp)) { 2955 nfscl_layoutcommit(vp, td); 2956 /* 2957 * Invalidate the attribute cache, since writes to a DS 2958 * won't update the size attribute. 2959 */ 2960 mtx_lock(&np->n_mtx); 2961 np->n_attrstamp = 0; 2962 } else 2963 mtx_lock(&np->n_mtx); 2964 if (np->n_flag & NWRITEERR) { 2965 error = np->n_error; 2966 np->n_flag &= ~NWRITEERR; 2967 } 2968 if (commit && bo->bo_dirty.bv_cnt == 0 && 2969 bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0) 2970 np->n_flag &= ~NMODIFIED; 2971 mtx_unlock(&np->n_mtx); 2972 done: 2973 if (bvec != NULL && bvec != bvec_on_stack) 2974 free(bvec, M_TEMP); 2975 if (error == 0 && commit != 0 && waitfor == MNT_WAIT && 2976 (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 || 2977 np->n_directio_asyncwr != 0) && trycnt++ < 5) { 2978 /* try, try again... 
*/ 2979 passone = 1; 2980 wcred = NULL; 2981 bvec = NULL; 2982 bvecsize = 0; 2983 printf("try%d\n", trycnt); 2984 goto again; 2985 } 2986 return (error); 2987 } 2988 2989 /* 2990 * NFS advisory byte-level locks. 2991 */ 2992 static int 2993 nfs_advlock(struct vop_advlock_args *ap) 2994 { 2995 struct vnode *vp = ap->a_vp; 2996 struct ucred *cred; 2997 struct nfsnode *np = VTONFS(ap->a_vp); 2998 struct proc *p = (struct proc *)ap->a_id; 2999 struct thread *td = curthread; /* XXX */ 3000 struct vattr va; 3001 int ret, error = EOPNOTSUPP; 3002 u_quad_t size; 3003 3004 if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) { 3005 if (vp->v_type != VREG) 3006 return (EINVAL); 3007 if ((ap->a_flags & F_POSIX) != 0) 3008 cred = p->p_ucred; 3009 else 3010 cred = td->td_ucred; 3011 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 3012 if (vp->v_iflag & VI_DOOMED) { 3013 NFSVOPUNLOCK(vp, 0); 3014 return (EBADF); 3015 } 3016 3017 /* 3018 * If this is unlocking a write locked region, flush and 3019 * commit them before unlocking. This is required by 3020 * RFC3530 Sec. 9.3.2. 3021 */ 3022 if (ap->a_op == F_UNLCK && 3023 nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id, 3024 ap->a_flags)) 3025 (void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0); 3026 3027 /* 3028 * Loop around doing the lock op, while a blocking lock 3029 * must wait for the lock op to succeed. 3030 */ 3031 do { 3032 ret = nfsrpc_advlock(vp, np->n_size, ap->a_op, 3033 ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags); 3034 if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 3035 ap->a_op == F_SETLK) { 3036 NFSVOPUNLOCK(vp, 0); 3037 error = nfs_catnap(PZERO | PCATCH, ret, 3038 "ncladvl"); 3039 if (error) 3040 return (EINTR); 3041 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 3042 if (vp->v_iflag & VI_DOOMED) { 3043 NFSVOPUNLOCK(vp, 0); 3044 return (EBADF); 3045 } 3046 } 3047 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 3048 ap->a_op == F_SETLK); 3049 if (ret == NFSERR_DENIED) { 3050 NFSVOPUNLOCK(vp, 0); 3051 return (EAGAIN); 3052 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) { 3053 NFSVOPUNLOCK(vp, 0); 3054 return (ret); 3055 } else if (ret != 0) { 3056 NFSVOPUNLOCK(vp, 0); 3057 return (EACCES); 3058 } 3059 3060 /* 3061 * Now, if we just got a lock, invalidate data in the buffer 3062 * cache, as required, so that the coherency conforms with 3063 * RFC3530 Sec. 9.3.2. 3064 */ 3065 if (ap->a_op == F_SETLK) { 3066 if ((np->n_flag & NMODIFIED) == 0) { 3067 np->n_attrstamp = 0; 3068 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3069 ret = VOP_GETATTR(vp, &va, cred); 3070 } 3071 if ((np->n_flag & NMODIFIED) || ret || 3072 np->n_change != va.va_filerev) { 3073 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1); 3074 np->n_attrstamp = 0; 3075 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3076 ret = VOP_GETATTR(vp, &va, cred); 3077 if (!ret) { 3078 np->n_mtime = va.va_mtime; 3079 np->n_change = va.va_filerev; 3080 } 3081 } 3082 } 3083 NFSVOPUNLOCK(vp, 0); 3084 return (0); 3085 } else if (!NFS_ISV4(vp)) { 3086 error = NFSVOPLOCK(vp, LK_SHARED); 3087 if (error) 3088 return (error); 3089 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3090 size = VTONFS(vp)->n_size; 3091 NFSVOPUNLOCK(vp, 0); 3092 error = lf_advlock(ap, &(vp->v_lockf), size); 3093 } else { 3094 if (nfs_advlock_p != NULL) 3095 error = nfs_advlock_p(ap); 3096 else { 3097 NFSVOPUNLOCK(vp, 0); 3098 error = ENOLCK; 3099 } 3100 } 3101 } 3102 return (error); 3103 } 3104 3105 /* 3106 * NFS advisory byte-level locks. 
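 * This is the asynchronous variant; it is not supported for NFSv4 and
 * otherwise only handles the local-locking (NFSMNT_NOLOCKD) case.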
3107 */ 3108 static int 3109 nfs_advlockasync(struct vop_advlockasync_args *ap) 3110 { 3111 struct vnode *vp = ap->a_vp; 3112 u_quad_t size; 3113 int error; 3114 3115 if (NFS_ISV4(vp)) 3116 return (EOPNOTSUPP); 3117 error = NFSVOPLOCK(vp, LK_SHARED); 3118 if (error) 3119 return (error); 3120 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3121 size = VTONFS(vp)->n_size; 3122 NFSVOPUNLOCK(vp, 0); 3123 error = lf_advlockasync(ap, &(vp->v_lockf), size); 3124 } else { 3125 NFSVOPUNLOCK(vp, 0); 3126 error = EOPNOTSUPP; 3127 } 3128 return (error); 3129 } 3130 3131 /* 3132 * Print out the contents of an nfsnode. 3133 */ 3134 static int 3135 nfs_print(struct vop_print_args *ap) 3136 { 3137 struct vnode *vp = ap->a_vp; 3138 struct nfsnode *np = VTONFS(vp); 3139 3140 ncl_printf("\tfileid %ld fsid 0x%x", 3141 np->n_vattr.na_fileid, np->n_vattr.na_fsid); 3142 if (vp->v_type == VFIFO) 3143 fifo_printinfo(vp); 3144 printf("\n"); 3145 return (0); 3146 } 3147 3148 /* 3149 * This is the "real" nfs::bwrite(struct buf*). 3150 * We set B_CACHE if this is a VMIO buffer. 3151 */ 3152 int 3153 ncl_writebp(struct buf *bp, int force __unused, struct thread *td) 3154 { 3155 int s; 3156 int oldflags = bp->b_flags; 3157 #if 0 3158 int retv = 1; 3159 off_t off; 3160 #endif 3161 3162 BUF_ASSERT_HELD(bp); 3163 3164 if (bp->b_flags & B_INVAL) { 3165 brelse(bp); 3166 return(0); 3167 } 3168 3169 bp->b_flags |= B_CACHE; 3170 3171 /* 3172 * Undirty the bp. We will redirty it later if the I/O fails. 3173 */ 3174 3175 s = splbio(); 3176 bundirty(bp); 3177 bp->b_flags &= ~B_DONE; 3178 bp->b_ioflags &= ~BIO_ERROR; 3179 bp->b_iocmd = BIO_WRITE; 3180 3181 bufobj_wref(bp->b_bufobj); 3182 curthread->td_ru.ru_oublock++; 3183 splx(s); 3184 3185 /* 3186 * Note: to avoid loopback deadlocks, we do not 3187 * assign b_runningbufspace. 3188 */ 3189 vfs_busy_pages(bp, 1); 3190 3191 BUF_KERNPROC(bp); 3192 bp->b_iooffset = dbtob(bp->b_blkno); 3193 bstrategy(bp); 3194 3195 if( (oldflags & B_ASYNC) == 0) { 3196 int rtval = bufwait(bp); 3197 3198 if (oldflags & B_DELWRI) { 3199 s = splbio(); 3200 reassignbuf(bp); 3201 splx(s); 3202 } 3203 brelse(bp); 3204 return (rtval); 3205 } 3206 3207 return (0); 3208 } 3209 3210 /* 3211 * nfs special file access vnode op. 3212 * Essentially just get vattr and then imitate iaccess() since the device is 3213 * local to the client. 3214 */ 3215 static int 3216 nfsspec_access(struct vop_access_args *ap) 3217 { 3218 struct vattr *vap; 3219 struct ucred *cred = ap->a_cred; 3220 struct vnode *vp = ap->a_vp; 3221 accmode_t accmode = ap->a_accmode; 3222 struct vattr vattr; 3223 int error; 3224 3225 /* 3226 * Disallow write attempts on filesystems mounted read-only; 3227 * unless the file is a socket, fifo, or a block or character 3228 * device resident on the filesystem. 3229 */ 3230 if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3231 switch (vp->v_type) { 3232 case VREG: 3233 case VDIR: 3234 case VLNK: 3235 return (EROFS); 3236 default: 3237 break; 3238 } 3239 } 3240 vap = &vattr; 3241 error = VOP_GETATTR(vp, vap, cred); 3242 if (error) 3243 goto out; 3244 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid, 3245 accmode, cred, NULL); 3246 out: 3247 return error; 3248 } 3249 3250 /* 3251 * Read wrapper for fifos. 3252 */ 3253 static int 3254 nfsfifo_read(struct vop_read_args *ap) 3255 { 3256 struct nfsnode *np = VTONFS(ap->a_vp); 3257 int error; 3258 3259 /* 3260 * Set access flag. 
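	 * NACC/NUPD note pending access/update times; nfsfifo_close()
	 * pushes them to the server via VOP_SETATTR() on last close.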
3261 */ 3262 mtx_lock(&np->n_mtx); 3263 np->n_flag |= NACC; 3264 vfs_timestamp(&np->n_atim); 3265 mtx_unlock(&np->n_mtx); 3266 error = fifo_specops.vop_read(ap); 3267 return error; 3268 } 3269 3270 /* 3271 * Write wrapper for fifos. 3272 */ 3273 static int 3274 nfsfifo_write(struct vop_write_args *ap) 3275 { 3276 struct nfsnode *np = VTONFS(ap->a_vp); 3277 3278 /* 3279 * Set update flag. 3280 */ 3281 mtx_lock(&np->n_mtx); 3282 np->n_flag |= NUPD; 3283 vfs_timestamp(&np->n_mtim); 3284 mtx_unlock(&np->n_mtx); 3285 return(fifo_specops.vop_write(ap)); 3286 } 3287 3288 /* 3289 * Close wrapper for fifos. 3290 * 3291 * Update the times on the nfsnode then do fifo close. 3292 */ 3293 static int 3294 nfsfifo_close(struct vop_close_args *ap) 3295 { 3296 struct vnode *vp = ap->a_vp; 3297 struct nfsnode *np = VTONFS(vp); 3298 struct vattr vattr; 3299 struct timespec ts; 3300 3301 mtx_lock(&np->n_mtx); 3302 if (np->n_flag & (NACC | NUPD)) { 3303 vfs_timestamp(&ts); 3304 if (np->n_flag & NACC) 3305 np->n_atim = ts; 3306 if (np->n_flag & NUPD) 3307 np->n_mtim = ts; 3308 np->n_flag |= NCHG; 3309 if (vrefcnt(vp) == 1 && 3310 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3311 VATTR_NULL(&vattr); 3312 if (np->n_flag & NACC) 3313 vattr.va_atime = np->n_atim; 3314 if (np->n_flag & NUPD) 3315 vattr.va_mtime = np->n_mtim; 3316 mtx_unlock(&np->n_mtx); 3317 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3318 goto out; 3319 } 3320 } 3321 mtx_unlock(&np->n_mtx); 3322 out: 3323 return (fifo_specops.vop_close(ap)); 3324 } 3325 3326 /* 3327 * Just call ncl_writebp() with the force argument set to 1. 3328 * 3329 * NOTE: B_DONE may or may not be set in a_bp on call. 3330 */ 3331 static int 3332 nfs_bwrite(struct buf *bp) 3333 { 3334 3335 return (ncl_writebp(bp, 1, curthread)); 3336 } 3337 3338 struct buf_ops buf_ops_newnfs = { 3339 .bop_name = "buf_ops_nfs", 3340 .bop_write = nfs_bwrite, 3341 .bop_strategy = bufstrategy, 3342 .bop_sync = bufsync, 3343 .bop_bdflush = bufbdflush, 3344 }; 3345 3346 /* 3347 * Cloned from vop_stdlock(), and then the ugly hack added. 3348 */ 3349 static int 3350 nfs_lock1(struct vop_lock1_args *ap) 3351 { 3352 struct vnode *vp = ap->a_vp; 3353 int error = 0; 3354 3355 /* 3356 * Since vfs_hash_get() calls vget() and it will no longer work 3357 * for FreeBSD8 with flags == 0, I can only think of this horrible 3358 * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER 3359 * and then handle it here. All I want for this case is a v_usecount 3360 * on the vnode to use for recovery, while another thread might 3361 * hold a lock on the vnode. I have the other threads blocked, so 3362 * there isn't any race problem. 
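	 * (Recovery, per the above, only needs the v_usecount reference,
	 * so LK_EXCLOTHER is deliberately not granted a real lock here.)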
3363 */ 3364 if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) { 3365 if ((ap->a_flags & LK_INTERLOCK) == 0) 3366 panic("ncllock1"); 3367 if ((vp->v_iflag & VI_DOOMED)) 3368 error = ENOENT; 3369 VI_UNLOCK(vp); 3370 return (error); 3371 } 3372 return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp), 3373 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file, 3374 ap->a_line)); 3375 } 3376 3377 static int 3378 nfs_getacl(struct vop_getacl_args *ap) 3379 { 3380 int error; 3381 3382 if (ap->a_type != ACL_TYPE_NFS4) 3383 return (EOPNOTSUPP); 3384 error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3385 NULL); 3386 if (error > NFSERR_STALE) { 3387 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3388 error = EPERM; 3389 } 3390 return (error); 3391 } 3392 3393 static int 3394 nfs_setacl(struct vop_setacl_args *ap) 3395 { 3396 int error; 3397 3398 if (ap->a_type != ACL_TYPE_NFS4) 3399 return (EOPNOTSUPP); 3400 error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3401 NULL); 3402 if (error > NFSERR_STALE) { 3403 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3404 error = EPERM; 3405 } 3406 return (error); 3407 } 3408 3409 /* 3410 * Return POSIX pathconf information applicable to nfs filesystems. 3411 */ 3412 static int 3413 nfs_pathconf(struct vop_pathconf_args *ap) 3414 { 3415 struct nfsv3_pathconf pc; 3416 struct nfsvattr nfsva; 3417 struct vnode *vp = ap->a_vp; 3418 struct thread *td = curthread; 3419 int attrflag, error; 3420 3421 if (NFS_ISV4(vp) || (NFS_ISV3(vp) && (ap->a_name == _PC_LINK_MAX || 3422 ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED || 3423 ap->a_name == _PC_NO_TRUNC))) { 3424 /* 3425 * Since only the above 4 a_names are returned by the NFSv3 3426 * Pathconf RPC, there is no point in doing it for others. 3427 */ 3428 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva, 3429 &attrflag, NULL); 3430 if (attrflag != 0) 3431 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 3432 1); 3433 if (error != 0) 3434 return (error); 3435 } else { 3436 /* 3437 * For NFSv2 (or NFSv3 when not one of the above 4 a_names), 3438 * just fake them. 3439 */ 3440 pc.pc_linkmax = LINK_MAX; 3441 pc.pc_namemax = NFS_MAXNAMLEN; 3442 pc.pc_notrunc = 1; 3443 pc.pc_chownrestricted = 1; 3444 pc.pc_caseinsensitive = 0; 3445 pc.pc_casepreserving = 1; 3446 error = 0; 3447 } 3448 switch (ap->a_name) { 3449 case _PC_LINK_MAX: 3450 *ap->a_retval = pc.pc_linkmax; 3451 break; 3452 case _PC_NAME_MAX: 3453 *ap->a_retval = pc.pc_namemax; 3454 break; 3455 case _PC_PATH_MAX: 3456 *ap->a_retval = PATH_MAX; 3457 break; 3458 case _PC_PIPE_BUF: 3459 *ap->a_retval = PIPE_BUF; 3460 break; 3461 case _PC_CHOWN_RESTRICTED: 3462 *ap->a_retval = pc.pc_chownrestricted; 3463 break; 3464 case _PC_NO_TRUNC: 3465 *ap->a_retval = pc.pc_notrunc; 3466 break; 3467 case _PC_ACL_EXTENDED: 3468 *ap->a_retval = 0; 3469 break; 3470 case _PC_ACL_NFS4: 3471 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 && 3472 NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL)) 3473 *ap->a_retval = 1; 3474 else 3475 *ap->a_retval = 0; 3476 break; 3477 case _PC_ACL_PATH_MAX: 3478 if (NFS_ISV4(vp)) 3479 *ap->a_retval = ACL_MAX_ENTRIES; 3480 else 3481 *ap->a_retval = 3; 3482 break; 3483 case _PC_MAC_PRESENT: 3484 *ap->a_retval = 0; 3485 break; 3486 case _PC_ASYNC_IO: 3487 /* _PC_ASYNC_IO should have been handled by upper layers. 
		 */
		KASSERT(0, ("_PC_ASYNC_IO should not get here"));
		error = EINVAL;
		break;
	case _PC_PRIO_IO:
		*ap->a_retval = 0;
		break;
	case _PC_SYNC_IO:
		*ap->a_retval = 0;
		break;
	case _PC_ALLOC_SIZE_MIN:
		*ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
		break;
	case _PC_FILESIZEBITS:
		if (NFS_ISV34(vp))
			*ap->a_retval = 64;
		else
			*ap->a_retval = 32;
		break;
	case _PC_REC_INCR_XFER_SIZE:
		*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
		break;
	case _PC_REC_MAX_XFER_SIZE:
		*ap->a_retval = -1;	/* means ``unlimited'' */
		break;
	case _PC_REC_MIN_XFER_SIZE:
		*ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
		break;
	case _PC_REC_XFER_ALIGN:
		*ap->a_retval = PAGE_SIZE;
		break;
	case _PC_SYMLINK_MAX:
		*ap->a_retval = NFS_MAXPATHLEN;
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
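
/*
 * Illustrative sketch (never compiled): the three calling conventions of
 * nfs_lookitup() documented above it.  The helper name and the bare
 * componentname argument are hypothetical; real callers pass pieces of a
 * lookup or sillyrename operation.
 */
#ifdef notdef
static void
example_lookitup_usage(struct vnode *dvp, struct componentname *cnp)
{
	struct nfsnode *np;

	/* npp == NULL: existence check only, as in nfs_sillyrename(). */
	(void) nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, NULL);

	/* *npp == NULL: allocate a fresh nfsnode for the name. */
	np = NULL;
	(void) nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, &np);

	/* *npp != NULL: refresh the file handle in the existing node. */
	(void) nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, &np);
}
#endif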