/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * vnode op calls for Sun NFS version 2, 3 and 4
 */

#include "opt_kdtrace.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_kdtrace.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

#include <nfs/nfs_lock.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_accesscache_flush_probe_func_t
		dtrace_nfscl_accesscache_flush_done_probe;
uint32_t	nfscl_accesscache_flush_done_id;

dtrace_nfsclient_accesscache_get_probe_func_t
		dtrace_nfscl_accesscache_get_hit_probe,
		dtrace_nfscl_accesscache_get_miss_probe;
uint32_t	nfscl_accesscache_get_hit_id;
uint32_t	nfscl_accesscache_get_miss_id;

dtrace_nfsclient_accesscache_load_probe_func_t
		dtrace_nfscl_accesscache_load_done_probe;
uint32_t	nfscl_accesscache_load_done_id;
#endif /* !KDTRACE_HOOKS */

/* Defs */
#define	TRUE	1
#define	FALSE	0

extern struct nfsstats newnfsstats;
extern int nfsrv_useacl;
MALLOC_DECLARE(M_NEWNFSREQ);

/*
 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
 * calls are not in getblk() and brelse() so that they would not be necessary
 * here.
 */
#ifndef B_VMIO
#define	vfs_busy_pages(bp, f)
#endif

static vop_read_t	nfsfifo_read;
static vop_write_t	nfsfifo_write;
static vop_close_t	nfsfifo_close;
static int	nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
		    struct thread *);
static vop_lookup_t	nfs_lookup;
static vop_create_t	nfs_create;
static vop_mknod_t	nfs_mknod;
static vop_open_t	nfs_open;
static vop_pathconf_t	nfs_pathconf;
static vop_close_t	nfs_close;
static vop_access_t	nfs_access;
static vop_getattr_t	nfs_getattr;
static vop_setattr_t	nfs_setattr;
static vop_read_t	nfs_read;
static vop_fsync_t	nfs_fsync;
static vop_remove_t	nfs_remove;
static vop_link_t	nfs_link;
static vop_rename_t	nfs_rename;
static vop_mkdir_t	nfs_mkdir;
static vop_rmdir_t	nfs_rmdir;
static vop_symlink_t	nfs_symlink;
static vop_readdir_t	nfs_readdir;
static vop_strategy_t	nfs_strategy;
static vop_lock1_t	nfs_lock1;
static int	nfs_lookitup(struct vnode *, char *, int,
		    struct ucred *, struct thread *, struct nfsnode **);
static int	nfs_sillyrename(struct vnode *, struct vnode *,
		    struct componentname *);
static vop_access_t	nfsspec_access;
static vop_readlink_t	nfs_readlink;
static vop_print_t	nfs_print;
static vop_advlock_t	nfs_advlock;
static vop_advlockasync_t	nfs_advlockasync;
static vop_getacl_t	nfs_getacl;
static vop_setacl_t	nfs_setacl;

/*
 * Global vfs data structures for nfs
 */
struct vop_vector newnfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		nfs_access,
	.vop_advlock =		nfs_advlock,
	.vop_advlockasync =	nfs_advlockasync,
	.vop_close =		nfs_close,
	.vop_create =		nfs_create,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_getpages =		ncl_getpages,
	.vop_putpages =		ncl_putpages,
	.vop_inactive =		ncl_inactive,
	.vop_link =		nfs_link,
	.vop_lock1 =		nfs_lock1,
	.vop_lookup =		nfs_lookup,
	.vop_mkdir =		nfs_mkdir,
	.vop_mknod =		nfs_mknod,
	.vop_open =		nfs_open,
	.vop_pathconf =		nfs_pathconf,
	.vop_print =		nfs_print,
	.vop_read =		nfs_read,
	.vop_readdir =		nfs_readdir,
	.vop_readlink =		nfs_readlink,
	.vop_reclaim =		ncl_reclaim,
	.vop_remove =		nfs_remove,
	.vop_rename =		nfs_rename,
	.vop_rmdir =		nfs_rmdir,
	.vop_setattr =		nfs_setattr,
	.vop_strategy =		nfs_strategy,
	.vop_symlink =		nfs_symlink,
	.vop_write =		ncl_write,
	.vop_getacl =		nfs_getacl,
	.vop_setacl =		nfs_setacl,
};

struct vop_vector newnfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_access =		nfsspec_access,
	.vop_close =		nfsfifo_close,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_inactive =		ncl_inactive,
	.vop_print =		nfs_print,
	.vop_read =		nfsfifo_read,
	.vop_reclaim =		ncl_reclaim,
	.vop_setattr =		nfs_setattr,
	.vop_write =		nfsfifo_write,
};

static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct vattr *vap);
static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td);
static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
    char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
    char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
    struct componentname *scnp, struct sillyrename *sp);

/*
 * Global variables
 */
#define	DIRHDSIZ	(sizeof (struct dirent) - (MAXNAMLEN + 1))

SYSCTL_DECL(_vfs_nfs);

static int	nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
    &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");

static int	nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
    &nfs_prime_access_cache, 0,
    "Prime NFS ACCESS cache when fetching attributes");

static int	newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
    &newnfs_commit_on_close, 0, "write+commit on close, else only write");

static int	nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
    &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");

int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
    &newnfs_directio_enable, 0, "Enable NFS directio");

/*
 * This sysctl allows other processes to mmap a file that has been opened
 * O_DIRECT by a process.  In general, having processes mmap the file while
 * Direct IO is in progress can lead to data inconsistencies, but we allow
 * it by default to prevent denial of service: a malicious user could
 * otherwise open files O_DIRECT and thereby prevent other users from
 * mmap'ing them.  "Protected" environments where stricter consistency
 * guarantees are required can disable this knob.  The process that opened
 * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
 * O_DIRECT open() is not meaningful.
 */
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
    &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");

#if 0
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
    &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");

SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
    &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

#define	NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY		\
    | NFSACCESS_EXTEND | NFSACCESS_EXECUTE				\
    | NFSACCESS_DELETE | NFSACCESS_LOOKUP)

/*
 * SMP Locking Note :
 * The list of locks after the description of the lock is the ordering
 * of other locks acquired with the lock held.
 * np->n_mtx : Protects the fields in the nfsnode.
 *	VM Object Lock
 *	VI_MTX (acquired indirectly)
 * nmp->nm_mtx : Protects the fields in the nfsmount.
 *	rep->r_mtx
 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
 *	nmp->nm_mtx
 *	rep->r_mtx
 * rep->r_mtx : Protects the fields in an nfsreq.
 */
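
/*
 * Do an NFSv3/v4 ACCESS RPC and cache the result in the nfsnode.
 * The access cache is a small fixed-size per-node array keyed by uid;
 * when no entry matches the credential's uid, the least recently
 * stamped entry (lrupos) is recycled.
 */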
static int
nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
    struct ucred *cred, u_int32_t *retmode)
{
	int error = 0, attrflag, i, lrupos;
	u_int32_t rmode;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;

	error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
	    &rmode, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
	if (!error) {
		lrupos = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (np->n_accesscache[i].uid == cred->cr_uid) {
				np->n_accesscache[i].mode = rmode;
				np->n_accesscache[i].stamp = time_second;
				break;
			}
			if (i > 0 && np->n_accesscache[i].stamp <
			    np->n_accesscache[lrupos].stamp)
				lrupos = i;
		}
		if (i == NFS_ACCESSCACHESIZE) {
			np->n_accesscache[lrupos].uid = cred->cr_uid;
			np->n_accesscache[lrupos].mode = rmode;
			np->n_accesscache[lrupos].stamp = time_second;
		}
		mtx_unlock(&np->n_mtx);
		if (retmode != NULL)
			*retmode = rmode;
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
#ifdef KDTRACE_HOOKS
	if (error != 0)
		KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
		    error);
#endif
	return (error);
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3 and 4, use the access rpc to check accessibility. If
 * file modes are changed on the server, accesses might still fail later.
 */
static int
nfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0, i, gotahit;
	u_int32_t mode, wmode, rmode;
	int v34 = NFS_ISV34(vp);
	struct nfsnode *np = VTONFS(vp);

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
	    VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
	    VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * For nfs v3 or v4, check to see if we have done this recently, and if
	 * so return our cached result instead of making an ACCESS call.
	 * If not, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about.
	 */
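	/*
	 * Map the vnode access mode bits to NFSv3/v4 ACCESS request bits.
	 * Directories use NFSACCESS_LOOKUP instead of NFSACCESS_EXECUTE,
	 * and VDELETE_CHILD on a directory maps to NFSACCESS_MODIFY.
	 */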
	if (v34) {
		if (ap->a_accmode & VREAD)
			mode = NFSACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_EXECUTE;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
		} else {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_LOOKUP;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
			if (ap->a_accmode & VDELETE_CHILD)
				mode |= NFSACCESS_MODIFY;
		}
		/* XXX safety belt, only make blanket request if caching */
		if (nfsaccess_cache_timeout > 0) {
			wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
			    NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
			    NFSACCESS_DELETE | NFSACCESS_LOOKUP;
		} else {
			wmode = mode;
		}

		/*
		 * Does our cached result allow us to give a definite yes to
		 * this request?
		 */
		gotahit = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
				if (time_second < (np->n_accesscache[i].stamp
				    + nfsaccess_cache_timeout) &&
				    (np->n_accesscache[i].mode & mode) == mode) {
					NFSINCRGLOBAL(newnfsstats.accesscache_hits);
					gotahit = 1;
				}
				break;
			}
		}
		mtx_unlock(&np->n_mtx);
#ifdef KDTRACE_HOOKS
		if (gotahit != 0)
			KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
			    ap->a_cred->cr_uid, mode);
		else
			KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
			    ap->a_cred->cr_uid, mode);
#endif
		if (gotahit == 0) {
			/*
			 * Either a no, or a don't know.  Go to the wire.
			 */
			NFSINCRGLOBAL(newnfsstats.accesscache_misses);
			error = nfs34_access_otw(vp, wmode, ap->a_td,
			    ap->a_cred, &rmode);
			if (!error &&
			    (rmode & mode) != mode)
				error = EACCES;
		}
		return (error);
	} else {
		if ((error = nfsspec_access(ap)) != 0) {
			return (error);
		}
		/*
		 * Attempt to prevent a mapped root from accessing a file
		 * which it shouldn't.  We try to read a byte from the file
		 * if the user is root and the file is not zero length.
		 * After calling nfsspec_access, we should have the correct
		 * file size cached.
		 */
		mtx_lock(&np->n_mtx);
		if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
		    && VTONFS(vp)->n_size > 0) {
			struct iovec aiov;
			struct uio auio;
			char buf[1];

			mtx_unlock(&np->n_mtx);
			aiov.iov_base = buf;
			aiov.iov_len = 1;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = 0;
			auio.uio_resid = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = ap->a_td;

			if (vp->v_type == VREG)
				error = ncl_readrpc(vp, &auio, ap->a_cred);
			else if (vp->v_type == VDIR) {
				char *bp;

				bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
				aiov.iov_base = bp;
				aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
				error = ncl_readdirrpc(vp, &auio, ap->a_cred,
				    ap->a_td);
				free(bp, M_TEMP);
			} else if (vp->v_type == VLNK)
				error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
			else
				error = EACCES;
		} else
			mtx_unlock(&np->n_mtx);
		return (error);
	}
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
static int
nfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;
	int fmode = ap->a_mode;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
		return (EOPNOTSUPP);

	/*
	 * For NFSv4, we need to do the Open Op before cache validation,
	 * so that we conform to RFC3530 Sec. 9.3.1.
	 */
	if (NFS_ISV4(vp)) {
		error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
		if (error) {
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
			return (error);
		}
	}

	/*
	 * Now, if this Open will be doing reading, re-validate/flush the
	 * cache, so that Close/Open coherency is maintained.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
		if (error == EINTR || error == EIO) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		if (vp->v_type == VDIR)
			np->n_direofoffset = 0;
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		if (NFS_ISV4(vp))
			np->n_change = vattr.va_filerev;
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error) {
			if (NFS_ISV4(vp))
				(void) nfsrpc_close(vp, 0, ap->a_td);
			return (error);
		}
		mtx_lock(&np->n_mtx);
		if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
		    NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error == EINTR || error == EIO) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			if (NFS_ISV4(vp))
				np->n_change = vattr.va_filerev;
		}
	}

	/*
	 * If the object has >= 1 O_DIRECT active opens, we disable caching.
	 */
	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
	    (vp->v_type == VREG)) {
		if (np->n_directio_opens == 0) {
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_flag |= NNONCACHE;
		}
		np->n_directio_opens++;
	}
	mtx_unlock(&np->n_mtx);
	vnode_create_vobject(vp, vattr.va_size, ap->a_td);
	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 *  should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough"). Changing the last argument to ncl_flush()
 *                     to a 1 would force a commit operation, if it is felt
 *                     a commit is necessary now.
 * for NFS Version 4 - flush the dirty buffers and commit them, if
 *                     nfscl_mustflush() says this is necessary.
 *                     It is necessary if there is no write delegation held,
 *                     in order to satisfy open/close coherency.
 *                     If the file isn't cached on local stable storage,
 *                     it may be necessary in order to detect "out of space"
 *                     errors from the server, if the write delegation
 *                     issued by the server doesn't allow the file to grow.
 */
/* ARGSUSED */
static int
nfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;
	struct ucred *cred;
	int error = 0, ret, localcred = 0;
	int fmode = ap->a_fflag;

	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF))
		return (0);
	/*
	 * During shutdown, a_cred isn't valid, so just use root.
	 */
	if (ap->a_cred == NOCRED) {
		cred = newnfs_getcred();
		localcred = 1;
	} else {
		cred = ap->a_cred;
	}
	if (vp->v_type == VREG) {
		/*
		 * Examine and clean dirty pages, regardless of NMODIFIED.
		 * This closes a major hole in close-to-open consistency.
		 * We want to push out all dirty pages (and buffers) on
		 * close, regardless of whether they were dirtied by
		 * mmap'ed writes or via write().
		 */
		if (nfs_clean_pages_on_close && vp->v_object) {
			VM_OBJECT_LOCK(vp->v_object);
			vm_object_page_clean(vp->v_object, 0, 0, 0);
			VM_OBJECT_UNLOCK(vp->v_object);
		}
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
			if (NFS_ISV3(vp)) {
				/*
				 * Under NFSv3 we have dirty buffers to
				 * dispose of.  We must flush them to the
				 * NFS server.  We have the option of waiting
				 * all the way through the commit rpc or just
				 * waiting for the initial write.  The default
				 * is to only wait through the initial write
				 * so the data is in the server's cache,
				 * which is roughly similar to the state a
				 * standard disk subsystem leaves the file in
				 * on close().
				 *
				 * We cannot clear the NMODIFIED bit in
				 * np->n_flag due to potential races with
				 * other processes, and certainly cannot
				 * clear it if we don't commit.
				 * These races occur when there is no longer
				 * the old traditional vnode locking
				 * implemented for Vnode Ops.
				 */
				int cm = newnfs_commit_on_close ? 1 : 0;
				error = ncl_flush(vp, MNT_WAIT, cred,
				    ap->a_td, cm, 0);
				/* np->n_flag &= ~NMODIFIED; */
			} else if (NFS_ISV4(vp)) {
				if (nfscl_mustflush(vp) != 0) {
					int cm = newnfs_commit_on_close ? 1 : 0;
					error = ncl_flush(vp, MNT_WAIT, cred,
					    ap->a_td, cm, 0);
					/*
					 * as above w.r.t races when clearing
					 * NMODIFIED.
					 * np->n_flag &= ~NMODIFIED;
					 */
				}
			} else
				error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			mtx_lock(&np->n_mtx);
		}
		/*
		 * Invalidate the attribute cache in all cases.
		 * An open is going to fetch fresh attrs any way, other procs
		 * on this node that have file open will be forced to do an
		 * otw attr fetch, but this is safe.
		 * --> A user found that their RPC count dropped by 20% when
		 * this was commented out and I can't see any requirement
		 * for it, so I've disabled it when negative lookups are
		 * enabled. (What does this have to do with negative lookup
		 * caching? Well nothing, except it was reported by the
		 * same user that needed negative lookup caching and I wanted
		 * there to be a way to disable it to see if it
		 * is the cause of some caching/coherency issue that might
		 * crop up.)
		 */
		if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
		mtx_unlock(&np->n_mtx);
	}

	if (NFS_ISV4(vp)) {
		/*
		 * Get attributes so "change" is up to date.
		 */
		if (error == 0 && nfscl_mustflush(vp) != 0) {
			ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
			    NULL);
			if (!ret) {
				np->n_change = nfsva.na_filerev;
				(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
				    NULL, 0, 0);
			}
		}

		/*
		 * and do the close.
		 */
		ret = nfsrpc_close(vp, 0, ap->a_td);
		if (!error && ret)
			error = ret;
		if (error)
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
	}
	if (newnfs_directio_enable)
		KASSERT((np->n_directio_asyncwr == 0),
		    ("nfs_close: dirty unflushed (%d) directio buffers\n",
		    np->n_directio_asyncwr));
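	/*
	 * An O_DIRECT open of this file is being closed; drop the count
	 * of O_DIRECT openers and re-enable caching once the last one
	 * closes.
	 */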
	if (newnfs_directio_enable && (fmode & O_DIRECT) &&
	    (vp->v_type == VREG)) {
		mtx_lock(&np->n_mtx);
		KASSERT((np->n_directio_opens > 0),
		    ("nfs_close: unexpected value (0) of n_directio_opens\n"));
		np->n_directio_opens--;
		if (np->n_directio_opens == 0)
			np->n_flag &= ~NNONCACHE;
		mtx_unlock(&np->n_mtx);
	}
	if (localcred)
		NFSFREECRED(cred);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
static int
nfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct nfsvattr nfsva;
	struct vattr *vap = ap->a_vap;
	struct vattr vattr;

	/*
	 * Update local times for special files.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;
	mtx_unlock(&np->n_mtx);
	/*
	 * First look in the cache.
	 */
	if (ncl_getattrcache(vp, &vattr) == 0) {
		vap->va_type = vattr.va_type;
		vap->va_mode = vattr.va_mode;
		vap->va_nlink = vattr.va_nlink;
		vap->va_uid = vattr.va_uid;
		vap->va_gid = vattr.va_gid;
		vap->va_fsid = vattr.va_fsid;
		vap->va_fileid = vattr.va_fileid;
		vap->va_size = vattr.va_size;
		vap->va_blocksize = vattr.va_blocksize;
		vap->va_atime = vattr.va_atime;
		vap->va_mtime = vattr.va_mtime;
		vap->va_ctime = vattr.va_ctime;
		vap->va_gen = vattr.va_gen;
		vap->va_flags = vattr.va_flags;
		vap->va_rdev = vattr.va_rdev;
		vap->va_bytes = vattr.va_bytes;
		vap->va_filerev = vattr.va_filerev;
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
		return (0);
	}
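
	/*
	 * If priming of the access cache is enabled, do a blanket ACCESS
	 * RPC instead of a plain GETATTR; the ACCESS reply also carries
	 * attributes, so on success both the access and attribute caches
	 * are refreshed with a single round trip.
	 */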
	if (NFS_ISV34(vp) && nfs_prime_access_cache &&
	    nfsaccess_cache_timeout > 0) {
		NFSINCRGLOBAL(newnfsstats.accesscache_misses);
		nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
		if (ncl_getattrcache(vp, ap->a_vap) == 0) {
			nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
			return (0);
		}
	}
	error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
	if (!error)
		error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
	if (!error) {
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * nfs setattr call.
 */
static int
nfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct thread *td = curthread;	/* XXX */
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize;

#ifndef nolint
	tsize = (u_quad_t)0;
#endif

	/*
	 * Setting of flags and marking of atimes are not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			/*
			 * We run vnode_pager_setsize() early (why?),
			 * we must set np->n_size now to avoid vinvalbuf
			 * V_SAVE races that might setsize a lower
			 * value.
			 */
			mtx_lock(&np->n_mtx);
			tsize = np->n_size;
			mtx_unlock(&np->n_mtx);
			error = ncl_meta_setsize(vp, ap->a_cred, td,
			    vap->va_size);
			mtx_lock(&np->n_mtx);
			if (np->n_flag & NMODIFIED) {
				tsize = np->n_size;
				mtx_unlock(&np->n_mtx);
				if (vap->va_size == 0)
					error = ncl_vinvalbuf(vp, 0, td, 1);
				else
					error = ncl_vinvalbuf(vp, V_SAVE,
					    td, 1);
				if (error) {
					vnode_pager_setsize(vp, tsize);
					return (error);
				}
				/*
				 * Call nfscl_delegmodtime() to set the
				 * modify time locally, as required.
				 */
				nfscl_delegmodtime(vp);
			} else
				mtx_unlock(&np->n_mtx);
			/*
			 * np->n_size has already been set to vap->va_size
			 * in ncl_meta_setsize(). We must set it again since
			 * nfs_loadattrcache() could be called through
			 * ncl_meta_setsize() and could modify np->n_size.
			 */
			mtx_lock(&np->n_mtx);
			np->n_vattr.na_size = np->n_size = vap->va_size;
			mtx_unlock(&np->n_mtx);
		}
	} else {
		mtx_lock(&np->n_mtx);
		if ((vap->va_mtime.tv_sec != VNOVAL ||
		    vap->va_atime.tv_sec != VNOVAL) &&
		    (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
			mtx_unlock(&np->n_mtx);
			if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
			    (error == EINTR || error == EIO))
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
	if (error && vap->va_size != VNOVAL) {
		mtx_lock(&np->n_mtx);
		np->n_size = np->n_vattr.na_size = tsize;
		vnode_pager_setsize(vp, tsize);
		mtx_unlock(&np->n_mtx);
	}
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
static int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	struct nfsnode *np = VTONFS(vp);
	int error, ret, attrflag, i;
	struct nfsvattr nfsva;

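	/*
	 * A SETATTR can change the outcome of ACCESS checks, so for v3
	 * and v4 flush every entry in the access cache before doing the
	 * RPC; NDELEGMOD is set to record the local modification.
	 */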
	if (NFS_ISV34(vp)) {
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
			np->n_accesscache[i].stamp = 0;
		np->n_flag |= NDELEGMOD;
		mtx_unlock(&np->n_mtx);
		KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	}
	error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
	    NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct mount *mp = dvp->v_mount;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	struct nfsmount *nmp;
	struct nfsnode *np, *newnp;
	int error = 0, attrflag, dattrflag, ltype, ncticks;
	struct thread *td = cnp->cn_thread;
	struct nfsfh *nfhp;
	struct nfsvattr dnfsva, nfsva;
	struct vattr vattr;
	struct timespec nctime;

	*vpp = NULLVP;
	if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	nmp = VFSTONFS(mp);
	np = VTONFS(dvp);

	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&np->n_mtx);
	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
		np->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
	}
	mtx_unlock(&np->n_mtx);

	if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
		return (error);
	error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
	if (error > 0 && error != ENOENT)
		return (error);
	if (error == -1) {
		/*
		 * Lookups of "." are special and always return the
		 * current directory.  cache_lookup() already handles
		 * associated locking bookkeeping, etc.
		 */
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
			/* XXX: Is this really correct? */
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}

		/*
		 * We only accept a positive hit in the cache if the
		 * change time of the file matches our cached copy.
		 * Otherwise, we discard the cache entry and fallback
		 * to doing a lookup RPC.  We also only trust cache
		 * entries for less than nm_nametimeo seconds.
		 *
		 * To better handle stale file handles and attributes,
		 * clear the attribute cache of this node if it is a
		 * leaf component, part of an open() call, and not
		 * locally modified before fetching the attributes.
		 * This should allow stale file handles to be detected
		 * here where we can fall back to a LOOKUP RPC to
		 * recover rather than having nfs_open() detect the
		 * stale file handle and failing open(2) with ESTALE.
		 */
		newvp = *vpp;
		newnp = VTONFS(newvp);
		if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
		    (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
		    !(newnp->n_flag & NMODIFIED)) {
			mtx_lock(&newnp->n_mtx);
			newnp->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
			mtx_unlock(&newnp->n_mtx);
		}
		if (nfscl_nodeleg(newvp, 0) == 0 ||
		    ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
		    VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
		    timespeccmp(&vattr.va_ctime, &nctime, ==))) {
			NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}
		cache_purge(newvp);
		if (dvp != newvp)
			vput(newvp);
		else
			vrele(newvp);
		*vpp = NULLVP;
	} else if (error == ENOENT) {
		if (dvp->v_iflag & VI_DOOMED)
			return (ENOENT);
		/*
		 * We only accept a negative hit in the cache if the
		 * modification time of the parent directory matches
		 * the cached copy in the name cache entry.
		 * Otherwise, we discard all of the negative cache
		 * entries for this directory.  We also only trust
		 * negative cache entries for up to nm_negnametimeo
		 * seconds.
		 */
		if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
		    VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
		    timespeccmp(&vattr.va_mtime, &nctime, ==)) {
			NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
			return (ENOENT);
		}
		cache_purge_negative(dvp);
	}

	error = 0;
	newvp = NULLVP;
	NFSINCRGLOBAL(newnfsstats.lookupcache_misses);
	error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
	    NULL);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (error) {
		if (newvp != NULLVP) {
			vput(newvp);
			*vpp = NULLVP;
		}

		if (error != ENOENT) {
			if (NFS_ISV4(dvp))
				error = nfscl_maperr(td, error, (uid_t)0,
				    (gid_t)0);
			return (error);
		}

		/* The requested file was not found. */
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN)) {
			/*
			 * XXX: UFS does a full VOP_ACCESS(dvp,
			 * VWRITE) here instead of just checking
			 * MNT_RDONLY.
			 */
			if (mp->mnt_flag & MNT_RDONLY)
				return (EROFS);
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}

		if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE &&
		    dattrflag) {
			/*
			 * Cache the modification time of the parent
			 * directory from the post-op attributes in
			 * the name cache entry.  The negative cache
			 * entry will be ignored once the directory
			 * has changed.  Don't bother adding the entry
			 * if the directory has already changed.
			 */
			mtx_lock(&np->n_mtx);
			if (timespeccmp(&np->n_vattr.na_mtime,
			    &dnfsva.na_mtime, ==)) {
				mtx_unlock(&np->n_mtx);
				cache_enter_time(dvp, NULL, cnp,
				    &dnfsva.na_mtime, NULL);
			} else
				mtx_unlock(&np->n_mtx);
		}
		return (ENOENT);
	}

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
			FREE((caddr_t)nfhp, M_NFSFH);
			return (EISDIR);
		}
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
		    LK_EXCLUSIVE);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		*vpp = newvp;
		cnp->cn_flags |= SAVENAME;
		return (0);
	}

1184 */ 1185 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) { 1186 if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { 1187 FREE((caddr_t)nfhp, M_NFSFH); 1188 return (EISDIR); 1189 } 1190 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1191 LK_EXCLUSIVE); 1192 if (error) 1193 return (error); 1194 newvp = NFSTOV(np); 1195 if (attrflag) 1196 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1197 0, 1); 1198 *vpp = newvp; 1199 cnp->cn_flags |= SAVENAME; 1200 return (0); 1201 } 1202 1203 if (flags & ISDOTDOT) { 1204 ltype = NFSVOPISLOCKED(dvp); 1205 error = vfs_busy(mp, MBF_NOWAIT); 1206 if (error != 0) { 1207 vfs_ref(mp); 1208 NFSVOPUNLOCK(dvp, 0); 1209 error = vfs_busy(mp, 0); 1210 NFSVOPLOCK(dvp, ltype | LK_RETRY); 1211 vfs_rel(mp); 1212 if (error == 0 && (dvp->v_iflag & VI_DOOMED)) { 1213 vfs_unbusy(mp); 1214 error = ENOENT; 1215 } 1216 if (error != 0) 1217 return (error); 1218 } 1219 NFSVOPUNLOCK(dvp, 0); 1220 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1221 cnp->cn_lkflags); 1222 if (error == 0) 1223 newvp = NFSTOV(np); 1224 vfs_unbusy(mp); 1225 if (newvp != dvp) 1226 NFSVOPLOCK(dvp, ltype | LK_RETRY); 1227 if (dvp->v_iflag & VI_DOOMED) { 1228 if (error == 0) { 1229 if (newvp == dvp) 1230 vrele(newvp); 1231 else 1232 vput(newvp); 1233 } 1234 error = ENOENT; 1235 } 1236 if (error != 0) 1237 return (error); 1238 if (attrflag) 1239 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1240 0, 1); 1241 } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { 1242 FREE((caddr_t)nfhp, M_NFSFH); 1243 VREF(dvp); 1244 newvp = dvp; 1245 if (attrflag) 1246 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1247 0, 1); 1248 } else { 1249 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL, 1250 cnp->cn_lkflags); 1251 if (error) 1252 return (error); 1253 newvp = NFSTOV(np); 1254 if (attrflag) 1255 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 1256 0, 1); 1257 else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) && 1258 !(np->n_flag & NMODIFIED)) { 1259 /* 1260 * Flush the attribute cache when opening a 1261 * leaf node to ensure that fresh attributes 1262 * are fetched in nfs_open() since we did not 1263 * fetch attributes from the LOOKUP reply. 1264 */ 1265 mtx_lock(&np->n_mtx); 1266 np->n_attrstamp = 0; 1267 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp); 1268 mtx_unlock(&np->n_mtx); 1269 } 1270 } 1271 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 1272 cnp->cn_flags |= SAVENAME; 1273 if ((cnp->cn_flags & MAKEENTRY) && 1274 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) && 1275 attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0)) 1276 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 1277 newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime); 1278 *vpp = newvp; 1279 return (0); 1280 } 1281 1282 /* 1283 * nfs read call. 1284 * Just call ncl_bioread() to do the work. 1285 */ 1286 static int 1287 nfs_read(struct vop_read_args *ap) 1288 { 1289 struct vnode *vp = ap->a_vp; 1290 1291 switch (vp->v_type) { 1292 case VREG: 1293 return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 1294 case VDIR: 1295 return (EISDIR); 1296 default: 1297 return (EOPNOTSUPP); 1298 } 1299 } 1300 1301 /* 1302 * nfs readlink call 1303 */ 1304 static int 1305 nfs_readlink(struct vop_readlink_args *ap) 1306 { 1307 struct vnode *vp = ap->a_vp; 1308 1309 if (vp->v_type != VLNK) 1310 return (EINVAL); 1311 return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred)); 1312 } 1313 1314 /* 1315 * Do a readlink rpc. 1316 * Called by ncl_doio() from below the buffer cache. 
1317 */ 1318 int 1319 ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1320 { 1321 int error, ret, attrflag; 1322 struct nfsvattr nfsva; 1323 1324 error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva, 1325 &attrflag, NULL); 1326 if (attrflag) { 1327 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1328 if (ret && !error) 1329 error = ret; 1330 } 1331 if (error && NFS_ISV4(vp)) 1332 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1333 return (error); 1334 } 1335 1336 /* 1337 * nfs read rpc call 1338 * Ditto above 1339 */ 1340 int 1341 ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1342 { 1343 int error, ret, attrflag; 1344 struct nfsvattr nfsva; 1345 1346 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, &attrflag, 1347 NULL); 1348 if (attrflag) { 1349 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1350 if (ret && !error) 1351 error = ret; 1352 } 1353 if (error && NFS_ISV4(vp)) 1354 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1355 return (error); 1356 } 1357 1358 /* 1359 * nfs write call 1360 */ 1361 int 1362 ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 1363 int *iomode, int *must_commit, int called_from_strategy) 1364 { 1365 struct nfsvattr nfsva; 1366 int error = 0, attrflag, ret; 1367 1368 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred, 1369 uiop->uio_td, &nfsva, &attrflag, NULL, called_from_strategy); 1370 if (attrflag) { 1371 if (VTONFS(vp)->n_flag & ND_NFSV4) 1372 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1, 1373 1); 1374 else 1375 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1376 1); 1377 if (ret && !error) 1378 error = ret; 1379 } 1380 if (DOINGASYNC(vp)) 1381 *iomode = NFSWRITE_FILESYNC; 1382 if (error && NFS_ISV4(vp)) 1383 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); 1384 return (error); 1385 } 1386 1387 /* 1388 * nfs mknod rpc 1389 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1390 * mode set to specify the file type and the size field for rdev. 
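/*
 * Return a create verifier for an NFSv3/v4 exclusive create.  The
 * verifier is seeded from arc4random() on first use and simply
 * incremented afterwards, so every call returns a distinct value.
 */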
static nfsquad_t
nfs_get_cverf(void)
{
	static nfsquad_t cverf;
	nfsquad_t ret;
	static int cverf_initialized = 0;

	mtx_lock(&nfs_cverf_mtx);
	if (cverf_initialized == 0) {
		cverf.lval[0] = arc4random();
		cverf.lval[1] = arc4random();
		cverf_initialized = 1;
	} else
		cverf.qval++;
	ret = cverf;
	mtx_unlock(&nfs_cverf_mtx);

	return (ret);
}

/*
 * nfs file create call
 */
static int
nfs_create(struct vop_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	struct nfsmount *nmp;
	struct nfsvattr dnfsva, nfsva;
	struct nfsfh *nfhp;
	nfsquad_t cverf;
	int error = 0, attrflag, dattrflag, fmode = 0;
	struct vattr vattr;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
	dnp = VTONFS(dvp);
	nmp = VFSTONFS(vnode_mount(dvp));
again:
	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&dnp->n_mtx);
	while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
		dnp->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
	}
	mtx_unlock(&dnp->n_mtx);

	cverf = nfs_get_cverf();
	error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
	    &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (nfhp == NULL)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp != NULL)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag)
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (error) {
		if (newvp != NULL) {
			vput(newvp);
			newvp = NULL;
		}
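		/*
		 * A server that does not support exclusive create
		 * replies NFSERR_NOTSUPP, so retry the create without
		 * O_EXCL.
		 */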
		if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
		    error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
	} else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
		if (nfscl_checksattr(vap, &nfsva)) {
			/*
			 * We are normally called with only a partially
			 * initialized VAP. Since the NFSv3 spec says that
			 * the server may use the file attributes to
			 * store the verifier, the spec requires us to do a
			 * SETATTR RPC. FreeBSD servers store the verifier in
			 * atime, but we can't really assume that all servers
			 * will so we ensure that our SETATTR sets both atime
			 * and mtime.
			 */
			if (vap->va_mtime.tv_sec == VNOVAL)
				vfs_timestamp(&vap->va_mtime);
			if (vap->va_atime.tv_sec == VNOVAL)
				vap->va_atime = vap->va_mtime;
			error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
			    cnp->cn_thread, &nfsva, &attrflag, NULL);
			if (error && (vap->va_uid != (uid_t)VNOVAL ||
			    vap->va_gid != (gid_t)VNOVAL)) {
				/* try again without setting uid/gid */
				vap->va_uid = (uid_t)VNOVAL;
				vap->va_gid = (gid_t)VNOVAL;
				error = nfsrpc_setattr(newvp, vap, NULL,
				    cnp->cn_cred, cnp->cn_thread, &nfsva,
				    &attrflag, NULL);
			}
			if (attrflag)
				(void) nfscl_loadattrcache(&newvp, &nfsva,
				    NULL, NULL, 0, 1);
			if (error != 0)
				vput(newvp);
		}
	}
	if (!error) {
		if ((cnp->cn_flags & MAKEENTRY) && attrflag)
			cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
			    NULL);
		*ap->a_vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag) {
		dnp->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
	}
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	     call nfs_sillyrename() to set it up
 *	  else
 *	     do the remove rpc
 */
static int
nfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

	KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
	KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
		/* Do the rpc */
		if (error != EINTR && error != EIO)
			error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
		/*
		 * Kludge City: If the first reply to the remove rpc is lost..
		 *   the reply to the retransmitted request will be ENOENT
		 *   since the file was in fact removed
		 *   Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp);
	mtx_lock(&np->n_mtx);
	np->n_attrstamp = 0;
	mtx_unlock(&np->n_mtx);
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
ncl_removeit(struct sillyrename *sp, struct vnode *vp)
{
	/*
	 * Make sure that the directory vnode is still valid.
	 * XXX we should lock sp->s_dvp here.
	 */
	if (sp->s_dvp->v_type == VBAD)
		return (0);
	return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
	    sp->s_cred, NULL));
}

/*
 * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
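 * NREMOVEINPROG is set around the RPC so that an NFSv4 lookup or
 * create in the same directory will wait for the remove to complete.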
1692 */ 1693 static int 1694 nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, 1695 int namelen, struct ucred *cred, struct thread *td) 1696 { 1697 struct nfsvattr dnfsva; 1698 struct nfsnode *dnp = VTONFS(dvp); 1699 int error = 0, dattrflag; 1700 1701 mtx_lock(&dnp->n_mtx); 1702 dnp->n_flag |= NREMOVEINPROG; 1703 mtx_unlock(&dnp->n_mtx); 1704 error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva, 1705 &dattrflag, NULL); 1706 mtx_lock(&dnp->n_mtx); 1707 if ((dnp->n_flag & NREMOVEWANT)) { 1708 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG); 1709 mtx_unlock(&dnp->n_mtx); 1710 wakeup((caddr_t)dnp); 1711 } else { 1712 dnp->n_flag &= ~NREMOVEINPROG; 1713 mtx_unlock(&dnp->n_mtx); 1714 } 1715 if (dattrflag) 1716 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 1717 mtx_lock(&dnp->n_mtx); 1718 dnp->n_flag |= NMODIFIED; 1719 if (!dattrflag) { 1720 dnp->n_attrstamp = 0; 1721 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 1722 } 1723 mtx_unlock(&dnp->n_mtx); 1724 if (error && NFS_ISV4(dvp)) 1725 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1726 return (error); 1727 } 1728 1729 /* 1730 * nfs file rename call 1731 */ 1732 static int 1733 nfs_rename(struct vop_rename_args *ap) 1734 { 1735 struct vnode *fvp = ap->a_fvp; 1736 struct vnode *tvp = ap->a_tvp; 1737 struct vnode *fdvp = ap->a_fdvp; 1738 struct vnode *tdvp = ap->a_tdvp; 1739 struct componentname *tcnp = ap->a_tcnp; 1740 struct componentname *fcnp = ap->a_fcnp; 1741 struct nfsnode *fnp = VTONFS(ap->a_fvp); 1742 struct nfsnode *tdnp = VTONFS(ap->a_tdvp); 1743 struct nfsv4node *newv4 = NULL; 1744 int error; 1745 1746 KASSERT((tcnp->cn_flags & HASBUF) != 0 && 1747 (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name")); 1748 /* Check for cross-device rename */ 1749 if ((fvp->v_mount != tdvp->v_mount) || 1750 (tvp && (fvp->v_mount != tvp->v_mount))) { 1751 error = EXDEV; 1752 goto out; 1753 } 1754 1755 if (fvp == tvp) { 1756 ncl_printf("nfs_rename: fvp == tvp (can't happen)\n"); 1757 error = 0; 1758 goto out; 1759 } 1760 if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0) 1761 goto out; 1762 1763 /* 1764 * We have to flush B_DELWRI data prior to renaming 1765 * the file. If we don't, the delayed-write buffers 1766 * can be flushed out later after the file has gone stale 1767 * under NFSV3. NFSV2 does not have this problem because 1768 * ( as far as I can tell ) it flushes dirty buffers more 1769 * often. 1770 * 1771 * Skip the rename operation if the fsync fails, this can happen 1772 * due to the server's volume being full, when we pushed out data 1773 * that was written back to our cache earlier. Not checking for 1774 * this condition can result in potential (silent) data loss. 1775 */ 1776 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread); 1777 NFSVOPUNLOCK(fvp, 0); 1778 if (!error && tvp) 1779 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread); 1780 if (error) 1781 goto out; 1782 1783 /* 1784 * If the tvp exists and is in use, sillyrename it before doing the 1785 * rename of the new file over it. 1786 * XXX Can't sillyrename a directory. 
1787 */ 1788 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename && 1789 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1790 vput(tvp); 1791 tvp = NULL; 1792 } 1793 1794 error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1795 tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1796 tcnp->cn_thread); 1797 1798 if (error == 0 && NFS_ISV4(tdvp)) { 1799 /* 1800 * For NFSv4, check to see if it is the same name and 1801 * replace the name, if it is different. 1802 */ 1803 MALLOC(newv4, struct nfsv4node *, 1804 sizeof (struct nfsv4node) + 1805 tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1, 1806 M_NFSV4NODE, M_WAITOK); 1807 mtx_lock(&tdnp->n_mtx); 1808 mtx_lock(&fnp->n_mtx); 1809 if (fnp->n_v4 != NULL && fvp->v_type == VREG && 1810 (fnp->n_v4->n4_namelen != tcnp->cn_namelen || 1811 NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4), 1812 tcnp->cn_namelen) || 1813 tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen || 1814 NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, 1815 tdnp->n_fhp->nfh_len))) { 1816 #ifdef notdef 1817 { char nnn[100]; int nnnl; 1818 nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99; 1819 bcopy(tcnp->cn_nameptr, nnn, nnnl); 1820 nnn[nnnl] = '\0'; 1821 printf("ren replace=%s\n",nnn); 1822 } 1823 #endif 1824 FREE((caddr_t)fnp->n_v4, M_NFSV4NODE); 1825 fnp->n_v4 = newv4; 1826 newv4 = NULL; 1827 fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len; 1828 fnp->n_v4->n4_namelen = tcnp->cn_namelen; 1829 NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, 1830 tdnp->n_fhp->nfh_len); 1831 NFSBCOPY(tcnp->cn_nameptr, 1832 NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen); 1833 } 1834 mtx_unlock(&tdnp->n_mtx); 1835 mtx_unlock(&fnp->n_mtx); 1836 if (newv4 != NULL) 1837 FREE((caddr_t)newv4, M_NFSV4NODE); 1838 } 1839 1840 if (fvp->v_type == VDIR) { 1841 if (tvp != NULL && tvp->v_type == VDIR) 1842 cache_purge(tdvp); 1843 cache_purge(fdvp); 1844 } 1845 1846 out: 1847 if (tdvp == tvp) 1848 vrele(tdvp); 1849 else 1850 vput(tdvp); 1851 if (tvp) 1852 vput(tvp); 1853 vrele(fdvp); 1854 vrele(fvp); 1855 /* 1856 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1857 */ 1858 if (error == ENOENT) 1859 error = 0; 1860 return (error); 1861 } 1862 1863 /* 1864 * nfs file rename rpc called from nfs_remove() above 1865 */ 1866 static int 1867 nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp, 1868 struct sillyrename *sp) 1869 { 1870 1871 return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen, 1872 sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred, 1873 scnp->cn_thread)); 1874 } 1875 1876 /* 1877 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1878 */ 1879 static int 1880 nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr, 1881 int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr, 1882 int tnamelen, struct ucred *cred, struct thread *td) 1883 { 1884 struct nfsvattr fnfsva, tnfsva; 1885 struct nfsnode *fdnp = VTONFS(fdvp); 1886 struct nfsnode *tdnp = VTONFS(tdvp); 1887 int error = 0, fattrflag, tattrflag; 1888 1889 error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp, 1890 tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag, 1891 &tattrflag, NULL, NULL); 1892 mtx_lock(&fdnp->n_mtx); 1893 fdnp->n_flag |= NMODIFIED; 1894 if (fattrflag != 0) { 1895 mtx_unlock(&fdnp->n_mtx); 1896 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1); 1897 } else { 1898 fdnp->n_attrstamp = 0; 1899 mtx_unlock(&fdnp->n_mtx); 1900 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp); 1901 } 1902 mtx_lock(&tdnp->n_mtx); 1903 tdnp->n_flag |= NMODIFIED; 1904 if (tattrflag != 0) { 1905 mtx_unlock(&tdnp->n_mtx); 1906 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1); 1907 } else { 1908 tdnp->n_attrstamp = 0; 1909 mtx_unlock(&tdnp->n_mtx); 1910 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 1911 } 1912 if (error && NFS_ISV4(fdvp)) 1913 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); 1914 return (error); 1915 } 1916 1917 /* 1918 * nfs hard link create call 1919 */ 1920 static int 1921 nfs_link(struct vop_link_args *ap) 1922 { 1923 struct vnode *vp = ap->a_vp; 1924 struct vnode *tdvp = ap->a_tdvp; 1925 struct componentname *cnp = ap->a_cnp; 1926 struct nfsnode *np, *tdnp; 1927 struct nfsvattr nfsva, dnfsva; 1928 int error = 0, attrflag, dattrflag; 1929 1930 if (vp->v_mount != tdvp->v_mount) { 1931 return (EXDEV); 1932 } 1933 1934 /* 1935 * Push all writes to the server, so that the attribute cache 1936 * doesn't get "out of sync" with the server. 1937 * XXX There should be a better way! 1938 */ 1939 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread); 1940 1941 error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 1942 cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag, 1943 &dattrflag, NULL); 1944 tdnp = VTONFS(tdvp); 1945 mtx_lock(&tdnp->n_mtx); 1946 tdnp->n_flag |= NMODIFIED; 1947 if (dattrflag != 0) { 1948 mtx_unlock(&tdnp->n_mtx); 1949 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1); 1950 } else { 1951 tdnp->n_attrstamp = 0; 1952 mtx_unlock(&tdnp->n_mtx); 1953 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp); 1954 } 1955 if (attrflag) 1956 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); 1957 else { 1958 np = VTONFS(vp); 1959 mtx_lock(&np->n_mtx); 1960 np->n_attrstamp = 0; 1961 mtx_unlock(&np->n_mtx); 1962 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 1963 } 1964 /* 1965 * If negative lookup caching is enabled, I might as well 1966 * add an entry for this node. Not necessary for correctness, 1967 * but if negative caching is enabled, then the system 1968 * must care about lookup caching hit rate, so... 
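 *
 * The timeout idea in miniature (a userland-style sketch; the 30 second
 * constant stands in for nm_negnametimeo):
 *
 *	struct ncentry { time_t stamp; } e = { time(NULL) };
 *	...
 *	trust = (time(NULL) - e.stamp < 30);
 *
 * An entry is believed only while it is younger than the timeout;
 * after that the next lookup goes back to the server.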
1969 */ 1970 if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 && 1971 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 1972 cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL); 1973 } 1974 if (error && NFS_ISV4(vp)) 1975 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, 1976 (gid_t)0); 1977 return (error); 1978 } 1979 1980 /* 1981 * nfs symbolic link create call 1982 */ 1983 static int 1984 nfs_symlink(struct vop_symlink_args *ap) 1985 { 1986 struct vnode *dvp = ap->a_dvp; 1987 struct vattr *vap = ap->a_vap; 1988 struct componentname *cnp = ap->a_cnp; 1989 struct nfsvattr nfsva, dnfsva; 1990 struct nfsfh *nfhp; 1991 struct nfsnode *np = NULL, *dnp; 1992 struct vnode *newvp = NULL; 1993 int error = 0, attrflag, dattrflag, ret; 1994 1995 vap->va_type = VLNK; 1996 error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen, 1997 ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, 1998 &nfsva, &nfhp, &attrflag, &dattrflag, NULL); 1999 if (nfhp) { 2000 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2001 &np, NULL, LK_EXCLUSIVE); 2002 if (!ret) 2003 newvp = NFSTOV(np); 2004 else if (!error) 2005 error = ret; 2006 } 2007 if (newvp != NULL) { 2008 if (attrflag) 2009 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, 2010 0, 1); 2011 } else if (!error) { 2012 /* 2013 * If we do not have an error and we could not extract the 2014 * newvp from the response due to the request being NFSv2, we 2015 * have to do a lookup in order to obtain a newvp to return. 2016 */ 2017 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2018 cnp->cn_cred, cnp->cn_thread, &np); 2019 if (!error) 2020 newvp = NFSTOV(np); 2021 } 2022 if (error) { 2023 if (newvp) 2024 vput(newvp); 2025 if (NFS_ISV4(dvp)) 2026 error = nfscl_maperr(cnp->cn_thread, error, 2027 vap->va_uid, vap->va_gid); 2028 } else { 2029 *ap->a_vpp = newvp; 2030 } 2031 2032 dnp = VTONFS(dvp); 2033 mtx_lock(&dnp->n_mtx); 2034 dnp->n_flag |= NMODIFIED; 2035 if (dattrflag != 0) { 2036 mtx_unlock(&dnp->n_mtx); 2037 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2038 } else { 2039 dnp->n_attrstamp = 0; 2040 mtx_unlock(&dnp->n_mtx); 2041 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2042 } 2043 /* 2044 * If negative lookup caching is enabled, I might as well 2045 * add an entry for this node. Not necessary for correctness, 2046 * but if negative caching is enabled, then the system 2047 * must care about lookup caching hit rate, so... 
2048 */ 2049 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 && 2050 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) { 2051 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL); 2052 } 2053 return (error); 2054 } 2055 2056 /* 2057 * nfs make dir call 2058 */ 2059 static int 2060 nfs_mkdir(struct vop_mkdir_args *ap) 2061 { 2062 struct vnode *dvp = ap->a_dvp; 2063 struct vattr *vap = ap->a_vap; 2064 struct componentname *cnp = ap->a_cnp; 2065 struct nfsnode *np = NULL, *dnp; 2066 struct vnode *newvp = NULL; 2067 struct vattr vattr; 2068 struct nfsfh *nfhp; 2069 struct nfsvattr nfsva, dnfsva; 2070 int error = 0, attrflag, dattrflag, ret; 2071 2072 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0) 2073 return (error); 2074 vap->va_type = VDIR; 2075 error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2076 vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp, 2077 &attrflag, &dattrflag, NULL); 2078 dnp = VTONFS(dvp); 2079 mtx_lock(&dnp->n_mtx); 2080 dnp->n_flag |= NMODIFIED; 2081 if (dattrflag != 0) { 2082 mtx_unlock(&dnp->n_mtx); 2083 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2084 } else { 2085 dnp->n_attrstamp = 0; 2086 mtx_unlock(&dnp->n_mtx); 2087 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2088 } 2089 if (nfhp) { 2090 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, 2091 &np, NULL, LK_EXCLUSIVE); 2092 if (!ret) { 2093 newvp = NFSTOV(np); 2094 if (attrflag) 2095 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, 2096 NULL, 0, 1); 2097 } else if (!error) 2098 error = ret; 2099 } 2100 if (!error && newvp == NULL) { 2101 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2102 cnp->cn_cred, cnp->cn_thread, &np); 2103 if (!error) { 2104 newvp = NFSTOV(np); 2105 if (newvp->v_type != VDIR) 2106 error = EEXIST; 2107 } 2108 } 2109 if (error) { 2110 if (newvp) 2111 vput(newvp); 2112 if (NFS_ISV4(dvp)) 2113 error = nfscl_maperr(cnp->cn_thread, error, 2114 vap->va_uid, vap->va_gid); 2115 } else { 2116 /* 2117 * If negative lookup caching is enabled, I might as well 2118 * add an entry for this node. Not necessary for correctness, 2119 * but if negative caching is enabled, then the system 2120 * must care about lookup caching hit rate, so... 
2121 */ 2122 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 && 2123 (cnp->cn_flags & MAKEENTRY) && 2124 attrflag != 0 && dattrflag != 0) 2125 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, 2126 &dnfsva.na_ctime); 2127 *ap->a_vpp = newvp; 2128 } 2129 return (error); 2130 } 2131 2132 /* 2133 * nfs remove directory call 2134 */ 2135 static int 2136 nfs_rmdir(struct vop_rmdir_args *ap) 2137 { 2138 struct vnode *vp = ap->a_vp; 2139 struct vnode *dvp = ap->a_dvp; 2140 struct componentname *cnp = ap->a_cnp; 2141 struct nfsnode *dnp; 2142 struct nfsvattr dnfsva; 2143 int error, dattrflag; 2144 2145 if (dvp == vp) 2146 return (EINVAL); 2147 error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2148 cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL); 2149 dnp = VTONFS(dvp); 2150 mtx_lock(&dnp->n_mtx); 2151 dnp->n_flag |= NMODIFIED; 2152 if (dattrflag != 0) { 2153 mtx_unlock(&dnp->n_mtx); 2154 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); 2155 } else { 2156 dnp->n_attrstamp = 0; 2157 mtx_unlock(&dnp->n_mtx); 2158 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp); 2159 } 2160 2161 cache_purge(dvp); 2162 cache_purge(vp); 2163 if (error && NFS_ISV4(dvp)) 2164 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, 2165 (gid_t)0); 2166 /* 2167 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2168 */ 2169 if (error == ENOENT) 2170 error = 0; 2171 return (error); 2172 } 2173 2174 /* 2175 * nfs readdir call 2176 */ 2177 static int 2178 nfs_readdir(struct vop_readdir_args *ap) 2179 { 2180 struct vnode *vp = ap->a_vp; 2181 struct nfsnode *np = VTONFS(vp); 2182 struct uio *uio = ap->a_uio; 2183 ssize_t tresid; 2184 int error = 0; 2185 struct vattr vattr; 2186 2187 if (vp->v_type != VDIR) 2188 return(EPERM); 2189 2190 /* 2191 * First, check for hit on the EOF offset cache 2192 */ 2193 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset && 2194 (np->n_flag & NMODIFIED) == 0) { 2195 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) { 2196 mtx_lock(&np->n_mtx); 2197 if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) || 2198 !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) { 2199 mtx_unlock(&np->n_mtx); 2200 NFSINCRGLOBAL(newnfsstats.direofcache_hits); 2201 return (0); 2202 } else 2203 mtx_unlock(&np->n_mtx); 2204 } 2205 } 2206 2207 /* 2208 * Call ncl_bioread() to do the real work. 2209 */ 2210 tresid = uio->uio_resid; 2211 error = ncl_bioread(vp, uio, 0, ap->a_cred); 2212 2213 if (!error && uio->uio_resid == tresid) 2214 NFSINCRGLOBAL(newnfsstats.direofcache_misses); 2215 return (error); 2216 } 2217 2218 /* 2219 * Readdir rpc call. 2220 * Called from below the buffer cache by ncl_doio(). 2221 */ 2222 int 2223 ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2224 struct thread *td) 2225 { 2226 struct nfsvattr nfsva; 2227 nfsuint64 *cookiep, cookie; 2228 struct nfsnode *dnp = VTONFS(vp); 2229 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2230 int error = 0, eof, attrflag; 2231 2232 KASSERT(uiop->uio_iovcnt == 1 && 2233 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 && 2234 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0, 2235 ("nfs readdirrpc bad uio")); 2236 2237 /* 2238 * If there is no cookie, assume directory was stale. 
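 *
 * These "cookies" are the opaque per-block resume tokens of the readdir
 * RPC.  The userland analogue is telldir(3)/seekdir(3):
 *
 *	cookie = telldir(dirp);
 *	while (readdir(dirp) != NULL)
 *		;
 *	seekdir(dirp, cookie);
 *
 * i.e. an opaque token that lets a later read resume where an earlier
 * one stopped.  The client keeps one cookie per directory block so a
 * read at a given uio_offset can be restarted server-side.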
2239  */
2240     ncl_dircookie_lock(dnp);
2241     cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2242     if (cookiep) {
2243         cookie = *cookiep;
2244         ncl_dircookie_unlock(dnp);
2245     } else {
2246         ncl_dircookie_unlock(dnp);
2247         return (NFSERR_BAD_COOKIE);
2248     }
2249 
2250     if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2251         (void)ncl_fsinfo(nmp, vp, cred, td);
2252 
2253     error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
2254         &attrflag, &eof, NULL);
2255     if (attrflag)
2256         (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2257 
2258     if (!error) {
2259         /*
2260          * We are now either at the end of the directory or have filled
2261          * the block.
2262          */
2263         if (eof)
2264             dnp->n_direofoffset = uiop->uio_offset;
2265         else {
2266             if (uiop->uio_resid > 0)
2267                 ncl_printf("EEK! readdirrpc resid > 0\n");
2268             ncl_dircookie_lock(dnp);
2269             cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2270             *cookiep = cookie;
2271             ncl_dircookie_unlock(dnp);
2272         }
2273     } else if (NFS_ISV4(vp)) {
2274         error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2275     }
2276     return (error);
2277 }
2278 
2279 /*
2280  * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
2281  */
2282 int
2283 ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2284     struct thread *td)
2285 {
2286     struct nfsvattr nfsva;
2287     nfsuint64 *cookiep, cookie;
2288     struct nfsnode *dnp = VTONFS(vp);
2289     struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2290     int error = 0, attrflag, eof;
2291 
2292     KASSERT(uiop->uio_iovcnt == 1 &&
2293         (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
2294         (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
2295         ("nfs readdirplusrpc bad uio"));
2296 
2297     /*
2298      * If there is no cookie, assume directory was stale.
2299      */
2300     ncl_dircookie_lock(dnp);
2301     cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2302     if (cookiep) {
2303         cookie = *cookiep;
2304         ncl_dircookie_unlock(dnp);
2305     } else {
2306         ncl_dircookie_unlock(dnp);
2307         return (NFSERR_BAD_COOKIE);
2308     }
2309 
2310     if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2311         (void)ncl_fsinfo(nmp, vp, cred, td);
2312     error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
2313         &attrflag, &eof, NULL);
2314     if (attrflag)
2315         (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2316 
2317     if (!error) {
2318         /*
2319          * We are now either at the end of the directory or have
2320          * filled the block.
2321          */
2322         if (eof)
2323             dnp->n_direofoffset = uiop->uio_offset;
2324         else {
2325             if (uiop->uio_resid > 0)
2326                 ncl_printf("EEK! readdirplusrpc resid > 0\n");
2327             ncl_dircookie_lock(dnp);
2328             cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2329             *cookiep = cookie;
2330             ncl_dircookie_unlock(dnp);
2331         }
2332     } else if (NFS_ISV4(vp)) {
2333         error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2334     }
2335     return (error);
2336 }
2337 
2338 /*
2339  * Silly rename. To make the stateless NFS filesystem look a little more
2340  * like "ufs", a remove of an active vnode is translated into a rename to
2341  * a funny looking filename that is removed later by nfs_inactive on the
2342  * nfsnode. There is the potential for another process on a different
2343  * client to create the same funny name between when the nfs_lookitup()
2344  * fails and the nfs_rename() completes, but...
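 *
 * The classic trigger is the open-then-unlink idiom.  A userland sketch
 * (never compiled here; the file name is hypothetical):
 */
#ifdef notdef
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	int fd = open("scratch", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd == -1)
		return (1);
	(void)write(fd, "still here", 10);
	(void)unlink("scratch");	/* name is gone, file is not */
	(void)lseek(fd, 0, SEEK_SET);
	if (read(fd, buf, sizeof(buf)) == 10)
		printf("data survives the unlink\n");
	close(fd);			/* now the file really goes away */
	return (0);
}
#endif
/*
 * On a local filesystem the kernel keeps the unlinked file alive; over
 * NFS the routine below fakes that with the rename.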
2345  */
2346 static int
2347 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2348 {
2349     struct sillyrename *sp;
2350     struct nfsnode *np;
2351     int error;
2352     short pid;
2353     unsigned int lticks;
2354 
2355     cache_purge(dvp);
2356     np = VTONFS(vp);
2357     KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
2358     MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2359         M_NEWNFSREQ, M_WAITOK);
2360     sp->s_cred = crhold(cnp->cn_cred);
2361     sp->s_dvp = dvp;
2362     VREF(dvp);
2363 
2364     /*
2365      * Fudge together a funny name.
2366      * Changing the format of the funny name to accommodate more
2367      * sillynames per directory.
2368      * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is
2369      * CPU ticks since boot.
2370      */
2371     pid = cnp->cn_thread->td_proc->p_pid;
2372     lticks = (unsigned int)ticks;
2373     for ( ; ; ) {
2374         sp->s_namlen = sprintf(sp->s_name,
2375             ".nfs.%08x.%04x4.4", lticks,
2376             pid);
2377         if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2378             cnp->cn_thread, NULL))
2379             break;
2380         lticks++;
2381     }
2382     error = nfs_renameit(dvp, vp, cnp, sp);
2383     if (error)
2384         goto bad;
2385     error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2386         cnp->cn_thread, &np);
2387     np->n_sillyrename = sp;
2388     return (0);
2389 bad:
2390     vrele(sp->s_dvp);
2391     crfree(sp->s_cred);
2392     free((caddr_t)sp, M_NEWNFSREQ);
2393     return (error);
2394 }
2395 
2396 /*
2397  * Look up a file name and optionally either update the file handle or
2398  * allocate an nfsnode, depending on the value of npp.
2399  * npp == NULL  --> just do the lookup
2400  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2401  *                  handled too
2402  * *npp != NULL --> update the file handle in the vnode
2403  */
2404 static int
2405 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
2406     struct thread *td, struct nfsnode **npp)
2407 {
2408     struct vnode *newvp = NULL, *vp;
2409     struct nfsnode *np, *dnp = VTONFS(dvp);
2410     struct nfsfh *nfhp, *onfhp;
2411     struct nfsvattr nfsva, dnfsva;
2412     struct componentname cn;
2413     int error = 0, attrflag, dattrflag;
2414     u_int hash;
2415 
2416     error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
2417         &nfhp, &attrflag, &dattrflag, NULL);
2418     if (dattrflag)
2419         (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2420     if (npp && !error) {
2421         if (*npp != NULL) {
2422             np = *npp;
2423             vp = NFSTOV(np);
2424             /*
2425              * For NFSv4, check to see if it is the same name and
2426              * replace the name, if it is different.
2427              */
2428             if (np->n_v4 != NULL && nfsva.na_type == VREG &&
2429                 (np->n_v4->n4_namelen != len ||
2430                  NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
2431                  dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
2432                  NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2433                  dnp->n_fhp->nfh_len))) {
2434 #ifdef notdef
2435 { char nnn[100]; int nnnl;
2436 nnnl = (len < 100) ?
len : 99;
2437 bcopy(name, nnn, nnnl);
2438 nnn[nnnl] = '\0';
2439 printf("replace=%s\n",nnn);
2440 }
2441 #endif
2442                 FREE((caddr_t)np->n_v4, M_NFSV4NODE);
2443                 MALLOC(np->n_v4, struct nfsv4node *,
2444                     sizeof (struct nfsv4node) +
2445                     dnp->n_fhp->nfh_len + len - 1,
2446                     M_NFSV4NODE, M_WAITOK);
2447                 np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
2448                 np->n_v4->n4_namelen = len;
2449                 NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2450                     dnp->n_fhp->nfh_len);
2451                 NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
2452             }
2453             hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
2454                 FNV1_32_INIT);
2455             onfhp = np->n_fhp;
2456             /*
2457              * Rehash node for new file handle.
2458              */
2459             vfs_hash_rehash(vp, hash);
2460             np->n_fhp = nfhp;
2461             if (onfhp != NULL)
2462                 FREE((caddr_t)onfhp, M_NFSFH);
2463             newvp = NFSTOV(np);
2464         } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
2465             FREE((caddr_t)nfhp, M_NFSFH);
2466             VREF(dvp);
2467             newvp = dvp;
2468         } else {
2469             cn.cn_nameptr = name;
2470             cn.cn_namelen = len;
2471             error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
2472                 &np, NULL, LK_EXCLUSIVE);
2473             if (error)
2474                 return (error);
2475             newvp = NFSTOV(np);
2476         }
2477         if (!attrflag && *npp == NULL) {
2478             if (newvp == dvp)
2479                 vrele(newvp);
2480             else
2481                 vput(newvp);
2482             return (ENOENT);
2483         }
2484         if (attrflag)
2485             (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
2486                 0, 1);
2487     }
2488     if (npp && *npp == NULL) {
2489         if (error) {
2490             if (newvp) {
2491                 if (newvp == dvp)
2492                     vrele(newvp);
2493                 else
2494                     vput(newvp);
2495             }
2496         } else
2497             *npp = np;
2498     }
2499     if (error && NFS_ISV4(dvp))
2500         error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2501     return (error);
2502 }
2503 
2504 /*
2505  * NFS Version 3 and 4 commit rpc
2506  */
2507 int
2508 ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
2509     struct thread *td)
2510 {
2511     struct nfsvattr nfsva;
2512     struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2513     int error, attrflag;
2514     u_char verf[NFSX_VERF];
2515 
2516     mtx_lock(&nmp->nm_mtx);
2517     if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
2518         mtx_unlock(&nmp->nm_mtx);
2519         return (0);
2520     }
2521     mtx_unlock(&nmp->nm_mtx);
2522     error = nfsrpc_commit(vp, offset, cnt, cred, td, verf, &nfsva,
2523         &attrflag, NULL);
2524     if (!error) {
2525         mtx_lock(&nmp->nm_mtx);
2526         if (NFSBCMP((caddr_t)nmp->nm_verf, verf, NFSX_VERF)) {
2527             NFSBCOPY(verf, (caddr_t)nmp->nm_verf, NFSX_VERF);
2528             error = NFSERR_STALEWRITEVERF;
2529         }
2530         mtx_unlock(&nmp->nm_mtx);
2531         if (!error && attrflag)
2532             (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
2533                 0, 1);
2534     } else if (NFS_ISV4(vp)) {
2535         error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2536     }
2537     return (error);
2538 }
2539 
2540 /*
2541  * Strategy routine.
2542  * For async requests when nfsiod(s) are running, queue the request by
2543  * calling ncl_asyncio(); otherwise just call ncl_doio() to do the
2544  * request.
2545  */
2546 static int
2547 nfs_strategy(struct vop_strategy_args *ap)
2548 {
2549     struct buf *bp = ap->a_bp;
2550     struct ucred *cr;
2551 
2552     KASSERT(!(bp->b_flags & B_DONE),
2553         ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2554     BUF_ASSERT_HELD(bp);
2555 
2556     if (bp->b_iocmd == BIO_READ)
2557         cr = bp->b_rcred;
2558     else
2559         cr = bp->b_wcred;
2560 
2561     /*
2562      * If the op is asynchronous and an i/o daemon is waiting,
2563      * queue the request, wake it up and wait for completion;
2564      * otherwise just do it ourselves.
2565  */
2566     if ((bp->b_flags & B_ASYNC) == 0 ||
2567         ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
2568         (void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
2569     return (0);
2570 }
2571 
2572 /*
2573  * fsync vnode op. Just call ncl_flush() with commit == 1.
2574  */
2575 /* ARGSUSED */
2576 static int
2577 nfs_fsync(struct vop_fsync_args *ap)
2578 {
2579 
2580     if (ap->a_vp->v_type != VREG) {
2581         /*
2582          * For NFS, metadata is changed synchronously on the server,
2583          * so there is nothing to flush. Also, ncl_flush() clears
2584          * the NMODIFIED flag and that shouldn't be done here for
2585          * directories.
2586          */
2587         return (0);
2588     }
2589     return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0));
2590 }
2591 
2592 /*
2593  * Flush all the blocks associated with a vnode.
2594  * Walk through the buffer pool and push any dirty pages
2595  * associated with the vnode.
2596  * If the called_from_renewthread argument is TRUE, it has been called
2597  * from the NFSv4 renew thread and, as such, cannot block indefinitely
2598  * waiting for a buffer write to complete.
2599  */
2600 int
2601 ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td,
2602     int commit, int called_from_renewthread)
2603 {
2604     struct nfsnode *np = VTONFS(vp);
2605     struct buf *bp;
2606     int i;
2607     struct buf *nbp;
2608     struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2609     int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2610     int passone = 1, trycnt = 0;
2611     u_quad_t off, endoff, toff;
2612     struct ucred* wcred = NULL;
2613     struct buf **bvec = NULL;
2614     struct bufobj *bo;
2615 #ifndef NFS_COMMITBVECSIZ
2616 #define NFS_COMMITBVECSIZ	20
2617 #endif
2618     struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2619     int bvecsize = 0, bveccount;
2620 
2621     if (called_from_renewthread != 0)
2622         slptimeo = hz;
2623     if (nmp->nm_flag & NFSMNT_INT)
2624         slpflag = NFS_PCATCH;
2625     if (!commit)
2626         passone = 0;
2627     bo = &vp->v_bufobj;
2628     /*
2629      * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2630      * server, but has not been committed to stable storage on the server
2631      * yet. On the first pass, the byte range is worked out and the commit
2632      * rpc is done. On the second pass, ncl_writebp() is called to do the
2633      * job.
2634      */
2635 again:
2636     off = (u_quad_t)-1;
2637     endoff = 0;
2638     bvecpos = 0;
2639     if (NFS_ISV34(vp) && commit) {
2640         if (bvec != NULL && bvec != bvec_on_stack)
2641             free(bvec, M_TEMP);
2642         /*
2643          * Count up how many buffers are waiting for a commit.
2644          */
2645         bveccount = 0;
2646         BO_LOCK(bo);
2647         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2648             if (!BUF_ISLOCKED(bp) &&
2649                 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2650                 == (B_DELWRI | B_NEEDCOMMIT))
2651                 bveccount++;
2652         }
2653         /*
2654          * Allocate space to remember the list of bufs to commit. It is
2655          * important to use M_NOWAIT here to avoid a race with nfs_write.
2656          * If we can't get memory (for whatever reason), we will end up
2657          * committing the buffers one-by-one in the loop below.
2658          */
2659         if (bveccount > NFS_COMMITBVECSIZ) {
2660             /*
2661              * Release the vnode interlock to avoid a lock
2662              * order reversal.
2663 */ 2664 BO_UNLOCK(bo); 2665 bvec = (struct buf **) 2666 malloc(bveccount * sizeof(struct buf *), 2667 M_TEMP, M_NOWAIT); 2668 BO_LOCK(bo); 2669 if (bvec == NULL) { 2670 bvec = bvec_on_stack; 2671 bvecsize = NFS_COMMITBVECSIZ; 2672 } else 2673 bvecsize = bveccount; 2674 } else { 2675 bvec = bvec_on_stack; 2676 bvecsize = NFS_COMMITBVECSIZ; 2677 } 2678 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2679 if (bvecpos >= bvecsize) 2680 break; 2681 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2682 nbp = TAILQ_NEXT(bp, b_bobufs); 2683 continue; 2684 } 2685 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) != 2686 (B_DELWRI | B_NEEDCOMMIT)) { 2687 BUF_UNLOCK(bp); 2688 nbp = TAILQ_NEXT(bp, b_bobufs); 2689 continue; 2690 } 2691 BO_UNLOCK(bo); 2692 bremfree(bp); 2693 /* 2694 * Work out if all buffers are using the same cred 2695 * so we can deal with them all with one commit. 2696 * 2697 * NOTE: we are not clearing B_DONE here, so we have 2698 * to do it later on in this routine if we intend to 2699 * initiate I/O on the bp. 2700 * 2701 * Note: to avoid loopback deadlocks, we do not 2702 * assign b_runningbufspace. 2703 */ 2704 if (wcred == NULL) 2705 wcred = bp->b_wcred; 2706 else if (wcred != bp->b_wcred) 2707 wcred = NOCRED; 2708 vfs_busy_pages(bp, 1); 2709 2710 BO_LOCK(bo); 2711 /* 2712 * bp is protected by being locked, but nbp is not 2713 * and vfs_busy_pages() may sleep. We have to 2714 * recalculate nbp. 2715 */ 2716 nbp = TAILQ_NEXT(bp, b_bobufs); 2717 2718 /* 2719 * A list of these buffers is kept so that the 2720 * second loop knows which buffers have actually 2721 * been committed. This is necessary, since there 2722 * may be a race between the commit rpc and new 2723 * uncommitted writes on the file. 2724 */ 2725 bvec[bvecpos++] = bp; 2726 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2727 bp->b_dirtyoff; 2728 if (toff < off) 2729 off = toff; 2730 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2731 if (toff > endoff) 2732 endoff = toff; 2733 } 2734 BO_UNLOCK(bo); 2735 } 2736 if (bvecpos > 0) { 2737 /* 2738 * Commit data on the server, as required. 2739 * If all bufs are using the same wcred, then use that with 2740 * one call for all of them, otherwise commit each one 2741 * separately. 2742 */ 2743 if (wcred != NOCRED) 2744 retv = ncl_commit(vp, off, (int)(endoff - off), 2745 wcred, td); 2746 else { 2747 retv = 0; 2748 for (i = 0; i < bvecpos; i++) { 2749 off_t off, size; 2750 bp = bvec[i]; 2751 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2752 bp->b_dirtyoff; 2753 size = (u_quad_t)(bp->b_dirtyend 2754 - bp->b_dirtyoff); 2755 retv = ncl_commit(vp, off, (int)size, 2756 bp->b_wcred, td); 2757 if (retv) break; 2758 } 2759 } 2760 2761 if (retv == NFSERR_STALEWRITEVERF) 2762 ncl_clearcommit(vp->v_mount); 2763 2764 /* 2765 * Now, either mark the blocks I/O done or mark the 2766 * blocks dirty, depending on whether the commit 2767 * succeeded. 2768 */ 2769 for (i = 0; i < bvecpos; i++) { 2770 bp = bvec[i]; 2771 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 2772 if (retv) { 2773 /* 2774 * Error, leave B_DELWRI intact 2775 */ 2776 vfs_unbusy_pages(bp); 2777 brelse(bp); 2778 } else { 2779 /* 2780 * Success, remove B_DELWRI ( bundirty() ). 2781 * 2782 * b_dirtyoff/b_dirtyend seem to be NFS 2783 * specific. We should probably move that 2784 * into bundirty(). 
XXX
2785              */
2786                 bufobj_wref(bo);
2787                 bp->b_flags |= B_ASYNC;
2788                 bundirty(bp);
2789                 bp->b_flags &= ~B_DONE;
2790                 bp->b_ioflags &= ~BIO_ERROR;
2791                 bp->b_dirtyoff = bp->b_dirtyend = 0;
2792                 bufdone(bp);
2793             }
2794         }
2795     }
2796 
2797     /*
2798      * Start/do any write(s) that are required.
2799      */
2800 loop:
2801     BO_LOCK(bo);
2802     TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2803         if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2804             if (waitfor != MNT_WAIT || passone)
2805                 continue;
2806 
2807             error = BUF_TIMELOCK(bp,
2808                 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2809                 BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
2810             if (error == 0) {
2811                 BUF_UNLOCK(bp);
2812                 goto loop;
2813             }
2814             if (error == ENOLCK) {
2815                 error = 0;
2816                 goto loop;
2817             }
2818             if (called_from_renewthread != 0) {
2819                 /*
2820                  * Return EIO so the flush will be retried
2821                  * later.
2822                  */
2823                 error = EIO;
2824                 goto done;
2825             }
2826             if (newnfs_sigintr(nmp, td)) {
2827                 error = EINTR;
2828                 goto done;
2829             }
2830             if (slpflag & PCATCH) {
2831                 slpflag = 0;
2832                 slptimeo = 2 * hz;
2833             }
2834             goto loop;
2835         }
2836         if ((bp->b_flags & B_DELWRI) == 0)
2837             panic("nfs_fsync: not dirty");
2838         if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2839             BUF_UNLOCK(bp);
2840             continue;
2841         }
2842         BO_UNLOCK(bo);
2843         bremfree(bp);
2844         bp->b_flags |= B_ASYNC;
2848         bwrite(bp);
2849         if (newnfs_sigintr(nmp, td)) {
2850             error = EINTR;
2851             goto done;
2852         }
2853         goto loop;
2854     }
2855     if (passone) {
2856         passone = 0;
2857         BO_UNLOCK(bo);
2858         goto again;
2859     }
2860     if (waitfor == MNT_WAIT) {
2861         while (bo->bo_numoutput) {
2862             error = bufobj_wwait(bo, slpflag, slptimeo);
2863             if (error) {
2864                 BO_UNLOCK(bo);
2865                 if (called_from_renewthread != 0) {
2866                     /*
2867                      * Return EIO so that the flush will be
2868                      * retried later.
2869                      */
2870                     error = EIO;
2871                     goto done;
2872                 }
2873                 error = newnfs_sigintr(nmp, td);
2874                 if (error)
2875                     goto done;
2876                 if (slpflag & PCATCH) {
2877                     slpflag = 0;
2878                     slptimeo = 2 * hz;
2879                 }
2880                 BO_LOCK(bo);
2881             }
2882         }
2883         if (bo->bo_dirty.bv_cnt != 0 && commit) {
2884             BO_UNLOCK(bo);
2885             goto loop;
2886         }
2887         /*
2888          * Wait for all the async IO requests to drain
2889          */
2890         BO_UNLOCK(bo);
2891         mtx_lock(&np->n_mtx);
2892         while (np->n_directio_asyncwr > 0) {
2893             np->n_flag |= NFSYNCWAIT;
2894             error = newnfs_msleep(td, &np->n_directio_asyncwr,
2895                 &np->n_mtx, slpflag | (PRIBIO + 1),
2896                 "nfsfsync", 0);
2897             if (error) {
2898                 if (newnfs_sigintr(nmp, td)) {
2899                     mtx_unlock(&np->n_mtx);
2900                     error = EINTR;
2901                     goto done;
2902                 }
2903             }
2904         }
2905         mtx_unlock(&np->n_mtx);
2906     } else
2907         BO_UNLOCK(bo);
2908     mtx_lock(&np->n_mtx);
2909     if (np->n_flag & NWRITEERR) {
2910         error = np->n_error;
2911         np->n_flag &= ~NWRITEERR;
2912     }
2913     if (commit && bo->bo_dirty.bv_cnt == 0 &&
2914         bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
2915         np->n_flag &= ~NMODIFIED;
2916     mtx_unlock(&np->n_mtx);
2917 done:
2918     if (bvec != NULL && bvec != bvec_on_stack)
2919         free(bvec, M_TEMP);
2920     if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
2921         (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
2922          np->n_directio_asyncwr != 0) && trycnt++ < 5) {
2923         /* try, try again... */
2924         passone = 1;
2925         wcred = NULL;
2926         bvec = NULL;
2927         bvecsize = 0;
2928         printf("try%d\n", trycnt);
2929         goto again;
2930     }
2931     return (error);
2932 }
2933 
2934 /*
2935  * NFS advisory byte-level locks.
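 *
 * What this vnode op serves, seen from userland, is a byte-range
 * fcntl(2) lock.  A sketch (never compiled here; the path is
 * hypothetical):
 */
#ifdef notdef
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd = open("/mnt/nfs/file", O_RDWR);

	if (fd == -1)
		return (1);
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;			/* exclusive (write) lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 100;				/* first 100 bytes only */
	if (fcntl(fd, F_SETLKW, &fl) == -1)	/* blocks until granted */
		return (1);
	/* ... I/O on the locked range ... */
	fl.l_type = F_UNLCK;
	(void)fcntl(fd, F_SETLK, &fl);
	close(fd);
	return (0);
}
#endif
/*
 * For NFSv4 the lock op is performed on the server; note below the
 * RFC3530-mandated flush before unlocking a write-locked region and
 * the cache invalidation after a lock is granted.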
2936 */ 2937 static int 2938 nfs_advlock(struct vop_advlock_args *ap) 2939 { 2940 struct vnode *vp = ap->a_vp; 2941 struct ucred *cred; 2942 struct nfsnode *np = VTONFS(ap->a_vp); 2943 struct proc *p = (struct proc *)ap->a_id; 2944 struct thread *td = curthread; /* XXX */ 2945 struct vattr va; 2946 int ret, error = EOPNOTSUPP; 2947 u_quad_t size; 2948 2949 if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) { 2950 if (vp->v_type != VREG) 2951 return (EINVAL); 2952 if ((ap->a_flags & F_POSIX) != 0) 2953 cred = p->p_ucred; 2954 else 2955 cred = td->td_ucred; 2956 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2957 if (vp->v_iflag & VI_DOOMED) { 2958 NFSVOPUNLOCK(vp, 0); 2959 return (EBADF); 2960 } 2961 2962 /* 2963 * If this is unlocking a write locked region, flush and 2964 * commit them before unlocking. This is required by 2965 * RFC3530 Sec. 9.3.2. 2966 */ 2967 if (ap->a_op == F_UNLCK && 2968 nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id, 2969 ap->a_flags)) 2970 (void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0); 2971 2972 /* 2973 * Loop around doing the lock op, while a blocking lock 2974 * must wait for the lock op to succeed. 2975 */ 2976 do { 2977 ret = nfsrpc_advlock(vp, np->n_size, ap->a_op, 2978 ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags); 2979 if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 2980 ap->a_op == F_SETLK) { 2981 NFSVOPUNLOCK(vp, 0); 2982 error = nfs_catnap(PZERO | PCATCH, ret, 2983 "ncladvl"); 2984 if (error) 2985 return (EINTR); 2986 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); 2987 if (vp->v_iflag & VI_DOOMED) { 2988 NFSVOPUNLOCK(vp, 0); 2989 return (EBADF); 2990 } 2991 } 2992 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && 2993 ap->a_op == F_SETLK); 2994 if (ret == NFSERR_DENIED) { 2995 NFSVOPUNLOCK(vp, 0); 2996 return (EAGAIN); 2997 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) { 2998 NFSVOPUNLOCK(vp, 0); 2999 return (ret); 3000 } else if (ret != 0) { 3001 NFSVOPUNLOCK(vp, 0); 3002 return (EACCES); 3003 } 3004 3005 /* 3006 * Now, if we just got a lock, invalidate data in the buffer 3007 * cache, as required, so that the coherency conforms with 3008 * RFC3530 Sec. 9.3.2. 3009 */ 3010 if (ap->a_op == F_SETLK) { 3011 if ((np->n_flag & NMODIFIED) == 0) { 3012 np->n_attrstamp = 0; 3013 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3014 ret = VOP_GETATTR(vp, &va, cred); 3015 } 3016 if ((np->n_flag & NMODIFIED) || ret || 3017 np->n_change != va.va_filerev) { 3018 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1); 3019 np->n_attrstamp = 0; 3020 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); 3021 ret = VOP_GETATTR(vp, &va, cred); 3022 if (!ret) { 3023 np->n_mtime = va.va_mtime; 3024 np->n_change = va.va_filerev; 3025 } 3026 } 3027 } 3028 NFSVOPUNLOCK(vp, 0); 3029 return (0); 3030 } else if (!NFS_ISV4(vp)) { 3031 error = NFSVOPLOCK(vp, LK_SHARED); 3032 if (error) 3033 return (error); 3034 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3035 size = VTONFS(vp)->n_size; 3036 NFSVOPUNLOCK(vp, 0); 3037 error = lf_advlock(ap, &(vp->v_lockf), size); 3038 } else { 3039 if (nfs_advlock_p != NULL) 3040 error = nfs_advlock_p(ap); 3041 else { 3042 NFSVOPUNLOCK(vp, 0); 3043 error = ENOLCK; 3044 } 3045 } 3046 } 3047 return (error); 3048 } 3049 3050 /* 3051 * NFS advisory byte-level locks. 
3052 */ 3053 static int 3054 nfs_advlockasync(struct vop_advlockasync_args *ap) 3055 { 3056 struct vnode *vp = ap->a_vp; 3057 u_quad_t size; 3058 int error; 3059 3060 if (NFS_ISV4(vp)) 3061 return (EOPNOTSUPP); 3062 error = NFSVOPLOCK(vp, LK_SHARED); 3063 if (error) 3064 return (error); 3065 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { 3066 size = VTONFS(vp)->n_size; 3067 NFSVOPUNLOCK(vp, 0); 3068 error = lf_advlockasync(ap, &(vp->v_lockf), size); 3069 } else { 3070 NFSVOPUNLOCK(vp, 0); 3071 error = EOPNOTSUPP; 3072 } 3073 return (error); 3074 } 3075 3076 /* 3077 * Print out the contents of an nfsnode. 3078 */ 3079 static int 3080 nfs_print(struct vop_print_args *ap) 3081 { 3082 struct vnode *vp = ap->a_vp; 3083 struct nfsnode *np = VTONFS(vp); 3084 3085 ncl_printf("\tfileid %ld fsid 0x%x", 3086 np->n_vattr.na_fileid, np->n_vattr.na_fsid); 3087 if (vp->v_type == VFIFO) 3088 fifo_printinfo(vp); 3089 printf("\n"); 3090 return (0); 3091 } 3092 3093 /* 3094 * This is the "real" nfs::bwrite(struct buf*). 3095 * We set B_CACHE if this is a VMIO buffer. 3096 */ 3097 int 3098 ncl_writebp(struct buf *bp, int force __unused, struct thread *td) 3099 { 3100 int s; 3101 int oldflags = bp->b_flags; 3102 #if 0 3103 int retv = 1; 3104 off_t off; 3105 #endif 3106 3107 BUF_ASSERT_HELD(bp); 3108 3109 if (bp->b_flags & B_INVAL) { 3110 brelse(bp); 3111 return(0); 3112 } 3113 3114 bp->b_flags |= B_CACHE; 3115 3116 /* 3117 * Undirty the bp. We will redirty it later if the I/O fails. 3118 */ 3119 3120 s = splbio(); 3121 bundirty(bp); 3122 bp->b_flags &= ~B_DONE; 3123 bp->b_ioflags &= ~BIO_ERROR; 3124 bp->b_iocmd = BIO_WRITE; 3125 3126 bufobj_wref(bp->b_bufobj); 3127 curthread->td_ru.ru_oublock++; 3128 splx(s); 3129 3130 /* 3131 * Note: to avoid loopback deadlocks, we do not 3132 * assign b_runningbufspace. 3133 */ 3134 vfs_busy_pages(bp, 1); 3135 3136 BUF_KERNPROC(bp); 3137 bp->b_iooffset = dbtob(bp->b_blkno); 3138 bstrategy(bp); 3139 3140 if( (oldflags & B_ASYNC) == 0) { 3141 int rtval = bufwait(bp); 3142 3143 if (oldflags & B_DELWRI) { 3144 s = splbio(); 3145 reassignbuf(bp); 3146 splx(s); 3147 } 3148 brelse(bp); 3149 return (rtval); 3150 } 3151 3152 return (0); 3153 } 3154 3155 /* 3156 * nfs special file access vnode op. 3157 * Essentially just get vattr and then imitate iaccess() since the device is 3158 * local to the client. 3159 */ 3160 static int 3161 nfsspec_access(struct vop_access_args *ap) 3162 { 3163 struct vattr *vap; 3164 struct ucred *cred = ap->a_cred; 3165 struct vnode *vp = ap->a_vp; 3166 accmode_t accmode = ap->a_accmode; 3167 struct vattr vattr; 3168 int error; 3169 3170 /* 3171 * Disallow write attempts on filesystems mounted read-only; 3172 * unless the file is a socket, fifo, or a block or character 3173 * device resident on the filesystem. 3174 */ 3175 if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3176 switch (vp->v_type) { 3177 case VREG: 3178 case VDIR: 3179 case VLNK: 3180 return (EROFS); 3181 default: 3182 break; 3183 } 3184 } 3185 vap = &vattr; 3186 error = VOP_GETATTR(vp, vap, cred); 3187 if (error) 3188 goto out; 3189 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid, 3190 accmode, cred, NULL); 3191 out: 3192 return error; 3193 } 3194 3195 /* 3196 * Read wrapper for fifos. 3197 */ 3198 static int 3199 nfsfifo_read(struct vop_read_args *ap) 3200 { 3201 struct nfsnode *np = VTONFS(ap->a_vp); 3202 int error; 3203 3204 /* 3205 * Set access flag. 
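 *
 * (The time is only recorded here; nfsfifo_close() later pushes the
 * accumulated NACC/NUPD timestamps to the server in a single
 * VOP_SETATTR().)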
3206 */ 3207 mtx_lock(&np->n_mtx); 3208 np->n_flag |= NACC; 3209 getnanotime(&np->n_atim); 3210 mtx_unlock(&np->n_mtx); 3211 error = fifo_specops.vop_read(ap); 3212 return error; 3213 } 3214 3215 /* 3216 * Write wrapper for fifos. 3217 */ 3218 static int 3219 nfsfifo_write(struct vop_write_args *ap) 3220 { 3221 struct nfsnode *np = VTONFS(ap->a_vp); 3222 3223 /* 3224 * Set update flag. 3225 */ 3226 mtx_lock(&np->n_mtx); 3227 np->n_flag |= NUPD; 3228 getnanotime(&np->n_mtim); 3229 mtx_unlock(&np->n_mtx); 3230 return(fifo_specops.vop_write(ap)); 3231 } 3232 3233 /* 3234 * Close wrapper for fifos. 3235 * 3236 * Update the times on the nfsnode then do fifo close. 3237 */ 3238 static int 3239 nfsfifo_close(struct vop_close_args *ap) 3240 { 3241 struct vnode *vp = ap->a_vp; 3242 struct nfsnode *np = VTONFS(vp); 3243 struct vattr vattr; 3244 struct timespec ts; 3245 3246 mtx_lock(&np->n_mtx); 3247 if (np->n_flag & (NACC | NUPD)) { 3248 getnanotime(&ts); 3249 if (np->n_flag & NACC) 3250 np->n_atim = ts; 3251 if (np->n_flag & NUPD) 3252 np->n_mtim = ts; 3253 np->n_flag |= NCHG; 3254 if (vrefcnt(vp) == 1 && 3255 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3256 VATTR_NULL(&vattr); 3257 if (np->n_flag & NACC) 3258 vattr.va_atime = np->n_atim; 3259 if (np->n_flag & NUPD) 3260 vattr.va_mtime = np->n_mtim; 3261 mtx_unlock(&np->n_mtx); 3262 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3263 goto out; 3264 } 3265 } 3266 mtx_unlock(&np->n_mtx); 3267 out: 3268 return (fifo_specops.vop_close(ap)); 3269 } 3270 3271 /* 3272 * Just call ncl_writebp() with the force argument set to 1. 3273 * 3274 * NOTE: B_DONE may or may not be set in a_bp on call. 3275 */ 3276 static int 3277 nfs_bwrite(struct buf *bp) 3278 { 3279 3280 return (ncl_writebp(bp, 1, curthread)); 3281 } 3282 3283 struct buf_ops buf_ops_newnfs = { 3284 .bop_name = "buf_ops_nfs", 3285 .bop_write = nfs_bwrite, 3286 .bop_strategy = bufstrategy, 3287 .bop_sync = bufsync, 3288 .bop_bdflush = bufbdflush, 3289 }; 3290 3291 /* 3292 * Cloned from vop_stdlock(), and then the ugly hack added. 3293 */ 3294 static int 3295 nfs_lock1(struct vop_lock1_args *ap) 3296 { 3297 struct vnode *vp = ap->a_vp; 3298 int error = 0; 3299 3300 /* 3301 * Since vfs_hash_get() calls vget() and it will no longer work 3302 * for FreeBSD8 with flags == 0, I can only think of this horrible 3303 * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER 3304 * and then handle it here. All I want for this case is a v_usecount 3305 * on the vnode to use for recovery, while another thread might 3306 * hold a lock on the vnode. I have the other threads blocked, so 3307 * there isn't any race problem. 
3308 */ 3309 if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) { 3310 if ((ap->a_flags & LK_INTERLOCK) == 0) 3311 panic("ncllock1"); 3312 if ((vp->v_iflag & VI_DOOMED)) 3313 error = ENOENT; 3314 VI_UNLOCK(vp); 3315 return (error); 3316 } 3317 return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp), 3318 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file, 3319 ap->a_line)); 3320 } 3321 3322 static int 3323 nfs_getacl(struct vop_getacl_args *ap) 3324 { 3325 int error; 3326 3327 if (ap->a_type != ACL_TYPE_NFS4) 3328 return (EOPNOTSUPP); 3329 error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3330 NULL); 3331 if (error > NFSERR_STALE) { 3332 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3333 error = EPERM; 3334 } 3335 return (error); 3336 } 3337 3338 static int 3339 nfs_setacl(struct vop_setacl_args *ap) 3340 { 3341 int error; 3342 3343 if (ap->a_type != ACL_TYPE_NFS4) 3344 return (EOPNOTSUPP); 3345 error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, 3346 NULL); 3347 if (error > NFSERR_STALE) { 3348 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); 3349 error = EPERM; 3350 } 3351 return (error); 3352 } 3353 3354 /* 3355 * Return POSIX pathconf information applicable to nfs filesystems. 3356 */ 3357 static int 3358 nfs_pathconf(struct vop_pathconf_args *ap) 3359 { 3360 struct nfsv3_pathconf pc; 3361 struct nfsvattr nfsva; 3362 struct vnode *vp = ap->a_vp; 3363 struct thread *td = curthread; 3364 int attrflag, error; 3365 3366 if (NFS_ISV4(vp) || (NFS_ISV3(vp) && (ap->a_name == _PC_LINK_MAX || 3367 ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED || 3368 ap->a_name == _PC_NO_TRUNC))) { 3369 /* 3370 * Since only the above 4 a_names are returned by the NFSv3 3371 * Pathconf RPC, there is no point in doing it for others. 3372 */ 3373 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva, 3374 &attrflag, NULL); 3375 if (attrflag != 0) 3376 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 3377 1); 3378 if (error != 0) 3379 return (error); 3380 } else { 3381 /* 3382 * For NFSv2 (or NFSv3 when not one of the above 4 a_names), 3383 * just fake them. 3384 */ 3385 pc.pc_linkmax = LINK_MAX; 3386 pc.pc_namemax = NFS_MAXNAMLEN; 3387 pc.pc_notrunc = 1; 3388 pc.pc_chownrestricted = 1; 3389 pc.pc_caseinsensitive = 0; 3390 pc.pc_casepreserving = 1; 3391 error = 0; 3392 } 3393 switch (ap->a_name) { 3394 case _PC_LINK_MAX: 3395 *ap->a_retval = pc.pc_linkmax; 3396 break; 3397 case _PC_NAME_MAX: 3398 *ap->a_retval = pc.pc_namemax; 3399 break; 3400 case _PC_PATH_MAX: 3401 *ap->a_retval = PATH_MAX; 3402 break; 3403 case _PC_PIPE_BUF: 3404 *ap->a_retval = PIPE_BUF; 3405 break; 3406 case _PC_CHOWN_RESTRICTED: 3407 *ap->a_retval = pc.pc_chownrestricted; 3408 break; 3409 case _PC_NO_TRUNC: 3410 *ap->a_retval = pc.pc_notrunc; 3411 break; 3412 case _PC_ACL_EXTENDED: 3413 *ap->a_retval = 0; 3414 break; 3415 case _PC_ACL_NFS4: 3416 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 && 3417 NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL)) 3418 *ap->a_retval = 1; 3419 else 3420 *ap->a_retval = 0; 3421 break; 3422 case _PC_ACL_PATH_MAX: 3423 if (NFS_ISV4(vp)) 3424 *ap->a_retval = ACL_MAX_ENTRIES; 3425 else 3426 *ap->a_retval = 3; 3427 break; 3428 case _PC_MAC_PRESENT: 3429 *ap->a_retval = 0; 3430 break; 3431 case _PC_ASYNC_IO: 3432 /* _PC_ASYNC_IO should have been handled by upper layers. 
*/ 3433 KASSERT(0, ("_PC_ASYNC_IO should not get here")); 3434 error = EINVAL; 3435 break; 3436 case _PC_PRIO_IO: 3437 *ap->a_retval = 0; 3438 break; 3439 case _PC_SYNC_IO: 3440 *ap->a_retval = 0; 3441 break; 3442 case _PC_ALLOC_SIZE_MIN: 3443 *ap->a_retval = vp->v_mount->mnt_stat.f_bsize; 3444 break; 3445 case _PC_FILESIZEBITS: 3446 if (NFS_ISV34(vp)) 3447 *ap->a_retval = 64; 3448 else 3449 *ap->a_retval = 32; 3450 break; 3451 case _PC_REC_INCR_XFER_SIZE: 3452 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize; 3453 break; 3454 case _PC_REC_MAX_XFER_SIZE: 3455 *ap->a_retval = -1; /* means ``unlimited'' */ 3456 break; 3457 case _PC_REC_MIN_XFER_SIZE: 3458 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize; 3459 break; 3460 case _PC_REC_XFER_ALIGN: 3461 *ap->a_retval = PAGE_SIZE; 3462 break; 3463 case _PC_SYMLINK_MAX: 3464 *ap->a_retval = NFS_MAXPATHLEN; 3465 break; 3466 3467 default: 3468 error = EINVAL; 3469 break; 3470 } 3471 return (error); 3472 } 3473 3474