/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * vnode op calls for Sun NFS version 2, 3 and 4
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/namei.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfs_lock.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

/* Defs */
#define	TRUE	1
#define	FALSE	0

extern struct nfsstats newnfsstats;
MALLOC_DECLARE(M_NEWNFSREQ);
vop_advlock_t *ncl_advlock_p = ncl_dolock;

/*
 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
 * calls are not in getblk() and brelse() so that they would not be necessary
 * here.
 */
#ifndef B_VMIO
#define	vfs_busy_pages(bp, f)
#endif

static vop_read_t	nfsfifo_read;
static vop_write_t	nfsfifo_write;
static vop_close_t	nfsfifo_close;
static int	nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
		    struct thread *);
static vop_lookup_t	nfs_lookup;
static vop_create_t	nfs_create;
static vop_mknod_t	nfs_mknod;
static vop_open_t	nfs_open;
static vop_close_t	nfs_close;
static vop_access_t	nfs_access;
static vop_getattr_t	nfs_getattr;
static vop_setattr_t	nfs_setattr;
static vop_read_t	nfs_read;
static vop_fsync_t	nfs_fsync;
static vop_remove_t	nfs_remove;
static vop_link_t	nfs_link;
static vop_rename_t	nfs_rename;
static vop_mkdir_t	nfs_mkdir;
static vop_rmdir_t	nfs_rmdir;
static vop_symlink_t	nfs_symlink;
static vop_readdir_t	nfs_readdir;
static vop_strategy_t	nfs_strategy;
static vop_lock1_t	nfs_lock1;
static int	nfs_lookitup(struct vnode *, char *, int,
		    struct ucred *, struct thread *, struct nfsnode **);
static int	nfs_sillyrename(struct vnode *, struct vnode *,
		    struct componentname *);
static vop_access_t	nfsspec_access;
static vop_readlink_t	nfs_readlink;
static vop_print_t	nfs_print;
static vop_advlock_t	nfs_advlock;
static vop_advlockasync_t nfs_advlockasync;
static vop_getacl_t	nfs_getacl;
static vop_setacl_t	nfs_setacl;

/*
 * Global vfs data structures for nfs
 */
struct vop_vector newnfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		nfs_access,
	.vop_advlock =		nfs_advlock,
	.vop_advlockasync =	nfs_advlockasync,
	.vop_close =		nfs_close,
	.vop_create =		nfs_create,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_getpages =		ncl_getpages,
	.vop_putpages =		ncl_putpages,
	.vop_inactive =		ncl_inactive,
	.vop_link =		nfs_link,
	.vop_lock1 =		nfs_lock1,
	.vop_lookup =		nfs_lookup,
	.vop_mkdir =		nfs_mkdir,
	.vop_mknod =		nfs_mknod,
	.vop_open =		nfs_open,
	.vop_print =		nfs_print,
	.vop_read =		nfs_read,
	.vop_readdir =		nfs_readdir,
	.vop_readlink =		nfs_readlink,
	.vop_reclaim =		ncl_reclaim,
	.vop_remove =		nfs_remove,
	.vop_rename =		nfs_rename,
	.vop_rmdir =		nfs_rmdir,
	.vop_setattr =		nfs_setattr,
	.vop_strategy =		nfs_strategy,
	.vop_symlink =		nfs_symlink,
	.vop_write =		ncl_write,
	.vop_getacl =		nfs_getacl,
	.vop_setacl =		nfs_setacl,
};

struct vop_vector newnfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_access =		nfsspec_access,
	.vop_close =		nfsfifo_close,
	.vop_fsync =		nfs_fsync,
	.vop_getattr =		nfs_getattr,
	.vop_inactive =		ncl_inactive,
	.vop_print =		nfs_print,
	.vop_read =		nfsfifo_read,
	.vop_reclaim =		ncl_reclaim,
	.vop_setattr =		nfs_setattr,
	.vop_write =		nfsfifo_write,
};

static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct vattr *vap);
static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td);
static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
    char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
    char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
    struct componentname *scnp, struct sillyrename *sp);

/*
 * Global variables
 */
#define	DIRHDSIZ	(sizeof (struct dirent) - (MAXNAMLEN + 1))

SYSCTL_DECL(_vfs_newnfs);

static int	nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
    &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");

static int	nfs_prime_access_cache = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
    &nfs_prime_access_cache, 0,
    "Prime NFS ACCESS cache when fetching attributes");

static int	newnfs_commit_on_close = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
    &newnfs_commit_on_close, 0, "write+commit on close, else only write");

static int	nfs_clean_pages_on_close = 1;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
    &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");

int newnfs_directio_enable = 0;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, directio_enable, CTLFLAG_RW,
    &newnfs_directio_enable, 0, "Enable NFS directio");

/*
 * This sysctl allows other processes to mmap a file that has been opened
 * O_DIRECT by a process.  In general, having processes mmap the file while
 * Direct IO is in progress can lead to data inconsistencies.  But, we allow
 * this by default to prevent DoS attacks - to prevent a malicious user from
 * opening up files O_DIRECT preventing other users from mmap'ing these
 * files.  "Protected" environments where stricter consistency guarantees are
 * required can disable this knob.  The process that opened the file O_DIRECT
 * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not
 * meaningful.
 */
int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_newnfs, OID_AUTO, directio_allow_mmap, CTLFLAG_RW,
    &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");

#if 0
SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
    &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");

SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
    &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif

#define	NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY		\
			 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE		\
			 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)

/*
 * SMP Locking Note :
 * The list of locks after the description of the lock is the ordering
 * of other locks acquired with the lock held.
 * np->n_mtx : Protects the fields in the nfsnode.
 *	VM Object Lock
 *	VI_MTX (acquired indirectly)
 * nmp->nm_mtx : Protects the fields in the nfsmount.
 *	rep->r_mtx
 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
 *	nmp->nm_mtx
 *	rep->r_mtx
 * rep->r_mtx : Protects the fields in an nfsreq.
 */
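
/*
 * Perform the ACCESS RPC for NFSv3/v4 and cache the result.  The
 * per-nfsnode cache holds one entry per credential: an existing entry
 * for this cred is updated in place, otherwise the entry with the
 * oldest timestamp is evicted (a simple LRU over n_accesscache[]).
 */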
static int
nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
    struct ucred *cred, u_int32_t *retmode)
{
	int error = 0, attrflag, i, lrupos;
	u_int32_t rmode;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;

	error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
	    &rmode, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
	if (!error) {
		lrupos = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (np->n_accesscache[i].uid == cred->cr_uid) {
				np->n_accesscache[i].mode = rmode;
				np->n_accesscache[i].stamp = time_second;
				break;
			}
			if (i > 0 && np->n_accesscache[i].stamp <
			    np->n_accesscache[lrupos].stamp)
				lrupos = i;
		}
		if (i == NFS_ACCESSCACHESIZE) {
			np->n_accesscache[lrupos].uid = cred->cr_uid;
			np->n_accesscache[lrupos].mode = rmode;
			np->n_accesscache[lrupos].stamp = time_second;
		}
		mtx_unlock(&np->n_mtx);
		if (retmode != NULL)
			*retmode = rmode;
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs versions 3 and 4, use the access rpc to check accessibility. If
 * file modes are changed on the server, accesses might still fail later.
 */
static int
nfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0, i, gotahit;
	u_int32_t mode, wmode, rmode;
	int v34 = NFS_ISV34(vp);
	struct nfsnode *np = VTONFS(vp);

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
	    VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
	    VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * For nfs v3 or v4, check to see if we have done this recently, and if
	 * so return our cached result instead of making an ACCESS call.
	 * If not, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about.
	 */
	if (v34) {
		if (ap->a_accmode & VREAD)
			mode = NFSACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_EXECUTE;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
		} else {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
			if (ap->a_accmode & VAPPEND)
				mode |= NFSACCESS_EXTEND;
			if (ap->a_accmode & VEXEC)
				mode |= NFSACCESS_LOOKUP;
			if (ap->a_accmode & VDELETE)
				mode |= NFSACCESS_DELETE;
			if (ap->a_accmode & VDELETE_CHILD)
				mode |= NFSACCESS_MODIFY;
		}
		/* XXX safety belt, only make blanket request if caching */
		if (nfsaccess_cache_timeout > 0) {
			wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
			    NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
			    NFSACCESS_DELETE | NFSACCESS_LOOKUP;
		} else {
			wmode = mode;
		}

		/*
		 * Does our cached result allow us to give a definite yes to
		 * this request?
		 */
		gotahit = 0;
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
			if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
				if (time_second < (np->n_accesscache[i].stamp
				    + nfsaccess_cache_timeout) &&
				    (np->n_accesscache[i].mode & mode) == mode) {
					NFSINCRGLOBAL(newnfsstats.accesscache_hits);
					gotahit = 1;
				}
				break;
			}
		}
		mtx_unlock(&np->n_mtx);
		if (gotahit == 0) {
			/*
			 * Either a no, or a don't know.  Go to the wire.
			 */
			NFSINCRGLOBAL(newnfsstats.accesscache_misses);
			error = nfs34_access_otw(vp, wmode, ap->a_td,
			    ap->a_cred, &rmode);
			if (!error &&
			    (rmode & mode) != mode)
				error = EACCES;
		}
		return (error);
	} else {
		if ((error = nfsspec_access(ap)) != 0) {
			return (error);
		}
		/*
		 * Attempt to prevent a mapped root from accessing a file
		 * which it shouldn't.  We try to read a byte from the file
		 * if the user is root and the file is not zero length.
		 * After calling nfsspec_access, we should have the correct
		 * file size cached.
		 */
		mtx_lock(&np->n_mtx);
		if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
		    && VTONFS(vp)->n_size > 0) {
			struct iovec aiov;
			struct uio auio;
			char buf[1];

			mtx_unlock(&np->n_mtx);
			aiov.iov_base = buf;
			aiov.iov_len = 1;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = 0;
			auio.uio_resid = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = ap->a_td;

			if (vp->v_type == VREG)
				error = ncl_readrpc(vp, &auio, ap->a_cred);
			else if (vp->v_type == VDIR) {
				char* bp;
				bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
				aiov.iov_base = bp;
				aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
				error = ncl_readdirrpc(vp, &auio, ap->a_cred,
				    ap->a_td);
				free(bp, M_TEMP);
			} else if (vp->v_type == VLNK)
				error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
			else
				error = EACCES;
		} else
			mtx_unlock(&np->n_mtx);
		return (error);
	}
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
static int
nfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;
	int fmode = ap->a_mode;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
		return (EOPNOTSUPP);

	/*
	 * For NFSv4, we need to do the Open Op before cache validation,
	 * so that we conform to RFC3530 Sec. 9.3.1.
	 */
	if (NFS_ISV4(vp)) {
		error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
		if (error) {
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
			return (error);
		}
	}

	/*
	 * Now, if this Open will be doing reading, re-validate/flush the
	 * cache, so that Close/Open coherency is maintained.
	 */
	if ((fmode & FREAD) && (!NFS_ISV4(vp) || nfscl_mustflush(vp))) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error == EINTR || error == EIO) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_attrstamp = 0;
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			mtx_unlock(&np->n_mtx);
			error = VOP_GETATTR(vp, &vattr, ap->a_cred);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			if (NFS_ISV4(vp))
				np->n_change = vattr.va_filerev;
			mtx_unlock(&np->n_mtx);
		} else {
			mtx_unlock(&np->n_mtx);
			error = VOP_GETATTR(vp, &vattr, ap->a_cred);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
			    NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
				if (vp->v_type == VDIR)
					np->n_direofoffset = 0;
				mtx_unlock(&np->n_mtx);
				error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
				if (error == EINTR || error == EIO) {
					if (NFS_ISV4(vp))
						(void) nfsrpc_close(vp, 0,
						    ap->a_td);
					return (error);
				}
				mtx_lock(&np->n_mtx);
				np->n_mtime = vattr.va_mtime;
				if (NFS_ISV4(vp))
					np->n_change = vattr.va_filerev;
			}
			mtx_unlock(&np->n_mtx);
		}
	}

	/*
	 * If the object has >= 1 O_DIRECT active opens, we disable caching.
	 */
	if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
		if (np->n_directio_opens == 0) {
			error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			if (error) {
				if (NFS_ISV4(vp))
					(void) nfsrpc_close(vp, 0, ap->a_td);
				return (error);
			}
			mtx_lock(&np->n_mtx);
			np->n_flag |= NNONCACHE;
		} else {
			mtx_lock(&np->n_mtx);
		}
		np->n_directio_opens++;
		mtx_unlock(&np->n_mtx);
	}
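
	/*
	 * Note: vattr is only filled in by the VOP_GETATTR() calls on the
	 * FREAD re-validation paths above; on a write-only open that skipped
	 * them, the va_size passed to vnode_create_vobject() below is stale
	 * or uninitialized (XXX).
	 */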
	vnode_create_vobject(vp, vattr.va_size, ap->a_td);
	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 *  should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough". Changing the last argument to ncl_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.)
 * for NFS Version 4 - flush the dirty buffers and commit them, if
 *                     nfscl_mustflush() says this is necessary.
 *                     It is necessary if there is no write delegation held,
 *                     in order to satisfy open/close coherency.
 *                     If the file isn't cached on local stable storage,
 *                     it may be necessary in order to detect "out of space"
 *                     errors from the server, if the write delegation
 *                     issued by the server doesn't allow the file to grow.
 */
/* ARGSUSED */
static int
nfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsvattr nfsva;
	struct ucred *cred;
	int error = 0, ret, localcred = 0;
	int fmode = ap->a_fflag;

	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF))
		return (0);
	/*
	 * During shutdown, a_cred isn't valid, so just use root.
	 */
	if (ap->a_cred == NOCRED) {
		cred = newnfs_getcred();
		localcred = 1;
	} else {
		cred = ap->a_cred;
	}
	if (vp->v_type == VREG) {
		/*
		 * Examine and clean dirty pages, regardless of NMODIFIED.
		 * This closes a major hole in close-to-open consistency.
		 * We want to push out all dirty pages (and buffers) on
		 * close, regardless of whether they were dirtied by
		 * mmap'ed writes or via write().
		 */
		if (nfs_clean_pages_on_close && vp->v_object) {
			VM_OBJECT_LOCK(vp->v_object);
			vm_object_page_clean(vp->v_object, 0, 0, 0);
			VM_OBJECT_UNLOCK(vp->v_object);
		}
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
			if (NFS_ISV3(vp)) {
				/*
				 * Under NFSv3 we have dirty buffers to
				 * dispose of.  We must flush them to the NFS
				 * server.  We have the option of waiting all
				 * the way through the commit rpc or just
				 * waiting for the initial write.  The default
				 * is to only wait through the initial write
				 * so the data is in the server's cache, which
				 * is roughly similar to the state a standard
				 * disk subsystem leaves the file in on
				 * close().
				 *
				 * We cannot clear the NMODIFIED bit in
				 * np->n_flag due to potential races with
				 * other processes, and certainly cannot clear
				 * it if we don't commit.
				 * These races occur when there is no longer
				 * the old traditional vnode locking
				 * implemented for Vnode Ops.
				 */
				int cm = newnfs_commit_on_close ? 1 : 0;
				error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm, 0);
				/* np->n_flag &= ~NMODIFIED; */
			} else if (NFS_ISV4(vp)) {
				if (nfscl_mustflush(vp)) {
					int cm = newnfs_commit_on_close ? 1 : 0;
					error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td,
					    cm, 0);
					/*
					 * as above w.r.t races when clearing
					 * NMODIFIED.
					 * np->n_flag &= ~NMODIFIED;
					 */
				}
			} else
				error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
			mtx_lock(&np->n_mtx);
		}
		/*
		 * Invalidate the attribute cache in all cases.
		 * An open is going to fetch fresh attrs any way, other procs
		 * on this node that have file open will be forced to do an
		 * otw attr fetch, but this is safe.
		 * --> A user found that their RPC count dropped by 20% when
		 * this was commented out and I can't see any requirement
		 * for it, so I've disabled it when negative lookups are
		 * enabled. (What does this have to do with negative lookup
		 * caching? Well nothing, except it was reported by the
		 * same user that needed negative lookup caching and I wanted
		 * there to be a way to disable it to see if it
		 * is the cause of some caching/coherency issue that might
		 * crop up.)
		 */
		if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0)
			np->n_attrstamp = 0;
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
		mtx_unlock(&np->n_mtx);
	}

	if (NFS_ISV4(vp)) {
		/*
		 * Get attributes so "change" is up to date.
		 */
		if (!error) {
			ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
			    NULL);
			if (!ret) {
				np->n_change = nfsva.na_filerev;
				(void) nfscl_loadattrcache(&vp, &nfsva, NULL,
				    NULL, 0, 0);
			}
		}

		/*
		 * and do the close.
		 */
		ret = nfsrpc_close(vp, 0, ap->a_td);
		if (!error && ret)
			error = ret;
		if (error)
			error = nfscl_maperr(ap->a_td, error, (uid_t)0,
			    (gid_t)0);
	}
	if (newnfs_directio_enable)
		KASSERT((np->n_directio_asyncwr == 0),
		    ("nfs_close: dirty unflushed (%d) directio buffers\n",
		     np->n_directio_asyncwr));
	if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
		mtx_lock(&np->n_mtx);
		KASSERT((np->n_directio_opens > 0),
		    ("nfs_close: unexpected value (0) of n_directio_opens\n"));
		np->n_directio_opens--;
		if (np->n_directio_opens == 0)
			np->n_flag &= ~NNONCACHE;
		mtx_unlock(&np->n_mtx);
	}
	if (localcred)
		NFSFREECRED(cred);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
static int
nfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct nfsvattr nfsva;
	struct vattr *vap = ap->a_vap;
	struct vattr vattr;

	/*
	 * Update local times for special files.
	 */
	mtx_lock(&np->n_mtx);
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;
	mtx_unlock(&np->n_mtx);
	/*
	 * First look in the cache.
	 */
	if (ncl_getattrcache(vp, &vattr) == 0) {
		vap->va_type = vattr.va_type;
		vap->va_mode = vattr.va_mode;
		vap->va_nlink = vattr.va_nlink;
		vap->va_uid = vattr.va_uid;
		vap->va_gid = vattr.va_gid;
		vap->va_fsid = vattr.va_fsid;
		vap->va_fileid = vattr.va_fileid;
		vap->va_size = vattr.va_size;
		vap->va_blocksize = vattr.va_blocksize;
		vap->va_atime = vattr.va_atime;
		vap->va_mtime = vattr.va_mtime;
		vap->va_ctime = vattr.va_ctime;
		vap->va_gen = vattr.va_gen;
		vap->va_flags = vattr.va_flags;
		vap->va_rdev = vattr.va_rdev;
		vap->va_bytes = vattr.va_bytes;
		vap->va_filerev = vattr.va_filerev;
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
		return (0);
	}
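
	/*
	 * Attribute cache miss.  If configured, prime the access cache
	 * first: the v3/v4 ACCESS reply also carries attributes, which
	 * refills the attribute cache as a side effect.
	 */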
	if (NFS_ISV34(vp) && nfs_prime_access_cache &&
	    nfsaccess_cache_timeout > 0) {
		NFSINCRGLOBAL(newnfsstats.accesscache_misses);
		nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
		if (ncl_getattrcache(vp, ap->a_vap) == 0) {
			nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
			return (0);
		}
	}
	error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
	if (!error)
		error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
	if (!error) {
		/*
		 * Get the local modify time for the case of a write
		 * delegation.
		 */
		nfscl_deleggetmodtime(vp, &vap->va_mtime);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * nfs setattr call.
 */
static int
nfs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct thread *td = curthread;	/* XXX */
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize;

#ifndef nolint
	tsize = (u_quad_t)0;
#endif

	/*
	 * Setting of flags and marking of atimes are not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			/*
			 * We run vnode_pager_setsize() early (why?),
			 * we must set np->n_size now to avoid vinvalbuf
			 * V_SAVE races that might setsize a lower
			 * value.
			 */
			mtx_lock(&np->n_mtx);
			tsize = np->n_size;
			mtx_unlock(&np->n_mtx);
			error = ncl_meta_setsize(vp, ap->a_cred, td,
			    vap->va_size);
			mtx_lock(&np->n_mtx);
			if (np->n_flag & NMODIFIED) {
				tsize = np->n_size;
				mtx_unlock(&np->n_mtx);
				if (vap->va_size == 0)
					error = ncl_vinvalbuf(vp, 0, td, 1);
				else
					error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
				if (error) {
					vnode_pager_setsize(vp, tsize);
					return (error);
				}
				/*
				 * Call nfscl_delegmodtime() to set the modify
				 * time locally, as required.
				 */
				nfscl_delegmodtime(vp);
			} else
				mtx_unlock(&np->n_mtx);
			/*
			 * np->n_size has already been set to vap->va_size
			 * in ncl_meta_setsize(). We must set it again since
			 * nfs_loadattrcache() could be called through
			 * ncl_meta_setsize() and could modify np->n_size.
			 */
			mtx_lock(&np->n_mtx);
			np->n_vattr.na_size = np->n_size = vap->va_size;
			mtx_unlock(&np->n_mtx);
		}
	} else {
		mtx_lock(&np->n_mtx);
		if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
		    (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
			mtx_unlock(&np->n_mtx);
			if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
			    (error == EINTR || error == EIO))
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
	if (error && vap->va_size != VNOVAL) {
		mtx_lock(&np->n_mtx);
		np->n_size = np->n_vattr.na_size = tsize;
		vnode_pager_setsize(vp, tsize);
		mtx_unlock(&np->n_mtx);
	}
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
static int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	struct nfsnode *np = VTONFS(vp);
	int error, ret, attrflag, i;
	struct nfsvattr nfsva;
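
	/*
	 * Changing attributes can change our access rights on the server,
	 * so toss the entire per-cred access cache for v3/v4; NDELEGMOD
	 * notes the local modification for the delegation code.
	 */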
	if (NFS_ISV34(vp)) {
		mtx_lock(&np->n_mtx);
		for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
			np->n_accesscache[i].stamp = 0;
		np->n_flag |= NDELEGMOD;
		mtx_unlock(&np->n_mtx);
	}
	error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
	    NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct mount *mp = dvp->v_mount;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	struct nfsmount *nmp;
	struct nfsnode *np, *newnp;
	int error = 0, attrflag, dattrflag, ltype;
	struct thread *td = cnp->cn_thread;
	struct nfsfh *nfhp;
	struct nfsvattr dnfsva, nfsva;
	struct vattr vattr;
	time_t dmtime;

	*vpp = NULLVP;
	if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	nmp = VFSTONFS(mp);
	np = VTONFS(dvp);

	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&np->n_mtx);
	while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
		np->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
	}
	mtx_unlock(&np->n_mtx);

	if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
		return (error);
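
	/*
	 * cache_lookup() returns -1 for a positive name cache hit (with
	 * *vpp set), ENOENT for a cached negative entry, and 0 when there
	 * is no cache entry at all.
	 */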
	error = cache_lookup(dvp, vpp, cnp);
	if (error > 0 && error != ENOENT)
		return (error);
	if (error == -1) {
		/*
		 * We only accept a positive hit in the cache if the
		 * change time of the file matches our cached copy.
		 * Otherwise, we discard the cache entry and fallback
		 * to doing a lookup RPC.
		 *
		 * To better handle stale file handles and attributes,
		 * clear the attribute cache of this node if it is a
		 * leaf component, part of an open() call, and not
		 * locally modified before fetching the attributes.
		 * This should allow stale file handles to be detected
		 * here where we can fall back to a LOOKUP RPC to
		 * recover rather than having nfs_open() detect the
		 * stale file handle and failing open(2) with ESTALE.
		 */
		newvp = *vpp;
		newnp = VTONFS(newvp);
		if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
		    !(newnp->n_flag & NMODIFIED)) {
			mtx_lock(&newnp->n_mtx);
			newnp->n_attrstamp = 0;
			mtx_unlock(&newnp->n_mtx);
		}
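		/*
		 * Trust the cached vnode without an attribute check when
		 * the client holds a delegation for it (nfscl_nodeleg()
		 * returns 0); otherwise require the change time to match
		 * the n_ctime recorded when the entry was cached.
		 */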
		if (nfscl_nodeleg(newvp, 0) == 0 ||
		    (VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
		    vattr.va_ctime.tv_sec == newnp->n_ctime)) {
			NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
			if (cnp->cn_nameiop != LOOKUP &&
			    (flags & ISLASTCN))
				cnp->cn_flags |= SAVENAME;
			return (0);
		}
		cache_purge(newvp);
		if (dvp != newvp)
			vput(newvp);
		else
			vrele(newvp);
		*vpp = NULLVP;
	} else if (error == ENOENT) {
		if (dvp->v_iflag & VI_DOOMED)
			return (ENOENT);
		/*
		 * We only accept a negative hit in the cache if the
		 * modification time of the parent directory matches
		 * our cached copy.  Otherwise, we discard all of the
		 * negative cache entries for this directory.  We also
		 * only trust -ve cache entries for less than
		 * nm_negnametimeo seconds.
		 */
		if ((u_int)(ticks - np->n_dmtime_ticks) <
		    (nmp->nm_negnametimeo * hz) &&
		    VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
		    vattr.va_mtime.tv_sec == np->n_dmtime) {
			NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
			return (ENOENT);
		}
		cache_purge_negative(dvp);
		mtx_lock(&np->n_mtx);
		np->n_dmtime = 0;
		mtx_unlock(&np->n_mtx);
	}

	/*
	 * Cache the modification time of the parent directory in case
	 * the lookup fails and results in adding the first negative
	 * name cache entry for the directory.  Since this is reading
	 * a single time_t, don't bother with locking.  The
	 * modification time may be a bit stale, but it must be read
	 * before performing the lookup RPC to prevent a race where
	 * another lookup updates the timestamp on the directory after
	 * the lookup RPC has been performed on the server but before
	 * n_dmtime is set at the end of this function.
	 */
	dmtime = np->n_vattr.na_mtime.tv_sec;
	error = 0;
	newvp = NULLVP;
	NFSINCRGLOBAL(newnfsstats.lookupcache_misses);
	error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
	    NULL);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (error) {
		if (newvp != NULLVP) {
			vput(newvp);
			*vpp = NULLVP;
		}

		if (error != ENOENT) {
			if (NFS_ISV4(dvp))
				error = nfscl_maperr(td, error, (uid_t)0,
				    (gid_t)0);
			return (error);
		}

		/* The requested file was not found. */
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN)) {
			/*
			 * XXX: UFS does a full VOP_ACCESS(dvp,
			 * VWRITE) here instead of just checking
			 * MNT_RDONLY.
			 */
			if (mp->mnt_flag & MNT_RDONLY)
				return (EROFS);
			cnp->cn_flags |= SAVENAME;
			return (EJUSTRETURN);
		}

		if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE) {
			/*
			 * Maintain n_dmtime as the modification time
			 * of the parent directory when the oldest -ve
			 * name cache entry for this directory was
			 * added.  If a -ve cache entry has already
			 * been added with a newer modification time
			 * by a concurrent lookup, then don't bother
			 * adding a cache entry.  The modification
			 * time of the directory might have changed
			 * due to the file this lookup failed to find
			 * being created.  In that case a subsequent
			 * lookup would incorrectly use the entry
			 * added here instead of doing an extra
			 * lookup.
			 */
			mtx_lock(&np->n_mtx);
			if (np->n_dmtime <= dmtime) {
				if (np->n_dmtime == 0) {
					np->n_dmtime = dmtime;
					np->n_dmtime_ticks = ticks;
				}
				mtx_unlock(&np->n_mtx);
				cache_enter(dvp, NULL, cnp);
			} else
				mtx_unlock(&np->n_mtx);
		}
		return (ENOENT);
	}

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
			FREE((caddr_t)nfhp, M_NFSFH);
			return (EISDIR);
		}
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		*vpp = newvp;
		cnp->cn_flags |= SAVENAME;
		return (0);
	}
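
	/*
	 * For "..", nget the parent while dvp (the child directory) is
	 * unlocked: acquiring the parent's lock while holding the child's
	 * would violate the vnode lock order and could deadlock against a
	 * concurrent forward lookup.  The mount is vfs_busy()ed so it
	 * cannot be unmounted while dvp is unlocked, and dvp is re-checked
	 * for VI_DOOMED afterwards.
	 */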
	if (flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		error = vfs_busy(mp, MBF_NOWAIT);
		if (error != 0) {
			vfs_ref(mp);
			VOP_UNLOCK(dvp, 0);
			error = vfs_busy(mp, 0);
			vn_lock(dvp, ltype | LK_RETRY);
			vfs_rel(mp);
			if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
				vfs_unbusy(mp);
				error = ENOENT;
			}
			if (error != 0)
				return (error);
		}
		VOP_UNLOCK(dvp, 0);
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL);
		if (error == 0)
			newvp = NFSTOV(np);
		vfs_unbusy(mp);
		if (newvp != dvp)
			vn_lock(dvp, ltype | LK_RETRY);
		if (dvp->v_iflag & VI_DOOMED) {
			if (error == 0) {
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
			}
			error = ENOENT;
		}
		if (error != 0)
			return (error);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
		FREE((caddr_t)nfhp, M_NFSFH);
		VREF(dvp);
		newvp = dvp;
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else {
		error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL);
		if (error)
			return (error);
		newvp = NFSTOV(np);
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
		else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
		    !(np->n_flag & NMODIFIED)) {
			/*
			 * Flush the attribute cache when opening a
			 * leaf node to ensure that fresh attributes
			 * are fetched in nfs_open() since we did not
			 * fetch attributes from the LOOKUP reply.
			 */
			mtx_lock(&np->n_mtx);
			np->n_attrstamp = 0;
			mtx_unlock(&np->n_mtx);
		}
	}
	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
		cnp->cn_flags |= SAVENAME;
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
		np->n_ctime = np->n_vattr.na_vattr.va_ctime.tv_sec;
		cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	return (0);
}

/*
 * nfs read call.
 * Just call ncl_bioread() to do the work.
 */
static int
nfs_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;

	switch (vp->v_type) {
	case VREG:
		return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
	case VDIR:
		return (EISDIR);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * nfs readlink call
 */
static int
nfs_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EINVAL);
	return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
}

/*
 * Do a readlink rpc.
 * Called by ncl_doio() from below the buffer cache.
 */
int
ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	int error, ret, attrflag;
	struct nfsvattr nfsva;

	error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
	    &attrflag, NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	int error, ret, attrflag;
	struct nfsvattr nfsva;

	error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, &attrflag,
	    NULL);
	if (attrflag) {
		ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
		if (ret && !error)
			error = ret;
	}
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs write call
 */
int
ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int *iomode, int *must_commit, int called_from_strategy)
{
	struct nfsvattr nfsva;
	int error = 0, attrflag, ret;
	u_char verf[NFSX_VERF];
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	*must_commit = 0;
	error = nfsrpc_write(vp, uiop, iomode, verf, cred,
	    uiop->uio_td, &nfsva, &attrflag, NULL, called_from_strategy);
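	/*
	 * A change in the server's write verifier means the server has
	 * rebooted and may have lost previously unstable (uncommitted)
	 * writes, so tell the caller via *must_commit that those buffers
	 * must be written again.
	 */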
	NFSLOCKMNT(nmp);
	if (!error && NFSHASWRITEVERF(nmp) &&
	    NFSBCMP(verf, nmp->nm_verf, NFSX_VERF)) {
		*must_commit = 1;
		NFSBCOPY(verf, nmp->nm_verf, NFSX_VERF);
	}
	NFSUNLOCKMNT(nmp);
	if (attrflag) {
		if (VTONFS(vp)->n_flag & ND_NFSV4)
			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
			    1);
		else
			ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
			    1);
		if (ret && !error)
			error = ret;
	}
	if (vp->v_mount->mnt_kern_flag & MNTK_ASYNC)
		*iomode = NFSWRITE_FILESYNC;
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
static int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct vattr *vap)
{
	struct nfsvattr nfsva, dnfsva;
	struct vnode *newvp = NULL;
	struct nfsnode *np = NULL, *dnp;
	struct nfsfh *nfhp;
	struct vattr vattr;
	int error = 0, attrflag, dattrflag;
	u_int32_t rdev;

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = vap->va_rdev;
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = 0xffffffff;
	else
		return (EOPNOTSUPP);
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
	    rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (!nfhp)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag)
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (!error) {
		if ((cnp->cn_flags & MAKEENTRY))
			cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag)
		dnp->n_attrstamp = 0;
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(struct vop_mknod_args *ap)
{
	return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
}

static struct mtx nfs_cverf_mtx;
MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
    MTX_DEF);
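
/*
 * Return a "create verifier" for NFSv3/v4 exclusive create.  The value
 * is seeded once from arc4random() and then simply incremented, so each
 * exclusive create issued by this client uses a distinct verifier.
 */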
static nfsquad_t
nfs_get_cverf(void)
{
	static nfsquad_t cverf;
	nfsquad_t ret;
	static int cverf_initialized = 0;

	mtx_lock(&nfs_cverf_mtx);
	if (cverf_initialized == 0) {
		cverf.lval[0] = arc4random();
		cverf.lval[1] = arc4random();
		cverf_initialized = 1;
	} else
		cverf.qval++;
	ret = cverf;
	mtx_unlock(&nfs_cverf_mtx);

	return (ret);
}

/*
 * nfs file create call
 */
static int
nfs_create(struct vop_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	struct nfsmount *nmp;
	struct nfsvattr dnfsva, nfsva;
	struct nfsfh *nfhp;
	nfsquad_t cverf;
	int error = 0, attrflag, dattrflag, fmode = 0;
	struct vattr vattr;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
	dnp = VTONFS(dvp);
	nmp = VFSTONFS(vnode_mount(dvp));
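	/*
	 * If the server turns out not to support exclusive create
	 * (NFSERR_NOTSUPP below), O_EXCL is cleared and the create is
	 * retried from here.
	 */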
again:
	/* For NFSv4, wait until any remove is done. */
	mtx_lock(&dnp->n_mtx);
	while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
		dnp->n_flag |= NREMOVEWANT;
		(void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
	}
	mtx_unlock(&dnp->n_mtx);

	cverf = nfs_get_cverf();
	error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
	    &nfhp, &attrflag, &dattrflag, NULL);
	if (!error) {
		if (nfhp == NULL)
			(void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
			    &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
			    NULL);
		if (nfhp != NULL)
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
			    cnp->cn_thread, &np, NULL);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (!error) {
		newvp = NFSTOV(np);
		if (attrflag)
			error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (error) {
		if (newvp != NULL) {
			vrele(newvp);
			newvp = NULL;
		}
		if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
		    error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
	} else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
		if (nfscl_checksattr(vap, &nfsva)) {
			error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
			    cnp->cn_thread, &nfsva, &attrflag, NULL);
			if (error && (vap->va_uid != (uid_t)VNOVAL ||
			    vap->va_gid != (gid_t)VNOVAL)) {
				/* try again without setting uid/gid */
				vap->va_uid = (uid_t)VNOVAL;
				vap->va_gid = (uid_t)VNOVAL;
				error = nfsrpc_setattr(newvp, vap, NULL,
				    cnp->cn_cred, cnp->cn_thread, &nfsva,
				    &attrflag, NULL);
			}
			if (attrflag)
				(void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
				    NULL, 0, 1);
		}
	}
	if (!error) {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	} else if (NFS_ISV4(dvp)) {
		error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
		    vap->va_gid);
	}
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag)
		dnp->n_attrstamp = 0;
	mtx_unlock(&dnp->n_mtx);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	     call nfs_sillyrename() to set it up
 *     else
 *	  do the remove rpc
 */
static int
nfs_remove(struct vop_remove_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

	KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
	KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
		/* Do the rpc */
		if (error != EINTR && error != EIO)
			error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
		/*
		 * Kludge City: If the first reply to the remove rpc is lost..
		 *   the reply to the retransmitted request will be ENOENT
		 *   since the file was in fact removed
		 *   Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp);
	np->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
ncl_removeit(struct sillyrename *sp, struct vnode *vp)
{
	/*
	 * Make sure that the directory vnode is still valid.
	 * XXX we should lock sp->s_dvp here.
	 */
	if (sp->s_dvp->v_type == VBAD)
		return (0);
	return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
	    sp->s_cred, NULL));
}

/*
 * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
 */
static int
nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
    int namelen, struct ucred *cred, struct thread *td)
{
	struct nfsvattr dnfsva;
	struct nfsnode *dnp = VTONFS(dvp);
	int error = 0, dattrflag;
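
	/*
	 * Set NREMOVEINPROG so that NFSv4 lookups and creates in this
	 * directory block until the remove has completed (see nfs_lookup()
	 * and nfs_create()); NREMOVEWANT marks waiters to wake up.
	 */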
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NREMOVEINPROG;
	mtx_unlock(&dnp->n_mtx);
	error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
	    &dattrflag, NULL);
	mtx_lock(&dnp->n_mtx);
	if ((dnp->n_flag & NREMOVEWANT)) {
		dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
		mtx_unlock(&dnp->n_mtx);
		wakeup((caddr_t)dnp);
	} else {
		dnp->n_flag &= ~NREMOVEINPROG;
		mtx_unlock(&dnp->n_mtx);
	}
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	if (!dattrflag)
		dnp->n_attrstamp = 0;
	mtx_unlock(&dnp->n_mtx);
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs file rename call
 */
static int
nfs_rename(struct vop_rename_args *ap)
{
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct nfsnode *fnp = VTONFS(ap->a_fvp);
	struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
	struct nfsv4node *newv4 = NULL;
	int error;

	KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
	    (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	if (fvp == tvp) {
		ncl_printf("nfs_rename: fvp == tvp (can't happen)\n");
		error = 0;
		goto out;
	}
	if ((error = vn_lock(fvp, LK_EXCLUSIVE)))
		goto out;

	/*
	 * We have to flush B_DELWRI data prior to renaming
	 * the file. If we don't, the delayed-write buffers
	 * can be flushed out later after the file has gone stale
	 * under NFSV3.  NFSV2 does not have this problem because
	 * ( as far as I can tell ) it flushes dirty buffers more
	 * often.
	 *
	 * Skip the rename operation if the fsync fails, this can happen
	 * due to the server's volume being full, when we pushed out data
	 * that was written back to our cache earlier. Not checking for
	 * this condition can result in potential (silent) data loss.
	 */
	error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
	VOP_UNLOCK(fvp, 0);
	if (!error && tvp)
		error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
	if (error)
		goto out;

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 * XXX Can't sillyrename a directory.
	 */
	if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
	    tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
	    tcnp->cn_thread);

	if (!error) {
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		MALLOC(newv4, struct nfsv4node *,
		    sizeof (struct nfsv4node) +
		    tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
		    M_NFSV4NODE, M_WAITOK);
		mtx_lock(&tdnp->n_mtx);
		mtx_lock(&fnp->n_mtx);
		if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
		    (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
		    NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
		    tcnp->cn_namelen) ||
		    tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
		    NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
		    tdnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
bcopy(tcnp->cn_nameptr, nnn, nnnl);
nnn[nnnl] = '\0';
printf("ren replace=%s\n",nnn);
}
#endif
			FREE((caddr_t)fnp->n_v4, M_NFSV4NODE);
			fnp->n_v4 = newv4;
			newv4 = NULL;
			fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
			fnp->n_v4->n4_namelen = tcnp->cn_namelen;
			NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
			    tdnp->n_fhp->nfh_len);
			NFSBCOPY(tcnp->cn_nameptr,
			    NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
		}
		mtx_unlock(&tdnp->n_mtx);
		mtx_unlock(&fnp->n_mtx);
		if (newv4 != NULL)
			FREE((caddr_t)newv4, M_NFSV4NODE);
	}

	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}

out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename rpc called from nfs_sillyrename()
 */
static int
nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
    struct sillyrename *sp)
{

	return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
	    sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
	    scnp->cn_thread));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
static int
nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
    int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
    int tnamelen, struct ucred *cred, struct thread *td)
{
	struct nfsvattr fnfsva, tnfsva;
	struct nfsnode *fdnp = VTONFS(fdvp);
	struct nfsnode *tdnp = VTONFS(tdvp);
	int error = 0, fattrflag, tattrflag;

	error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
	    tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
	    &tattrflag, NULL, NULL);
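	/*
	 * Both directories changed: mark them modified and, where the RPC
	 * reply did not include fresh post-op attributes for one of them,
	 * invalidate its cached attributes instead.
	 */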
 */
static int
nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
    int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
    int tnamelen, struct ucred *cred, struct thread *td)
{
	struct nfsvattr fnfsva, tnfsva;
	struct nfsnode *fdnp = VTONFS(fdvp);
	struct nfsnode *tdnp = VTONFS(tdvp);
	int error = 0, fattrflag, tattrflag;

	error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
	    tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
	    &tattrflag, NULL, NULL);
	mtx_lock(&fdnp->n_mtx);
	fdnp->n_flag |= NMODIFIED;
	mtx_unlock(&fdnp->n_mtx);
	mtx_lock(&tdnp->n_mtx);
	tdnp->n_flag |= NMODIFIED;
	mtx_unlock(&tdnp->n_mtx);
	if (fattrflag)
		(void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
	else
		fdnp->n_attrstamp = 0;
	if (tattrflag)
		(void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
	else
		tdnp->n_attrstamp = 0;
	if (error && NFS_ISV4(fdvp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * nfs hard link create call
 */
static int
nfs_link(struct vop_link_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *tdnp;
	struct nfsvattr nfsva, dnfsva;
	int error = 0, attrflag, dattrflag;

	if (vp->v_mount != tdvp->v_mount) {
		return (EXDEV);
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);

	error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
	    &dattrflag, NULL);
	tdnp = VTONFS(tdvp);
	mtx_lock(&tdnp->n_mtx);
	tdnp->n_flag |= NMODIFIED;
	mtx_unlock(&tdnp->n_mtx);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
	else
		VTONFS(vp)->n_attrstamp = 0;
	if (dattrflag)
		(void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
	else
		tdnp->n_attrstamp = 0;
	/*
	 * If negative lookup caching is enabled, I might as well
	 * add an entry for this node. Not necessary for correctness,
	 * but if negative caching is enabled, then the system
	 * must care about lookup caching hit rate, so...
 */
	if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
	    (cnp->cn_flags & MAKEENTRY))
		cache_enter(tdvp, vp, cnp);
	if (error && NFS_ISV4(vp))
		error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
		    (gid_t)0);
	return (error);
}

/*
 * nfs symbolic link create call
 */
static int
nfs_symlink(struct vop_symlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsvattr nfsva, dnfsva;
	struct nfsfh *nfhp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	int error = 0, attrflag, dattrflag, ret;

	vap->va_type = VLNK;
	error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
	    &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
	if (nfhp) {
		ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
		    &np, NULL);
		if (!ret)
			newvp = NFSTOV(np);
		else if (!error)
			error = ret;
	}
	if (newvp != NULL) {
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	} else if (!error) {
		/*
		 * If we do not have an error and we could not extract the
		 * newvp from the response due to the request being NFSv2, we
		 * have to do a lookup in order to obtain a newvp to return.
		 */
		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
		    cnp->cn_cred, cnp->cn_thread, &np);
		if (!error)
			newvp = NFSTOV(np);
	}
	if (error) {
		if (newvp)
			vput(newvp);
		if (NFS_ISV4(dvp))
			error = nfscl_maperr(cnp->cn_thread, error,
			    vap->va_uid, vap->va_gid);
	} else {
		/*
		 * If negative lookup caching is enabled, I might as well
		 * add an entry for this node. Not necessary for correctness,
		 * but if negative caching is enabled, then the system
		 * must care about lookup caching hit rate, so...
 */
		if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
		    (cnp->cn_flags & MAKEENTRY))
			cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}

	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	mtx_unlock(&dnp->n_mtx);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	else
		dnp->n_attrstamp = 0;
	return (error);
}

/*
 * nfs make dir call
 */
static int
nfs_mkdir(struct vop_mkdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = NULL, *dnp;
	struct vnode *newvp = NULL;
	struct vattr vattr;
	struct nfsfh *nfhp;
	struct nfsvattr nfsva, dnfsva;
	int error = 0, attrflag, dattrflag, ret;

	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
		return (error);
	vap->va_type = VDIR;
	error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
	    &attrflag, &dattrflag, NULL);
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	mtx_unlock(&dnp->n_mtx);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	else
		dnp->n_attrstamp = 0;
	if (nfhp) {
		ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
		    &np, NULL);
		if (!ret) {
			newvp = NFSTOV(np);
			if (attrflag)
				(void) nfscl_loadattrcache(&newvp, &nfsva,
				    NULL, NULL, 0, 1);
		} else if (!error)
			error = ret;
	}
	if (!error && newvp == NULL) {
		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
		    cnp->cn_cred, cnp->cn_thread, &np);
		if (!error) {
			newvp = NFSTOV(np);
			if (newvp->v_type != VDIR)
				error = EEXIST;
		}
	}
	if (error) {
		if (newvp)
			vput(newvp);
		if (NFS_ISV4(dvp))
			error = nfscl_maperr(cnp->cn_thread, error,
			    vap->va_uid, vap->va_gid);
	} else {
		/*
		 * If negative lookup caching is enabled, I might as well
		 * add an entry for this node. Not necessary for correctness,
		 * but if negative caching is enabled, then the system
		 * must care about lookup caching hit rate, so...
		 */
		if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
		    (cnp->cn_flags & MAKEENTRY))
			cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}
	return (error);
}

/*
 * nfs remove directory call
 */
static int
nfs_rmdir(struct vop_rmdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *dnp;
	struct nfsvattr dnfsva;
	int error, dattrflag;

	if (dvp == vp)
		return (EINVAL);
	error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
	dnp = VTONFS(dvp);
	mtx_lock(&dnp->n_mtx);
	dnp->n_flag |= NMODIFIED;
	mtx_unlock(&dnp->n_mtx);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	else
		dnp->n_attrstamp = 0;

	cache_purge(dvp);
	cache_purge(vp);
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
		    (gid_t)0);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
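	 * (REMOVE and RMDIR are not idempotent; if the first reply is lost,
	 * the retransmitted request finds the entry already gone, so the
	 * server replies ENOENT even though the operation succeeded.)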
 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs readdir call
 */
static int
nfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct uio *uio = ap->a_uio;
	int tresid, error = 0;
	struct vattr vattr;

	if (vp->v_type != VDIR)
		return (EPERM);

	/*
	 * First, check for hit on the EOF offset cache
	 */
	if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
	    (np->n_flag & NMODIFIED) == 0) {
		if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
			mtx_lock(&np->n_mtx);
			if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
			    !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
				mtx_unlock(&np->n_mtx);
				NFSINCRGLOBAL(newnfsstats.direofcache_hits);
				return (0);
			} else
				mtx_unlock(&np->n_mtx);
		}
	}

	/*
	 * Call ncl_bioread() to do the real work.
	 */
	tresid = uio->uio_resid;
	error = ncl_bioread(vp, uio, 0, ap->a_cred);

	if (!error && uio->uio_resid == tresid)
		NFSINCRGLOBAL(newnfsstats.direofcache_misses);
	return (error);
}

/*
 * Readdir rpc call.
 * Called from below the buffer cache by ncl_doio().
 */
int
ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	nfsuint64 *cookiep, cookie;
	struct nfsnode *dnp = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, eof, attrflag;

	KASSERT(uiop->uio_iovcnt == 1 &&
	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
	    ("nfs readdirrpc bad uio"));

	/*
	 * If there is no cookie, assume the directory was stale.
	 */
	ncl_dircookie_lock(dnp);
	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep) {
		cookie = *cookiep;
		ncl_dircookie_unlock(dnp);
	} else {
		ncl_dircookie_unlock(dnp);
		return (NFSERR_BAD_COOKIE);
	}

	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
		(void)ncl_fsinfo(nmp, vp, cred, td);

	error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
	    &attrflag, &eof, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

	if (!error) {
		/*
		 * We are now either at the end of the directory or have
		 * filled the block.
		 */
		if (eof)
			dnp->n_direofoffset = uiop->uio_offset;
		else {
			if (uiop->uio_resid > 0)
				ncl_printf("EEK! readdirrpc resid > 0\n");
			ncl_dircookie_lock(dnp);
			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
			*cookiep = cookie;
			ncl_dircookie_unlock(dnp);
		}
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
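 * Along with each name, READDIRPLUS returns the entry's file handle
 * and attributes, allowing the client to prime its name and attribute
 * caches and avoid a separate Lookup RPC for every entry.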
 */
int
ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	nfsuint64 *cookiep, cookie;
	struct nfsnode *dnp = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, attrflag, eof;

	KASSERT(uiop->uio_iovcnt == 1 &&
	    (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
	    (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
	    ("nfs readdirplusrpc bad uio"));

	/*
	 * If there is no cookie, assume the directory was stale.
	 */
	ncl_dircookie_lock(dnp);
	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep) {
		cookie = *cookiep;
		ncl_dircookie_unlock(dnp);
	} else {
		ncl_dircookie_unlock(dnp);
		return (NFSERR_BAD_COOKIE);
	}

	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
		(void)ncl_fsinfo(nmp, vp, cred, td);
	error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
	    &attrflag, &eof, NULL);
	if (attrflag)
		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);

	if (!error) {
		/*
		 * We are now either at the end of the directory or have
		 * filled the block.
		 */
		if (eof)
			dnp->n_direofoffset = uiop->uio_offset;
		else {
			if (uiop->uio_resid > 0)
				ncl_printf("EEK! readdirplusrpc resid > 0\n");
			ncl_dircookie_lock(dnp);
			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
			*cookiep = cookie;
			ncl_dircookie_unlock(dnp);
		}
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * Silly rename. To make the stateless NFS filesystem look a little more
 * like the "ufs" filesystem, a remove of an active vnode is translated
 * to a rename to a funny looking filename that is removed by nfs_inactive
 * on the nfsnode. There is the potential for another process on a
 * different client to create the same funny name between the time that
 * nfs_lookitup() fails and nfs_rename() completes, but...
 */
static int
nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct sillyrename *sp;
	struct nfsnode *np;
	int error;
	short pid;
	unsigned int lticks;

	cache_purge(dvp);
	np = VTONFS(vp);
	KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
	MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
	    M_NEWNFSREQ, M_WAITOK);
	sp->s_cred = crhold(cnp->cn_cred);
	sp->s_dvp = dvp;
	VREF(dvp);

	/*
	 * Fudge together a funny name.
	 * Changing the format of the funny name to accommodate more
	 * silly names per directory.
	 * The name is now .nfs.<ticks>.<pid>4.4, where ticks is the CPU
	 * tick count since boot, in hex, and pid is the process id of
	 * the caller.
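	 * For example, with a tick count of 0x0003f2c1 and a pid of 0x1a2b
	 * (made-up values), the generated name is ".nfs.0003f2c1.1a2b4.4".
	 * If the name already exists, the tick value is incremented and
	 * another name is tried.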
 */
	pid = cnp->cn_thread->td_proc->p_pid;
	lticks = (unsigned int)ticks;
	for ( ; ; ) {
		sp->s_namlen = sprintf(sp->s_name,
		    ".nfs.%08x.%04x4.4", lticks,
		    pid);
		if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		    cnp->cn_thread, NULL))
			break;
		lticks++;
	}
	error = nfs_renameit(dvp, vp, cnp, sp);
	if (error)
		goto bad;
	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
	    cnp->cn_thread, &np);
	np->n_sillyrename = sp;
	return (0);
bad:
	vrele(sp->s_dvp);
	crfree(sp->s_cred);
	free((caddr_t)sp, M_NEWNFSREQ);
	return (error);
}

/*
 * Look up a file name and optionally either update the file handle or
 * allocate an nfsnode, depending on the value of npp.
 * npp == NULL	--> just do the lookup
 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 *		    handled too
 * *npp != NULL --> update the file handle in the vnode
 */
static int
nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
    struct thread *td, struct nfsnode **npp)
{
	struct vnode *newvp = NULL, *vp;
	struct nfsnode *np, *dnp = VTONFS(dvp);
	struct nfsfh *nfhp, *onfhp;
	struct nfsvattr nfsva, dnfsva;
	struct componentname cn;
	int error = 0, attrflag, dattrflag;
	u_int hash;

	error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
	    &nfhp, &attrflag, &dattrflag, NULL);
	if (dattrflag)
		(void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
	if (npp && !error) {
		if (*npp != NULL) {
			np = *npp;
			vp = NFSTOV(np);
			/*
			 * For NFSv4, check to see if it is the same name and
			 * replace the name, if it is different.
			 */
			if (np->n_v4 != NULL && nfsva.na_type == VREG &&
			    (np->n_v4->n4_namelen != len ||
			     NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
			     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			      dnp->n_fhp->nfh_len))) {
#ifdef notdef
{ char nnn[100]; int nnnl;
nnnl = (len < 100) ? len : 99;
bcopy(name, nnn, nnnl);
nnn[nnnl] = '\0';
printf("replace=%s\n",nnn);
}
#endif
				FREE((caddr_t)np->n_v4, M_NFSV4NODE);
				MALLOC(np->n_v4, struct nfsv4node *,
				    sizeof (struct nfsv4node) +
				    dnp->n_fhp->nfh_len + len - 1,
				    M_NFSV4NODE, M_WAITOK);
				np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
				np->n_v4->n4_namelen = len;
				NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
				    dnp->n_fhp->nfh_len);
				NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
			}
			hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
			    FNV1_32_INIT);
			onfhp = np->n_fhp;
			/*
			 * Rehash node for new file handle.
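			 * The nfsnode sits on a vfs hash chain keyed by an
			 * FNV-1 hash of its file handle, so the vnode must
			 * be rehashed whenever the handle changes.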
 */
			vfs_hash_rehash(vp, hash);
			np->n_fhp = nfhp;
			if (onfhp != NULL)
				FREE((caddr_t)onfhp, M_NFSFH);
			newvp = NFSTOV(np);
		} else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
			FREE((caddr_t)nfhp, M_NFSFH);
			VREF(dvp);
			newvp = dvp;
		} else {
			cn.cn_nameptr = name;
			cn.cn_namelen = len;
			error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
			    &np, NULL);
			if (error)
				return (error);
			newvp = NFSTOV(np);
		}
		if (!attrflag && *npp == NULL) {
			vrele(newvp);
			return (ENOENT);
		}
		if (attrflag)
			(void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
			    0, 1);
	}
	if (npp && *npp == NULL) {
		if (error) {
			if (newvp) {
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
			}
		} else
			*npp = np;
	}
	if (error && NFS_ISV4(dvp))
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	return (error);
}

/*
 * Nfs Version 3 and 4 commit rpc
 */
int
ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
    struct thread *td)
{
	struct nfsvattr nfsva;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error, attrflag;
	u_char verf[NFSX_VERF];

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		return (0);
	}
	mtx_unlock(&nmp->nm_mtx);
	error = nfsrpc_commit(vp, offset, cnt, cred, td, verf, &nfsva,
	    &attrflag, NULL);
	if (!error) {
		if (NFSBCMP((caddr_t)nmp->nm_verf, verf, NFSX_VERF)) {
			NFSBCOPY(verf, (caddr_t)nmp->nm_verf, NFSX_VERF);
			error = NFSERR_STALEWRITEVERF;
		}
		if (!error && attrflag)
			(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
			    0, 1);
	} else if (NFS_ISV4(vp)) {
		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
	}
	return (error);
}

/*
 * Strategy routine.
 * For async requests when nfsiod(s) are running, queue the request by
 * calling ncl_asyncio(); otherwise just call ncl_doio() to do the
 * request.
 */
static int
nfs_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp = ap->a_bp;
	struct ucred *cr;

	KASSERT(!(bp->b_flags & B_DONE),
	    ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
	BUF_ASSERT_HELD(bp);

	if (bp->b_iocmd == BIO_READ)
		cr = bp->b_rcred;
	else
		cr = bp->b_wcred;

	/*
	 * If the op is asynchronous and an i/o daemon is waiting,
	 * queue the request, wake it up and wait for completion;
	 * otherwise just do it ourselves.
	 */
	if ((bp->b_flags & B_ASYNC) == 0 ||
	    ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
		(void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
	return (0);
}

/*
 * fsync vnode op. Just call ncl_flush() with commit == 1.
 */
/* ARGSUSED */
static int
nfs_fsync(struct vop_fsync_args *ap)
{
	return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 * If the called_from_renewthread argument is TRUE, it has been called
 * from the NFSv4 renew thread and, as such, cannot block indefinitely
 * waiting for a buffer write to complete.
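 *
 * Under NFSv3 and v4, dirty data may have been sent to the server with
 * UNSTABLE writes, in which case the server may reply before the data
 * reaches stable storage. Such buffers are marked B_NEEDCOMMIT and must
 * be committed with a Commit RPC (or rewritten) before they can be
 * treated as clean.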
 */
int
ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td,
    int commit, int called_from_renewthread)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp;
	int i;
	struct buf *nbp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
	int passone = 1, trycnt = 0;
	u_quad_t off, endoff, toff;
	struct ucred* wcred = NULL;
	struct buf **bvec = NULL;
	struct bufobj *bo;
#ifndef NFS_COMMITBVECSIZ
#define	NFS_COMMITBVECSIZ	20
#endif
	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
	int bvecsize = 0, bveccount;

	if (called_from_renewthread != 0)
		slptimeo = hz;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	if (!commit)
		passone = 0;
	bo = &vp->v_bufobj;
	/*
	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. On the first pass, the byte range is worked out and the commit
	 * rpc is done. On the second pass, ncl_writebp() is called to do the
	 * job.
	 */
again:
	off = (u_quad_t)-1;
	endoff = 0;
	bvecpos = 0;
	if (NFS_ISV34(vp) && commit) {
		if (bvec != NULL && bvec != bvec_on_stack)
			free(bvec, M_TEMP);
		/*
		 * Count up how many buffers are waiting for a commit.
		 */
		bveccount = 0;
		BO_LOCK(bo);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bveccount++;
		}
		/*
		 * Allocate space to remember the list of bufs to commit. It is
		 * important to use M_NOWAIT here to avoid a race with nfs_write.
		 * If we can't get memory (for whatever reason), we will end up
		 * committing the buffers one-by-one in the loop below.
		 */
		if (bveccount > NFS_COMMITBVECSIZ) {
			/*
			 * Release the vnode interlock to avoid a lock
			 * order reversal.
			 */
			BO_UNLOCK(bo);
			bvec = (struct buf **)
			    malloc(bveccount * sizeof(struct buf *),
			    M_TEMP, M_NOWAIT);
			BO_LOCK(bo);
			if (bvec == NULL) {
				bvec = bvec_on_stack;
				bvecsize = NFS_COMMITBVECSIZ;
			} else
				bvecsize = bveccount;
		} else {
			bvec = bvec_on_stack;
			bvecsize = NFS_COMMITBVECSIZ;
		}
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bvecpos >= bvecsize)
				break;
			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
				nbp = TAILQ_NEXT(bp, b_bobufs);
				continue;
			}
			if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
			    (B_DELWRI | B_NEEDCOMMIT)) {
				BUF_UNLOCK(bp);
				nbp = TAILQ_NEXT(bp, b_bobufs);
				continue;
			}
			BO_UNLOCK(bo);
			bremfree(bp);
			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 *
			 * NOTE: we are not clearing B_DONE here, so we have
			 * to do it later on in this routine if we intend to
			 * initiate I/O on the bp.
			 *
			 * Note: to avoid loopback deadlocks, we do not
			 * assign b_runningbufspace.
			 */
			if (wcred == NULL)
				wcred = bp->b_wcred;
			else if (wcred != bp->b_wcred)
				wcred = NOCRED;
			vfs_busy_pages(bp, 1);

			BO_LOCK(bo);
			/*
			 * bp is protected by being locked, but nbp is not
			 * and vfs_busy_pages() may sleep. We have to
			 * recalculate nbp.
 */
			nbp = TAILQ_NEXT(bp, b_bobufs);

			/*
			 * A list of these buffers is kept so that the
			 * second loop knows which buffers have actually
			 * been committed. This is necessary, since there
			 * may be a race between the commit rpc and new
			 * uncommitted writes on the file.
			 */
			bvec[bvecpos++] = bp;
			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
			    bp->b_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		BO_UNLOCK(bo);
	}
	if (bvecpos > 0) {
		/*
		 * Commit data on the server, as required.
		 * If all bufs are using the same wcred, then use that with
		 * one call for all of them; otherwise commit each one
		 * separately.
		 */
		if (wcred != NOCRED)
			retv = ncl_commit(vp, off, (int)(endoff - off),
			    wcred, td);
		else {
			retv = 0;
			for (i = 0; i < bvecpos; i++) {
				off_t off, size;
				bp = bvec[i];
				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
				    bp->b_dirtyoff;
				size = (u_quad_t)(bp->b_dirtyend
				    - bp->b_dirtyoff);
				retv = ncl_commit(vp, off, (int)size,
				    bp->b_wcred, td);
				if (retv) break;
			}
		}

		if (retv == NFSERR_STALEWRITEVERF)
			ncl_clearcommit(vp->v_mount);

		/*
		 * Now, either mark the blocks I/O done or mark the
		 * blocks dirty, depending on whether the commit
		 * succeeded.
		 */
		for (i = 0; i < bvecpos; i++) {
			bp = bvec[i];
			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			if (retv) {
				/*
				 * Error, leave B_DELWRI intact
				 */
				vfs_unbusy_pages(bp);
				brelse(bp);
			} else {
				/*
				 * Success, remove B_DELWRI (bundirty()).
				 *
				 * b_dirtyoff/b_dirtyend seem to be NFS
				 * specific. We should probably move that
				 * into bundirty(). XXX
				 */
				bufobj_wref(bo);
				bp->b_flags |= B_ASYNC;
				bundirty(bp);
				bp->b_flags &= ~B_DONE;
				bp->b_ioflags &= ~BIO_ERROR;
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bufdone(bp);
			}
		}
	}

	/*
	 * Start/do any write(s) that are required.
	 */
loop:
	BO_LOCK(bo);
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (waitfor != MNT_WAIT || passone)
				continue;

			error = BUF_TIMELOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo), "nfsfsync", slpflag, slptimeo);
			if (error == 0) {
				BUF_UNLOCK(bp);
				goto loop;
			}
			if (error == ENOLCK) {
				error = 0;
				goto loop;
			}
			if (called_from_renewthread != 0) {
				/*
				 * Return EIO so the flush will be retried
				 * later.
 */
				error = EIO;
				goto done;
			}
			if (newnfs_sigintr(nmp, td)) {
				error = EINTR;
				goto done;
			}
			if (slpflag & PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			}
			goto loop;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("nfs_fsync: not dirty");
		if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
			BUF_UNLOCK(bp);
			continue;
		}
		BO_UNLOCK(bo);
		bremfree(bp);
		bp->b_flags |= B_ASYNC;
		bwrite(bp);
		if (newnfs_sigintr(nmp, td)) {
			error = EINTR;
			goto done;
		}
		goto loop;
	}
	if (passone) {
		passone = 0;
		BO_UNLOCK(bo);
		goto again;
	}
	if (waitfor == MNT_WAIT) {
		while (bo->bo_numoutput) {
			error = bufobj_wwait(bo, slpflag, slptimeo);
			if (error) {
				BO_UNLOCK(bo);
				if (called_from_renewthread != 0) {
					/*
					 * Return EIO so that the flush will be
					 * retried later.
					 */
					error = EIO;
					goto done;
				}
				error = newnfs_sigintr(nmp, td);
				if (error)
					goto done;
				if (slpflag & PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
				BO_LOCK(bo);
			}
		}
		if (bo->bo_dirty.bv_cnt != 0 && commit) {
			BO_UNLOCK(bo);
			goto loop;
		}
		/*
		 * Wait for all the async IO requests to drain
		 */
		BO_UNLOCK(bo);
		mtx_lock(&np->n_mtx);
		while (np->n_directio_asyncwr > 0) {
			np->n_flag |= NFSYNCWAIT;
			error = newnfs_msleep(td, &np->n_directio_asyncwr,
			    &np->n_mtx, slpflag | (PRIBIO + 1),
			    "nfsfsync", 0);
			if (error) {
				if (newnfs_sigintr(nmp, td)) {
					mtx_unlock(&np->n_mtx);
					error = EINTR;
					goto done;
				}
			}
		}
		mtx_unlock(&np->n_mtx);
	} else
		BO_UNLOCK(bo);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
	if (commit && bo->bo_dirty.bv_cnt == 0 &&
	    bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
done:
	if (bvec != NULL && bvec != bvec_on_stack)
		free(bvec, M_TEMP);
	if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
	    (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
	     np->n_directio_asyncwr != 0) && trycnt++ < 5) {
		/* try, try again... */
		passone = 1;
		wcred = NULL;
		bvec = NULL;
		bvecsize = 0;
		printf("try%d\n", trycnt);
		goto again;
	}
	return (error);
}

/*
 * NFS advisory byte-level locks.
 */
static int
nfs_advlock(struct vop_advlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;
	struct nfsnode *np = VTONFS(ap->a_vp);
	struct proc *p = (struct proc *)ap->a_id;
	struct thread *td = curthread;	/* XXX */
	struct vattr va;
	int ret, error = EOPNOTSUPP;
	u_quad_t size;

	if (NFS_ISV4(vp) && (ap->a_flags & F_POSIX)) {
		cred = p->p_ucred;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			return (EBADF);
		}

		/*
		 * If this is unlocking a write locked region, flush and
		 * commit them before unlocking. This is required by
		 * RFC3530 Sec. 9.3.2.
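		 * The flush ensures that another client which subsequently
		 * acquires a lock on the same region will see the data
		 * written under this lock.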
 */
		if (ap->a_op == F_UNLCK &&
		    nfscl_checkwritelocked(vp, ap->a_fl, cred, td))
			(void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0);

		/*
		 * Loop around doing the lock op, since a blocking lock
		 * must wait until the lock op succeeds.
		 */
		do {
			ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
			    ap->a_fl, 0, cred, td);
			if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
			    ap->a_op == F_SETLK) {
				VOP_UNLOCK(vp, 0);
				error = nfs_catnap(PZERO | PCATCH, ret,
				    "ncladvl");
				if (error)
					return (EINTR);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				if (vp->v_iflag & VI_DOOMED) {
					VOP_UNLOCK(vp, 0);
					return (EBADF);
				}
			}
		} while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
		    ap->a_op == F_SETLK);
		if (ret == NFSERR_DENIED) {
			VOP_UNLOCK(vp, 0);
			return (EAGAIN);
		} else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
			VOP_UNLOCK(vp, 0);
			return (ret);
		} else if (ret != 0) {
			VOP_UNLOCK(vp, 0);
			return (EACCES);
		}

		/*
		 * Now, if we just got a lock, invalidate data in the buffer
		 * cache, as required, so that the coherency conforms with
		 * RFC3530 Sec. 9.3.2.
		 */
		if (ap->a_op == F_SETLK) {
			if ((np->n_flag & NMODIFIED) == 0) {
				np->n_attrstamp = 0;
				ret = VOP_GETATTR(vp, &va, cred);
			}
			if ((np->n_flag & NMODIFIED) || ret ||
			    np->n_change != va.va_filerev) {
				(void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
				np->n_attrstamp = 0;
				ret = VOP_GETATTR(vp, &va, cred);
				if (!ret) {
					np->n_mtime = va.va_mtime;
					np->n_change = va.va_filerev;
				}
			}
		}
		VOP_UNLOCK(vp, 0);
		return (0);
	} else if (!NFS_ISV4(vp)) {
		error = vn_lock(vp, LK_SHARED);
		if (error)
			return (error);
		if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
			size = VTONFS(vp)->n_size;
			VOP_UNLOCK(vp, 0);
			error = lf_advlock(ap, &(vp->v_lockf), size);
		} else {
			if (ncl_advlock_p)
				error = ncl_advlock_p(ap);
			else
				error = ENOLCK;
		}
	}
	return (error);
}

/*
 * NFS advisory byte-level locks.
 */
static int
nfs_advlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	u_quad_t size;
	int error;

	if (NFS_ISV4(vp))
		return (EOPNOTSUPP);
	error = vn_lock(vp, LK_SHARED);
	if (error)
		return (error);
	if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
		size = VTONFS(vp)->n_size;
		VOP_UNLOCK(vp, 0);
		error = lf_advlockasync(ap, &(vp->v_lockf), size);
	} else {
		VOP_UNLOCK(vp, 0);
		error = EOPNOTSUPP;
	}
	return (error);
}

/*
 * Print out the contents of an nfsnode.
 */
static int
nfs_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	ncl_printf("\tfileid %ld fsid 0x%x",
	    np->n_vattr.na_fileid, np->n_vattr.na_fsid);
	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);
	printf("\n");
	return (0);
}

/*
 * This is the "real" nfs::bwrite(struct buf*).
 * We set B_CACHE if this is a VMIO buffer.
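 * (B_CACHE marks the buffer contents valid, so a later read of the
 * block can be satisfied from the cache without another read RPC.)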
 */
int
ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
{
	int s;
	int oldflags = bp->b_flags;
#if 0
	int retv = 1;
	off_t off;
#endif

	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	bp->b_flags |= B_CACHE;

	/*
	 * Undirty the bp. We will redirty it later if the I/O fails.
	 */
	s = splbio();
	bundirty(bp);
	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_iocmd = BIO_WRITE;

	bufobj_wref(bp->b_bufobj);
	curthread->td_ru.ru_oublock++;
	splx(s);

	/*
	 * Note: to avoid loopback deadlocks, we do not
	 * assign b_runningbufspace.
	 */
	vfs_busy_pages(bp, 1);

	BUF_KERNPROC(bp);
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);

		if (oldflags & B_DELWRI) {
			s = splbio();
			reassignbuf(bp);
			splx(s);
		}
		brelse(bp);
		return (rtval);
	}

	return (0);
}

/*
 * nfs special file access vnode op.
 * Essentially just get vattr and then imitate vaccess() since the device is
 * local to the client.
 */
static int
nfsspec_access(struct vop_access_args *ap)
{
	struct vattr *vap;
	struct ucred *cred = ap->a_cred;
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;
	struct vattr vattr;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only,
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	vap = &vattr;
	error = VOP_GETATTR(vp, vap, cred);
	if (error)
		goto out;
	error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
	    accmode, cred, NULL);
out:
	return (error);
}

/*
 * Read wrapper for fifos.
 */
static int
nfsfifo_read(struct vop_read_args *ap)
{
	struct nfsnode *np = VTONFS(ap->a_vp);
	int error;

	/*
	 * Set access flag.
	 */
	mtx_lock(&np->n_mtx);
	np->n_flag |= NACC;
	getnanotime(&np->n_atim);
	mtx_unlock(&np->n_mtx);
	error = fifo_specops.vop_read(ap);
	return (error);
}

/*
 * Write wrapper for fifos.
 */
static int
nfsfifo_write(struct vop_write_args *ap)
{
	struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set update flag.
	 */
	mtx_lock(&np->n_mtx);
	np->n_flag |= NUPD;
	getnanotime(&np->n_mtim);
	mtx_unlock(&np->n_mtx);
	return (fifo_specops.vop_write(ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the nfsnode then do fifo close.
 */
static int
nfsfifo_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	struct timespec ts;

	mtx_lock(&np->n_mtx);
	if (np->n_flag & (NACC | NUPD)) {
		getnanotime(&ts);
		if (np->n_flag & NACC)
			np->n_atim = ts;
		if (np->n_flag & NUPD)
			np->n_mtim = ts;
		np->n_flag |= NCHG;
		if (vrefcnt(vp) == 1 &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			VATTR_NULL(&vattr);
			if (np->n_flag & NACC)
				vattr.va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vattr.va_mtime = np->n_mtim;
			mtx_unlock(&np->n_mtx);
			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
			goto out;
		}
	}
	mtx_unlock(&np->n_mtx);
out:
	return (fifo_specops.vop_close(ap));
}

/*
 * Just call ncl_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs_bwrite(struct buf *bp)
{

	return (ncl_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_newnfs = {
	.bop_name	= "buf_ops_nfs",
	.bop_write	= nfs_bwrite,
	.bop_strategy	= bufstrategy,
	.bop_sync	= bufsync,
	.bop_bdflush	= bufbdflush,
};

/*
 * Cloned from vop_stdlock(), and then the ugly hack added.
 */
static int
nfs_lock1(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0;

	/*
	 * Since vfs_hash_get() calls vget() and it will no longer work
	 * for FreeBSD8 with flags == 0, I can only think of this horrible
	 * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER
	 * and then handle it here. All I want for this case is a v_usecount
	 * on the vnode to use for recovery, while another thread might
	 * hold a lock on the vnode. I have the other threads blocked, so
	 * there isn't any race problem.
	 */
	if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) {
		if ((ap->a_flags & LK_INTERLOCK) == 0)
			panic("ncllock1");
		if ((vp->v_iflag & VI_DOOMED))
			error = ENOENT;
		VI_UNLOCK(vp);
		return (error);
	}
	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

static int
nfs_getacl(struct vop_getacl_args *ap)
{
	int error;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EOPNOTSUPP);
	error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
	    NULL);
	if (error > NFSERR_STALE) {
		(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
		error = EPERM;
	}
	return (error);
}

static int
nfs_setacl(struct vop_setacl_args *ap)
{
	int error;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EOPNOTSUPP);
	error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
	    NULL);
	if (error > NFSERR_STALE) {
		(void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
		error = EPERM;
	}
	return (error);
}