xref: /freebsd/sys/fs/nfsclient/nfs_clbio.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Rick Macklem at The University of Guelph.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bio.h>
41 #include <sys/buf.h>
42 #include <sys/kernel.h>
43 #include <sys/mount.h>
44 #include <sys/proc.h>
45 #include <sys/resourcevar.h>
46 #include <sys/signalvar.h>
47 #include <sys/vmmeter.h>
48 #include <sys/vnode.h>
49 
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vnode_pager.h>
56 
57 #include <fs/nfs/nfsport.h>
58 #include <fs/nfsclient/nfsmount.h>
59 #include <fs/nfsclient/nfs.h>
60 #include <fs/nfsclient/nfsnode.h>
61 
62 extern int newnfs_directio_allow_mmap;
63 extern struct nfsstats newnfsstats;
64 extern struct mtx ncl_iod_mutex;
65 extern int ncl_numasync;
66 extern struct proc *ncl_iodwant[NFS_MAXRAHEAD];
67 extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD];
68 extern int newnfs_directio_enable;
69 
70 int ncl_pbuf_freecnt = -1;	/* start out unlimited */
71 
72 static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
73     struct thread *td);
74 static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
75     struct ucred *cred, int ioflag);
76 
77 /*
78  * Any signal that can interrupt an NFS operation in an intr mount
79  * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
80  */
81 static int nfs_sig_set[] = {
82 	SIGINT,
83 	SIGTERM,
84 	SIGHUP,
85 	SIGKILL,
86 	SIGSTOP,
87 	SIGQUIT
88 };
89 
90 #ifdef notnow
91 /*
92  * Check to see if one of the signals in our subset is pending on
93  * the process (in an intr mount).
94  */
95 int
96 ncl_sig_pending(sigset_t set)
97 {
98 	int i;
99 
100 	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
101 		if (SIGISMEMBER(set, nfs_sig_set[i]))
102 			return (1);
103 	return (0);
104 }
105 #endif
106 
107 /*
108  * The set/restore sigmask functions are used to (temporarily) overwrite
109  * the process p_sigmask during an RPC call (for example). These are also
110  * used in other places in the NFS client that might tsleep().
111  */
112 static void
113 ncl_set_sigmask(struct thread *td, sigset_t *oldset)
114 {
115 	sigset_t newset;
116 	int i;
117 	struct proc *p;
118 
119 	SIGFILLSET(newset);
120 	if (td == NULL)
121 		td = curthread; /* XXX */
122 	p = td->td_proc;
123 	/* Remove the NFS set of signals from newset */
124 	PROC_LOCK(p);
125 	mtx_lock(&p->p_sigacts->ps_mtx);
126 	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
127 		/*
128 		 * But make sure we leave the ones already masked
129 		 * by the process, i.e., remove the signal from the
130 		 * temporary signal mask only if it wasn't already
131 		 * in p_sigmask.
132 		 */
133 		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
134 		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
135 			SIGDELSET(newset, nfs_sig_set[i]);
136 	}
137 	mtx_unlock(&p->p_sigacts->ps_mtx);
138 	PROC_UNLOCK(p);
139 	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
140 }
141 
142 static void
143 ncl_restore_sigmask(struct thread *td, sigset_t *set)
144 {
145 	if (td == NULL)
146 		td = curthread; /* XXX */
147 	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
148 }
149 
150 /*
151  * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
152  * old one after msleep() returns.
153  */
154 int
155 ncl_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
156 {
157 	sigset_t oldset;
158 	int error;
159 	struct proc *p;
160 
161 	if ((priority & PCATCH) == 0)
162 		return (msleep(ident, mtx, priority, wmesg, timo));
163 	if (td == NULL)
164 		td = curthread; /* XXX */
165 	ncl_set_sigmask(td, &oldset);
166 	error = msleep(ident, mtx, priority, wmesg, timo);
167 	ncl_restore_sigmask(td, &oldset);
168 	p = td->td_proc;
169 	return (error);
170 }
171 
172 /*
173  * Vnode op for VM getpages.
174  */
175 int
176 ncl_getpages(struct vop_getpages_args *ap)
177 {
178 	int i, error, nextoff, size, toff, count, npages;
179 	struct uio uio;
180 	struct iovec iov;
181 	vm_offset_t kva;
182 	struct buf *bp;
183 	struct vnode *vp;
184 	struct thread *td;
185 	struct ucred *cred;
186 	struct nfsmount *nmp;
187 	vm_object_t object;
188 	vm_page_t *pages;
189 	struct nfsnode *np;
190 
191 	vp = ap->a_vp;
192 	np = VTONFS(vp);
193 	td = curthread;				/* XXX */
194 	cred = curthread->td_ucred;		/* XXX */
195 	nmp = VFSTONFS(vp->v_mount);
196 	pages = ap->a_m;
197 	count = ap->a_count;
198 
199 	if ((object = vp->v_object) == NULL) {
200 		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
201 		return (VM_PAGER_ERROR);
202 	}
203 
204 	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
205 		mtx_lock(&np->n_mtx);
206 		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
207 			mtx_unlock(&np->n_mtx);
208 			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
209 			return (VM_PAGER_ERROR);
210 		} else
211 			mtx_unlock(&np->n_mtx);
212 	}
213 
214 	mtx_lock(&nmp->nm_mtx);
215 	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
216 	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
217 		mtx_unlock(&nmp->nm_mtx);
218 		/* We'll never get here for v4, because we always have fsinfo */
219 		(void)ncl_fsinfo(nmp, vp, cred, td);
220 	} else
221 		mtx_unlock(&nmp->nm_mtx);
222 
223 	npages = btoc(count);
224 
225 	/*
226 	 * If the requested page is partially valid, just return it and
227 	 * allow the pager to zero-out the blanks.  Partially valid pages
228 	 * can only occur at the file EOF.
229 	 */
230 	VM_OBJECT_LOCK(object);
231 	if (pages[ap->a_reqpage]->valid != 0) {
232 		vm_page_lock_queues();
233 		for (i = 0; i < npages; ++i) {
234 			if (i != ap->a_reqpage)
235 				vm_page_free(pages[i]);
236 		}
237 		vm_page_unlock_queues();
238 		VM_OBJECT_UNLOCK(object);
239 		return (0);
240 	}
241 	VM_OBJECT_UNLOCK(object);
242 
243 	/*
244 	 * We use only the kva address for the buffer, but this is extremely
245  * convenient and fast.
246 	 */
247 	bp = getpbuf(&ncl_pbuf_freecnt);
248 
249 	kva = (vm_offset_t) bp->b_data;
250 	pmap_qenter(kva, pages, npages);
251 	PCPU_INC(cnt.v_vnodein);
252 	PCPU_ADD(cnt.v_vnodepgsin, npages);
253 
254 	iov.iov_base = (caddr_t) kva;
255 	iov.iov_len = count;
256 	uio.uio_iov = &iov;
257 	uio.uio_iovcnt = 1;
258 	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
259 	uio.uio_resid = count;
260 	uio.uio_segflg = UIO_SYSSPACE;
261 	uio.uio_rw = UIO_READ;
262 	uio.uio_td = td;
263 
264 	error = ncl_readrpc(vp, &uio, cred);
265 	pmap_qremove(kva, npages);
266 
267 	relpbuf(bp, &ncl_pbuf_freecnt);
268 
269 	if (error && (uio.uio_resid == count)) {
270 		ncl_printf("nfs_getpages: error %d\n", error);
271 		VM_OBJECT_LOCK(object);
272 		vm_page_lock_queues();
273 		for (i = 0; i < npages; ++i) {
274 			if (i != ap->a_reqpage)
275 				vm_page_free(pages[i]);
276 		}
277 		vm_page_unlock_queues();
278 		VM_OBJECT_UNLOCK(object);
279 		return (VM_PAGER_ERROR);
280 	}
281 
282 	/*
283 	 * Calculate the number of bytes read and validate only that number
284 	 * of bytes.  Note that due to pending writes, size may be 0.  This
285 	 * does not mean that the remaining data is invalid!
286 	 */
287 
288 	size = count - uio.uio_resid;
289 	VM_OBJECT_LOCK(object);
290 	vm_page_lock_queues();
291 	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
292 		vm_page_t m;
293 		nextoff = toff + PAGE_SIZE;
294 		m = pages[i];
295 
296 		if (nextoff <= size) {
297 			/*
298 			 * Read operation filled an entire page
299 			 */
300 			m->valid = VM_PAGE_BITS_ALL;
301 			KASSERT(m->dirty == 0,
302 			    ("nfs_getpages: page %p is dirty", m));
303 		} else if (size > toff) {
304 			/*
305 			 * Read operation filled a partial page.
306 			 */
307 			m->valid = 0;
308 			vm_page_set_valid(m, 0, size - toff);
309 			KASSERT(m->dirty == 0,
310 			    ("nfs_getpages: page %p is dirty", m));
311 		} else {
312 			/*
313 			 * Read operation was short.  If no error occurred
314 			 * we may have hit a zero-fill section.  We simply
315 			 * leave valid set to 0.
316 			 */
317 			;
318 		}
319 		if (i != ap->a_reqpage) {
320 			/*
321 			 * Whether or not to leave the page activated is up in
322 			 * the air, but we should put the page on a page queue
323 			 * somewhere (it already is in the object).
324 			 * Empirical results show that deactivating
325 			 * pages is best.
326 			 */
327 
328 			/*
329 			 * Just in case someone was asking for this page we
330 			 * now tell them that it is ok to use.
331 			 */
332 			if (!error) {
333 				if (m->oflags & VPO_WANTED)
334 					vm_page_activate(m);
335 				else
336 					vm_page_deactivate(m);
337 				vm_page_wakeup(m);
338 			} else {
339 				vm_page_free(m);
340 			}
341 		}
342 	}
343 	vm_page_unlock_queues();
344 	VM_OBJECT_UNLOCK(object);
345 	return (0);
346 }
347 
348 /*
349  * Vnode op for VM putpages.
350  */
351 int
352 ncl_putpages(struct vop_putpages_args *ap)
353 {
354 	struct uio uio;
355 	struct iovec iov;
356 	vm_offset_t kva;
357 	struct buf *bp;
358 	int iomode, must_commit, i, error, npages, count;
359 	off_t offset;
360 	int *rtvals;
361 	struct vnode *vp;
362 	struct thread *td;
363 	struct ucred *cred;
364 	struct nfsmount *nmp;
365 	struct nfsnode *np;
366 	vm_page_t *pages;
367 
368 	vp = ap->a_vp;
369 	np = VTONFS(vp);
370 	td = curthread;				/* XXX */
371 	cred = curthread->td_ucred;		/* XXX */
372 	nmp = VFSTONFS(vp->v_mount);
373 	pages = ap->a_m;
374 	count = ap->a_count;
375 	rtvals = ap->a_rtvals;
376 	npages = btoc(count);
377 	offset = IDX_TO_OFF(pages[0]->pindex);
378 
379 	mtx_lock(&nmp->nm_mtx);
380 	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
381 	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
382 		mtx_unlock(&nmp->nm_mtx);
383 		(void)ncl_fsinfo(nmp, vp, cred, td);
384 	} else
385 		mtx_unlock(&nmp->nm_mtx);
386 
387 	mtx_lock(&np->n_mtx);
388 	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
389 	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
390 		mtx_unlock(&np->n_mtx);
391 		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
392 		mtx_lock(&np->n_mtx);
393 	}
394 
395 	for (i = 0; i < npages; i++)
396 		rtvals[i] = VM_PAGER_AGAIN;
397 
398 	/*
399 	 * When putting pages, do not extend file past EOF.
400 	 */
401 	if (offset + count > np->n_size) {
402 		count = np->n_size - offset;
403 		if (count < 0)
404 			count = 0;
405 	}
406 	mtx_unlock(&np->n_mtx);
407 
408 	/*
409 	 * We use only the kva address for the buffer, but this is extremely
410 	 * convenient and fast.
411 	 */
412 	bp = getpbuf(&ncl_pbuf_freecnt);
413 
414 	kva = (vm_offset_t) bp->b_data;
415 	pmap_qenter(kva, pages, npages);
416 	PCPU_INC(cnt.v_vnodeout);
417 	PCPU_ADD(cnt.v_vnodepgsout, count);
418 
419 	iov.iov_base = (caddr_t) kva;
420 	iov.iov_len = count;
421 	uio.uio_iov = &iov;
422 	uio.uio_iovcnt = 1;
423 	uio.uio_offset = offset;
424 	uio.uio_resid = count;
425 	uio.uio_segflg = UIO_SYSSPACE;
426 	uio.uio_rw = UIO_WRITE;
427 	uio.uio_td = td;
428 
429 	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
430 	    iomode = NFSWRITE_UNSTABLE;
431 	else
432 	    iomode = NFSWRITE_FILESYNC;
433 
434 	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit);
435 
436 	pmap_qremove(kva, npages);
437 	relpbuf(bp, &ncl_pbuf_freecnt);
438 
439 	if (!error) {
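		/*
		 * The write RPC succeeded: figure out how many pages the
		 * bytes actually written span and mark those pages clean.
		 */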
440 		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
441 		for (i = 0; i < nwritten; i++) {
442 			rtvals[i] = VM_PAGER_OK;
443 			vm_page_undirty(pages[i]);
444 		}
445 		if (must_commit) {
446 			ncl_clearcommit(vp->v_mount);
447 		}
448 	}
449 	return (rtvals[0]);
450 }
451 
452 /*
453  * For nfs, cache consistency can only be maintained approximately.
454  * Although RFC1094 does not specify the criteria, the following is
455  * believed to be compatible with the reference port.
456  * For nfs:
457  * If the file's modify time on the server has changed since the
458  * last read rpc or you have written to the file,
459  * you may have lost data cache consistency with the
460  * server, so flush all of the file's data out of the cache.
461  * Then force a getattr rpc to ensure that you have up-to-date
462  * attributes.
463  * NB: This implies that cache data can be read when up to
464  * NFS_ATTRTIMEO seconds out of date. If you find that you need current
465  * attributes this could be forced by setting n_attrstamp to 0 before
466  * the VOP_GETATTR() call.
467  */
468 static inline int
469 nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
470 {
471 	int error = 0;
472 	struct vattr vattr;
473 	struct nfsnode *np = VTONFS(vp);
474 	int old_lock;
475 
476 	/*
477 	 * Grab the exclusive lock before checking whether the cache is
478 	 * consistent.
479 	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
480 	 * But for now, this suffices.
481 	 */
482 	old_lock = ncl_upgrade_vnlock(vp);
483 	if (vp->v_iflag & VI_DOOMED) {
484 		ncl_downgrade_vnlock(vp, old_lock);
485 		return (EBADF);
486 	}
487 
488 	mtx_lock(&np->n_mtx);
489 	if (np->n_flag & NMODIFIED) {
490 		mtx_unlock(&np->n_mtx);
491 		if (vp->v_type != VREG) {
492 			if (vp->v_type != VDIR)
493 				panic("nfs: bioread, not dir");
494 			ncl_invaldir(vp);
495 			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
496 			if (error)
497 				goto out;
498 		}
499 		np->n_attrstamp = 0;
500 		error = VOP_GETATTR(vp, &vattr, cred);
501 		if (error)
502 			goto out;
503 		mtx_lock(&np->n_mtx);
504 		np->n_mtime = vattr.va_mtime;
505 		mtx_unlock(&np->n_mtx);
506 	} else {
507 		mtx_unlock(&np->n_mtx);
508 		error = VOP_GETATTR(vp, &vattr, cred);
509 		if (error)
510 			return (error);
511 		mtx_lock(&np->n_mtx);
512 		if ((np->n_flag & NSIZECHANGED)
513 		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
514 			mtx_unlock(&np->n_mtx);
515 			if (vp->v_type == VDIR)
516 				ncl_invaldir(vp);
517 			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
518 			if (error)
519 				goto out;
520 			mtx_lock(&np->n_mtx);
521 			np->n_mtime = vattr.va_mtime;
522 			np->n_flag &= ~NSIZECHANGED;
523 		}
524 		mtx_unlock(&np->n_mtx);
525 	}
526 out:
527 	ncl_downgrade_vnlock(vp, old_lock);
528 	return (error);
529 }
530 
531 /*
532  * Vnode op for read using bio
533  */
534 int
535 ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
536 {
537 	struct nfsnode *np = VTONFS(vp);
538 	int biosize, i;
539 	struct buf *bp, *rabp;
540 	struct thread *td;
541 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
542 	daddr_t lbn, rabn;
543 	int bcount;
544 	int seqcount;
545 	int nra, error = 0, n = 0, on = 0;
546 
547 #ifdef DIAGNOSTIC
548 	if (uio->uio_rw != UIO_READ)
549 		panic("ncl_read mode");
550 #endif
551 	if (uio->uio_resid == 0)
552 		return (0);
553 	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
554 		return (EINVAL);
555 	td = uio->uio_td;
556 
557 	mtx_lock(&nmp->nm_mtx);
558 	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
559 	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
560 		mtx_unlock(&nmp->nm_mtx);
561 		(void)ncl_fsinfo(nmp, vp, cred, td);
562 		mtx_lock(&nmp->nm_mtx);
563 	}
564 	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
565 		(void) newnfs_iosize(nmp);
566 	mtx_unlock(&nmp->nm_mtx);
567 
568 	if (vp->v_type != VDIR &&
569 	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
570 		return (EFBIG);
571 
572 	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
573 		/* No caching/ no readaheads. Just read data into the user buffer */
574 		return (ncl_readrpc(vp, uio, cred));
575 
576 	biosize = vp->v_mount->mnt_stat.f_iosize;
577 	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
578 
579 	error = nfs_bioread_check_cons(vp, td, cred);
580 	if (error)
581 		return (error);
582 
583 	do {
584 	    u_quad_t nsize;
585 
586 	    mtx_lock(&np->n_mtx);
587 	    nsize = np->n_size;
588 	    mtx_unlock(&np->n_mtx);
589 
590 	    switch (vp->v_type) {
591 	    case VREG:
592 		NFSINCRGLOBAL(newnfsstats.biocache_reads);
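		/*
		 * lbn is the logical block containing the current offset and
		 * on is the offset within that block.
		 */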
593 		lbn = uio->uio_offset / biosize;
594 		on = uio->uio_offset & (biosize - 1);
595 
596 		/*
597 		 * Start the read ahead(s), as required.
598 		 */
599 		if (nmp->nm_readahead > 0) {
600 		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
601 			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
602 			rabn = lbn + 1 + nra;
603 			if (incore(&vp->v_bufobj, rabn) == NULL) {
604 			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
605 			    if (!rabp) {
606 				error = newnfs_sigintr(nmp, td);
607 				if (error)
608 				    return (error);
609 				else
610 				    break;
611 			    }
612 			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
613 				rabp->b_flags |= B_ASYNC;
614 				rabp->b_iocmd = BIO_READ;
615 				vfs_busy_pages(rabp, 0);
616 				if (ncl_asyncio(nmp, rabp, cred, td)) {
617 				    rabp->b_flags |= B_INVAL;
618 				    rabp->b_ioflags |= BIO_ERROR;
619 				    vfs_unbusy_pages(rabp);
620 				    brelse(rabp);
621 				    break;
622 				}
623 			    } else {
624 				brelse(rabp);
625 			    }
626 			}
627 		    }
628 		}
629 
630 		/* Note that bcount is *not* DEV_BSIZE aligned. */
631 		bcount = biosize;
632 		if ((off_t)lbn * biosize >= nsize) {
633 			bcount = 0;
634 		} else if ((off_t)(lbn + 1) * biosize > nsize) {
635 			bcount = nsize - (off_t)lbn * biosize;
636 		}
637 		bp = nfs_getcacheblk(vp, lbn, bcount, td);
638 
639 		if (!bp) {
640 			error = newnfs_sigintr(nmp, td);
641 			return (error ? error : EINTR);
642 		}
643 
644 		/*
645 		 * If B_CACHE is not set, we must issue the read.  If this
646 		 * fails, we return an error.
647 		 */
648 
649 		if ((bp->b_flags & B_CACHE) == 0) {
650 		    bp->b_iocmd = BIO_READ;
651 		    vfs_busy_pages(bp, 0);
652 		    error = ncl_doio(vp, bp, cred, td);
653 		    if (error) {
654 			brelse(bp);
655 			return (error);
656 		    }
657 		}
658 
659 		/*
660 		 * on is the offset into the current bp.  Figure out how many
661 		 * bytes we can copy out of the bp.  Note that bcount is
662 		 * NOT DEV_BSIZE aligned.
663 		 *
664 		 * Then figure out how many bytes we can copy into the uio.
665 		 */
666 
667 		n = 0;
668 		if (on < bcount)
669 			n = min((unsigned)(bcount - on), uio->uio_resid);
670 		break;
671 	    case VLNK:
672 		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
673 		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
674 		if (!bp) {
675 			error = newnfs_sigintr(nmp, td);
676 			return (error ? error : EINTR);
677 		}
678 		if ((bp->b_flags & B_CACHE) == 0) {
679 		    bp->b_iocmd = BIO_READ;
680 		    vfs_busy_pages(bp, 0);
681 		    error = ncl_doio(vp, bp, cred, td);
682 		    if (error) {
683 			bp->b_ioflags |= BIO_ERROR;
684 			brelse(bp);
685 			return (error);
686 		    }
687 		}
688 		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
689 		on = 0;
690 		break;
691 	    case VDIR:
692 		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
693 		if (np->n_direofoffset
694 		    && uio->uio_offset >= np->n_direofoffset) {
695 		    return (0);
696 		}
697 		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
698 		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
699 		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
700 		if (!bp) {
701 		    error = newnfs_sigintr(nmp, td);
702 		    return (error ? error : EINTR);
703 		}
704 		if ((bp->b_flags & B_CACHE) == 0) {
705 		    bp->b_iocmd = BIO_READ;
706 		    vfs_busy_pages(bp, 0);
707 		    error = ncl_doio(vp, bp, cred, td);
708 		    if (error) {
709 			    brelse(bp);
710 		    }
711 		    while (error == NFSERR_BAD_COOKIE) {
712 			ncl_invaldir(vp);
713 			error = ncl_vinvalbuf(vp, 0, td, 1);
714 			/*
715 			 * Yuck! The directory has been modified on the
716 			 * server. The only way to get the block is by
717 			 * reading from the beginning to get all the
718 			 * offset cookies.
719 			 *
720 			 * Leave the last bp intact unless there is an error.
721 			 * Loop back up to the while if the error is another
722 			 * NFSERR_BAD_COOKIE (double yuck!).
723 			 */
724 			for (i = 0; i <= lbn && !error; i++) {
725 			    if (np->n_direofoffset
726 				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
727 				    return (0);
728 			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
729 			    if (!bp) {
730 				error = newnfs_sigintr(nmp, td);
731 				return (error ? error : EINTR);
732 			    }
733 			    if ((bp->b_flags & B_CACHE) == 0) {
734 				    bp->b_iocmd = BIO_READ;
735 				    vfs_busy_pages(bp, 0);
736 				    error = ncl_doio(vp, bp, cred, td);
737 				    /*
738 				     * no error + B_INVAL == directory EOF,
739 				     * use the block.
740 				     */
741 				    if (error == 0 && (bp->b_flags & B_INVAL))
742 					    break;
743 			    }
744 			    /*
745 			     * An error will throw away the block and the
746 			     * for loop will break out.  If no error and this
747 			     * is not the block we want, we throw away the
748 			     * block and go for the next one via the for loop.
749 			     */
750 			    if (error || i < lbn)
751 				    brelse(bp);
752 			}
753 		    }
754 		    /*
755 		     * The above while is repeated if we hit another cookie
756 		     * error.  If we hit an error and it wasn't a cookie error,
757 		     * we give up.
758 		     */
759 		    if (error)
760 			    return (error);
761 		}
762 
763 		/*
764 		 * If not eof and read aheads are enabled, start one.
765 		 * (You need the current block first, so that you have the
766 		 *  directory offset cookie of the next block.)
767 		 */
768 		if (nmp->nm_readahead > 0 &&
769 		    (bp->b_flags & B_INVAL) == 0 &&
770 		    (np->n_direofoffset == 0 ||
771 		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
772 		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
773 			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
774 			if (rabp) {
775 			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
776 				rabp->b_flags |= B_ASYNC;
777 				rabp->b_iocmd = BIO_READ;
778 				vfs_busy_pages(rabp, 0);
779 				if (ncl_asyncio(nmp, rabp, cred, td)) {
780 				    rabp->b_flags |= B_INVAL;
781 				    rabp->b_ioflags |= BIO_ERROR;
782 				    vfs_unbusy_pages(rabp);
783 				    brelse(rabp);
784 				}
785 			    } else {
786 				brelse(rabp);
787 			    }
788 			}
789 		}
790 		/*
791 		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
792 		 * chopped for the EOF condition, we cannot tell how large
793 		 * NFS directories are going to be until we hit EOF.  So
794 		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
795 		 * it just so happens that b_resid will effectively chop it
796 		 * to EOF.  *BUT* this information is lost if the buffer goes
797 		 * away and is reconstituted into a B_CACHE state ( due to
798 		 * being VMIO ) later.  So we keep track of the directory eof
799 		 * in np->n_direofoffset and chop it off as an extra step
800 		 * right here.
801 		 */
802 		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
803 		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
804 			n = np->n_direofoffset - uio->uio_offset;
805 		break;
806 	    default:
807 		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
808 		bp = NULL;
809 		break;
810 	    }
811 
812 	    if (n > 0) {
813 		    error = uiomove(bp->b_data + on, (int)n, uio);
814 	    }
815 	    if (vp->v_type == VLNK)
816 		n = 0;
817 	    if (bp != NULL)
818 		brelse(bp);
819 	} while (error == 0 && uio->uio_resid > 0 && n > 0);
820 	return (error);
821 }
822 
823 /*
824  * The NFS write path cannot handle iovecs with len > 1. So we need to
825  * break up iovecs accordingly (restricting them to wsize).
826  * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
827  * For the ASYNC case, 2 copies are needed. The first a copy from the
828  * user buffer to a staging buffer and then a second copy from the staging
829  * buffer to mbufs. This can be optimized by copying from the user buffer
830  * directly into mbufs and passing the chain down, but that requires a
831  * fair amount of re-working of the relevant codepaths (and can be done
832  * later).
833  */
834 static int
835 nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
836     int ioflag)
837 {
841 	int error;
842 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
843 	struct thread *td = uiop->uio_td;
844 	int size;
845 	int wsize;
846 
847 	mtx_lock(&nmp->nm_mtx);
848 	wsize = nmp->nm_wsize;
849 	mtx_unlock(&nmp->nm_mtx);
850 	if (ioflag & IO_SYNC) {
851 		int iomode, must_commit;
852 		struct uio uio;
853 		struct iovec iov;
854 do_sync:
855 		while (uiop->uio_resid > 0) {
856 			size = min(uiop->uio_resid, wsize);
857 			size = min(uiop->uio_iov->iov_len, size);
858 			iov.iov_base = uiop->uio_iov->iov_base;
859 			iov.iov_len = size;
860 			uio.uio_iov = &iov;
861 			uio.uio_iovcnt = 1;
862 			uio.uio_offset = uiop->uio_offset;
863 			uio.uio_resid = size;
864 			uio.uio_segflg = UIO_USERSPACE;
865 			uio.uio_rw = UIO_WRITE;
866 			uio.uio_td = td;
867 			iomode = NFSWRITE_FILESYNC;
868 			error = ncl_writerpc(vp, &uio, cred, &iomode,
869 			    &must_commit);
870 			KASSERT((must_commit == 0),
871 				("ncl_directio_write: Did not commit write"));
872 			if (error)
873 				return (error);
874 			uiop->uio_offset += size;
875 			uiop->uio_resid -= size;
876 			if (uiop->uio_iov->iov_len <= size) {
877 				uiop->uio_iovcnt--;
878 				uiop->uio_iov++;
879 			} else {
880 				uiop->uio_iov->iov_base =
881 					(char *)uiop->uio_iov->iov_base + size;
882 				uiop->uio_iov->iov_len -= size;
883 			}
884 		}
885 	} else {
886 		struct uio *t_uio;
887 		struct iovec *t_iov;
888 		struct buf *bp;
889 
890 		/*
891 		 * Break up the write into blocksize chunks and hand these
892 		 * over to nfsiod's for write back.
893 		 * Unfortunately, this incurs a copy of the data. Since
894 		 * Unfortunately, this incurs a copy of the data, since
895 		 * the user could modify the buffer before the write is
896 		 *
897 		 * The obvious optimization here is that one of the 2 copies
898 		 * in the async write path can be eliminated by copying the
899 		 * data here directly into mbufs and passing the mbuf chain
900 		 * down. But that will require a fair amount of re-working
901 		 * of the code and can be done if there's enough interest
902 		 * in NFS directio access.
903 		 */
904 		while (uiop->uio_resid > 0) {
905 			size = min(uiop->uio_resid, wsize);
906 			size = min(uiop->uio_iov->iov_len, size);
907 			bp = getpbuf(&ncl_pbuf_freecnt);
908 			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
909 			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
910 			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
911 			t_iov->iov_len = size;
912 			t_uio->uio_iov = t_iov;
913 			t_uio->uio_iovcnt = 1;
914 			t_uio->uio_offset = uiop->uio_offset;
915 			t_uio->uio_resid = size;
916 			t_uio->uio_segflg = UIO_SYSSPACE;
917 			t_uio->uio_rw = UIO_WRITE;
918 			t_uio->uio_td = td;
919 			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
920 			bp->b_flags |= B_DIRECT;
921 			bp->b_iocmd = BIO_WRITE;
922 			if (cred != NOCRED) {
923 				crhold(cred);
924 				bp->b_wcred = cred;
925 			} else
926 				bp->b_wcred = NOCRED;
927 			bp->b_caller1 = (void *)t_uio;
928 			bp->b_vp = vp;
929 			error = ncl_asyncio(nmp, bp, NOCRED, td);
930 			if (error) {
931 				free(t_iov->iov_base, M_NFSDIRECTIO);
932 				free(t_iov, M_NFSDIRECTIO);
933 				free(t_uio, M_NFSDIRECTIO);
934 				bp->b_vp = NULL;
935 				relpbuf(bp, &ncl_pbuf_freecnt);
936 				if (error == EINTR)
937 					return (error);
938 				goto do_sync;
939 			}
940 			uiop->uio_offset += size;
941 			uiop->uio_resid -= size;
942 			if (uiop->uio_iov->iov_len <= size) {
943 				uiop->uio_iovcnt--;
944 				uiop->uio_iov++;
945 			} else {
946 				uiop->uio_iov->iov_base =
947 					(char *)uiop->uio_iov->iov_base + size;
948 				uiop->uio_iov->iov_len -= size;
949 			}
950 		}
951 	}
952 	return (0);
953 }
954 
955 /*
956  * Vnode op for write using bio
957  */
958 int
959 ncl_write(struct vop_write_args *ap)
960 {
961 	int biosize;
962 	struct uio *uio = ap->a_uio;
963 	struct thread *td = uio->uio_td;
964 	struct vnode *vp = ap->a_vp;
965 	struct nfsnode *np = VTONFS(vp);
966 	struct ucred *cred = ap->a_cred;
967 	int ioflag = ap->a_ioflag;
968 	struct buf *bp;
969 	struct vattr vattr;
970 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
971 	daddr_t lbn;
972 	int bcount;
973 	int n, on, error = 0;
974 	struct proc *p = td ? td->td_proc : NULL;
975 
976 #ifdef DIAGNOSTIC
977 	if (uio->uio_rw != UIO_WRITE)
978 		panic("ncl_write mode");
979 	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
980 		panic("ncl_write proc");
981 #endif
982 	if (vp->v_type != VREG)
983 		return (EIO);
984 	mtx_lock(&np->n_mtx);
985 	if (np->n_flag & NWRITEERR) {
986 		np->n_flag &= ~NWRITEERR;
987 		mtx_unlock(&np->n_mtx);
988 		return (np->n_error);
989 	} else
990 		mtx_unlock(&np->n_mtx);
991 	mtx_lock(&nmp->nm_mtx);
992 	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
993 	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
994 		mtx_unlock(&nmp->nm_mtx);
995 		(void)ncl_fsinfo(nmp, vp, cred, td);
996 		mtx_lock(&nmp->nm_mtx);
997 	}
998 	if (nmp->nm_wsize == 0)
999 		(void) newnfs_iosize(nmp);
1000 	mtx_unlock(&nmp->nm_mtx);
1001 
1002 	/*
1003 	 * Synchronously flush pending buffers if we are in synchronous
1004 	 * mode or if we are appending.
1005 	 */
1006 	if (ioflag & (IO_APPEND | IO_SYNC)) {
1007 		mtx_lock(&np->n_mtx);
1008 		if (np->n_flag & NMODIFIED) {
1009 			mtx_unlock(&np->n_mtx);
1010 #ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
1011 			/*
1012 			 * Require non-blocking, synchronous writes to
1013 			 * dirty files to inform the program it needs
1014 			 * to fsync(2) explicitly.
1015 			 */
1016 			if (ioflag & IO_NDELAY)
1017 				return (EAGAIN);
1018 #endif
1019 flush_and_restart:
1020 			np->n_attrstamp = 0;
1021 			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
1022 			if (error)
1023 				return (error);
1024 		} else
1025 			mtx_unlock(&np->n_mtx);
1026 	}
1027 
1028 	/*
1029 	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
1030 	 * get the append lock.
1031 	 */
1032 	if (ioflag & IO_APPEND) {
1033 		np->n_attrstamp = 0;
1034 		error = VOP_GETATTR(vp, &vattr, cred);
1035 		if (error)
1036 			return (error);
1037 		mtx_lock(&np->n_mtx);
1038 		uio->uio_offset = np->n_size;
1039 		mtx_unlock(&np->n_mtx);
1040 	}
1041 
1042 	if (uio->uio_offset < 0)
1043 		return (EINVAL);
1044 	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
1045 		return (EFBIG);
1046 	if (uio->uio_resid == 0)
1047 		return (0);
1048 
1049 	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
1050 		return (nfs_directio_write(vp, uio, cred, ioflag));
1051 
1052 	/*
1053 	 * Maybe this should be above the vnode op call, but so long as
1054 	 * file servers have no limits, I don't think it matters.
1055 	 */
1056 	if (p != NULL) {
1057 		PROC_LOCK(p);
1058 		if (uio->uio_offset + uio->uio_resid >
1059 		    lim_cur(p, RLIMIT_FSIZE)) {
1060 			psignal(p, SIGXFSZ);
1061 			PROC_UNLOCK(p);
1062 			return (EFBIG);
1063 		}
1064 		PROC_UNLOCK(p);
1065 	}
1066 
1067 	biosize = vp->v_mount->mnt_stat.f_iosize;
1068 	/*
1069 	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
1070 	 * would exceed the local maximum per-file write commit size when
1071 	 * combined with those, we must decide whether to flush,
1072 	 * go synchronous, or return error.  We don't bother checking
1073 	 * IO_UNIT -- we just make all writes atomic anyway, as there's
1074 	 * no point optimizing for something that really won't ever happen.
1075 	 */
1076 	if (!(ioflag & IO_SYNC)) {
1077 		int nflag;
1078 		int needrestart = 0;
1079 
1080 		mtx_lock(&np->n_mtx);
1081 		nflag = np->n_flag;
1082 		mtx_unlock(&np->n_mtx);
1083 		if (nmp->nm_wcommitsize < uio->uio_resid) {
1084 			/*
1085 			 * If this request could not possibly be completed
1086 			 * without exceeding the maximum outstanding write
1087 			 * commit size, see if we can convert it into a
1088 			 * synchronous write operation.
1089 			 */
1090 			if (ioflag & IO_NDELAY)
1091 				return (EAGAIN);
1092 			ioflag |= IO_SYNC;
1093 			if (nflag & NMODIFIED)
1094 				needrestart = 1;
1095 		} else if (nflag & NMODIFIED) {
1096 			int wouldcommit = 0;
1097 			BO_LOCK(&vp->v_bufobj);
1098 			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1099 				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1100 				    b_bobufs) {
1101 					if (bp->b_flags & B_NEEDCOMMIT)
1102 						wouldcommit += bp->b_bcount;
1103 				}
1104 			}
1105 			BO_UNLOCK(&vp->v_bufobj);
1106 			/*
1107 			 * Since we're not operating synchronously and
1108 			 * bypassing the buffer cache, we are in a commit
1109 			 * and holding all of these buffers whether
1110 			 * transmitted or not.  If not limited, this
1111 			 * will lead to the buffer cache deadlocking,
1112 			 * as no one else can flush our uncommitted buffers.
1113 			 */
1114 			wouldcommit += uio->uio_resid;
1115 			/*
1116 			 * If we would initially exceed the maximum
1117 			 * outstanding write commit size, flush and restart.
1118 			 */
1119 			if (wouldcommit > nmp->nm_wcommitsize)
1120 				needrestart = 1;
1121 		}
1122 		if (needrestart)
1123 			goto flush_and_restart;
1124 	}
1125 
1126 	do {
1127 		NFSINCRGLOBAL(newnfsstats.biocache_writes);
1128 		lbn = uio->uio_offset / biosize;
1129 		on = uio->uio_offset & (biosize-1);
1130 		n = min((unsigned)(biosize - on), uio->uio_resid);
1131 again:
1132 		/*
1133 		 * Handle direct append and file extension cases, calculate
1134 		 * unaligned buffer size.
1135 		 */
1136 		mtx_lock(&np->n_mtx);
1137 		if (uio->uio_offset == np->n_size && n) {
1138 			mtx_unlock(&np->n_mtx);
1139 			/*
1140 			 * Get the buffer (in its pre-append state to maintain
1141 			 * B_CACHE if it was previously set).  Resize the
1142 			 * nfsnode after we have locked the buffer to prevent
1143 			 * readers from reading garbage.
1144 			 */
1145 			bcount = on;
1146 			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1147 
1148 			if (bp != NULL) {
1149 				long save;
1150 
1151 				mtx_lock(&np->n_mtx);
1152 				np->n_size = uio->uio_offset + n;
1153 				np->n_flag |= NMODIFIED;
1154 				vnode_pager_setsize(vp, np->n_size);
1155 				mtx_unlock(&np->n_mtx);
1156 
1157 				save = bp->b_flags & B_CACHE;
1158 				bcount += n;
1159 				allocbuf(bp, bcount);
1160 				bp->b_flags |= save;
1161 			}
1162 		} else {
1163 			/*
1164 			 * Obtain the locked cache block first, and then
1165 			 * adjust the file's size as appropriate.
1166 			 */
1167 			bcount = on + n;
1168 			if ((off_t)lbn * biosize + bcount < np->n_size) {
1169 				if ((off_t)(lbn + 1) * biosize < np->n_size)
1170 					bcount = biosize;
1171 				else
1172 					bcount = np->n_size - (off_t)lbn * biosize;
1173 			}
1174 			mtx_unlock(&np->n_mtx);
1175 			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1176 			mtx_lock(&np->n_mtx);
1177 			if (uio->uio_offset + n > np->n_size) {
1178 				np->n_size = uio->uio_offset + n;
1179 				np->n_flag |= NMODIFIED;
1180 				vnode_pager_setsize(vp, np->n_size);
1181 			}
1182 			mtx_unlock(&np->n_mtx);
1183 		}
1184 
1185 		if (!bp) {
1186 			error = newnfs_sigintr(nmp, td);
1187 			if (!error)
1188 				error = EINTR;
1189 			break;
1190 		}
1191 
1192 		/*
1193 		 * Issue a READ if B_CACHE is not set.  In special-append
1194 		 * mode, B_CACHE is based on the buffer prior to the write
1195 		 * op and is typically set, avoiding the read.  If a read
1196 		 * is required in special append mode, the server will
1197 		 * probably send us a short-read since we extended the file
1198 		 * on our end, resulting in b_resid == 0 and, thus,
1199 		 * B_CACHE getting set.
1200 		 *
1201 		 * We can also avoid issuing the read if the write covers
1202 		 * the entire buffer.  We have to make sure the buffer state
1203 		 * is reasonable in this case since we will not be initiating
1204 		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
1205 		 * more information.
1206 		 *
1207 		 * B_CACHE may also be set due to the buffer being cached
1208 		 * normally.
1209 		 */
1210 
1211 		if (on == 0 && n == bcount) {
1212 			bp->b_flags |= B_CACHE;
1213 			bp->b_flags &= ~B_INVAL;
1214 			bp->b_ioflags &= ~BIO_ERROR;
1215 		}
1216 
1217 		if ((bp->b_flags & B_CACHE) == 0) {
1218 			bp->b_iocmd = BIO_READ;
1219 			vfs_busy_pages(bp, 0);
1220 			error = ncl_doio(vp, bp, cred, td);
1221 			if (error) {
1222 				brelse(bp);
1223 				break;
1224 			}
1225 		}
1226 		if (bp->b_wcred == NOCRED)
1227 			bp->b_wcred = crhold(cred);
1228 		mtx_lock(&np->n_mtx);
1229 		np->n_flag |= NMODIFIED;
1230 		mtx_unlock(&np->n_mtx);
1231 
1232 		/*
1233 		 * If dirtyend exceeds file size, chop it down.  This should
1234 		 * not normally occur but there is an append race where it
1235 		 * might occur XXX, so we log it.
1236 		 *
1237 		 * If the chopping creates a reverse-indexed or degenerate
1238 		 * situation with dirtyoff/end, we 0 both of them.
1239 		 */
1240 
1241 		if (bp->b_dirtyend > bcount) {
1242 			ncl_printf("NFS append race @%lx:%d\n",
1243 			    (long)bp->b_blkno * DEV_BSIZE,
1244 			    bp->b_dirtyend - bcount);
1245 			bp->b_dirtyend = bcount;
1246 		}
1247 
1248 		if (bp->b_dirtyoff >= bp->b_dirtyend)
1249 			bp->b_dirtyoff = bp->b_dirtyend = 0;
1250 
1251 		/*
1252 		 * If the new write will leave a contiguous dirty
1253 		 * area, just update the b_dirtyoff and b_dirtyend,
1254 		 * otherwise force a write rpc of the old dirty area.
1255 		 *
1256 		 * While it is possible to merge discontiguous writes due to
1257 		 * our having a B_CACHE buffer ( and thus valid read data
1258 		 * for the hole), we don't because it could lead to
1259 		 * significant cache coherency problems with multiple clients,
1260 		 * especially if locking is implemented later on.
1261 		 *
1262 		 * as an optimization we could theoretically maintain
1263 		 * a linked list of discontinuous areas, but we would still
1264 		 * have to commit them separately so there isn't much
1265 		 * advantage to it except perhaps a bit of asynchronization.
1266 		 */
1267 
1268 		if (bp->b_dirtyend > 0 &&
1269 		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1270 			if (bwrite(bp) == EINTR) {
1271 				error = EINTR;
1272 				break;
1273 			}
1274 			goto again;
1275 		}
1276 
1277 		error = uiomove((char *)bp->b_data + on, n, uio);
1278 
1279 		/*
1280 		 * Since this block is being modified, it must be written
1281 		 * again and not just committed.  Since write clustering does
1282 		 * not work for the stage 1 data write, only the stage 2
1283 		 * commit rpc, we have to clear B_CLUSTEROK as well.
1284 		 */
1285 		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1286 
1287 		if (error) {
1288 			bp->b_ioflags |= BIO_ERROR;
1289 			brelse(bp);
1290 			break;
1291 		}
1292 
1293 		/*
1294 		 * Only update dirtyoff/dirtyend if not a degenerate
1295 		 * condition.
1296 		 */
1297 		if (n) {
1298 			if (bp->b_dirtyend > 0) {
1299 				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1300 				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1301 			} else {
1302 				bp->b_dirtyoff = on;
1303 				bp->b_dirtyend = on + n;
1304 			}
1305 			vfs_bio_set_valid(bp, on, n);
1306 		}
1307 
1308 		/*
1309 		 * If IO_SYNC do bwrite().
1310 		 *
1311 		 * IO_INVAL appears to be unused.  The idea appears to be
1312 		 * to turn off caching in this case.  Very odd.  XXX
1313 		 */
1314 		if ((ioflag & IO_SYNC)) {
1315 			if (ioflag & IO_INVAL)
1316 				bp->b_flags |= B_NOCACHE;
1317 			error = bwrite(bp);
1318 			if (error)
1319 				break;
1320 		} else if ((n + on) == biosize) {
1321 			bp->b_flags |= B_ASYNC;
1322 			(void) ncl_writebp(bp, 0, NULL);
1323 		} else {
1324 			bdwrite(bp);
1325 		}
1326 	} while (uio->uio_resid > 0 && n > 0);
1327 
1328 	return (error);
1329 }
1330 
1331 /*
1332  * Get an nfs cache block.
1333  *
1334  * Allocate a new one if the block isn't currently in the cache
1335  * and return the block marked busy. If the calling process is
1336  * interrupted by a signal for an interruptible mount point, return
1337  * NULL.
1338  *
1339  * The caller must carefully deal with the possible B_INVAL state of
1340  * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1341  * indirectly), so synchronous reads can be issued without worrying about
1342  * the B_INVAL state.  We have to be a little more careful when dealing
1343  * with writes (see comments in nfs_write()) when extending a file past
1344  * its EOF.
1345  */
1346 static struct buf *
1347 nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1348 {
1349 	struct buf *bp;
1350 	struct mount *mp;
1351 	struct nfsmount *nmp;
1352 
1353 	mp = vp->v_mount;
1354 	nmp = VFSTONFS(mp);
1355 
1356 	if (nmp->nm_flag & NFSMNT_INT) {
1357  		sigset_t oldset;
1358 
1359  		ncl_set_sigmask(td, &oldset);
1360 		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1361  		ncl_restore_sigmask(td, &oldset);
1362 		while (bp == NULL) {
1363 			if (newnfs_sigintr(nmp, td))
1364 				return (NULL);
1365 			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1366 		}
1367 	} else {
1368 		bp = getblk(vp, bn, size, 0, 0, 0);
1369 	}
1370 
1371 	if (vp->v_type == VREG) {
1372 		int biosize;
1373 
1374 		biosize = mp->mnt_stat.f_iosize;
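		/*
		 * b_blkno is expressed in DEV_BSIZE units, so scale the
		 * logical block number by the mount's I/O size.
		 */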
1375 		bp->b_blkno = bn * (biosize / DEV_BSIZE);
1376 	}
1377 	return (bp);
1378 }
1379 
1380 /*
1381  * Flush and invalidate all dirty buffers. If another process is already
1382  * doing the flush, just wait for completion.
1383  */
1384 int
1385 ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1386 {
1387 	struct nfsnode *np = VTONFS(vp);
1388 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1389 	int error = 0, slpflag, slptimeo;
1390  	int old_lock = 0;
1391 
1392 	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1393 
1394 	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1395 		intrflg = 0;
1396 	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1397 		intrflg = 1;
1398 	if (intrflg) {
1399 		slpflag = NFS_PCATCH;
1400 		slptimeo = 2 * hz;
1401 	} else {
1402 		slpflag = 0;
1403 		slptimeo = 0;
1404 	}
1405 
1406 	old_lock = ncl_upgrade_vnlock(vp);
1407 	if (vp->v_iflag & VI_DOOMED) {
1408 		/*
1409 		 * Since vgonel() uses the generic vinvalbuf() to flush
1410 		 * dirty buffers and it does not call this function, it
1411 		 * is safe to just return OK when VI_DOOMED is set.
1412 		 */
1413 		ncl_downgrade_vnlock(vp, old_lock);
1414 		return (0);
1415 	}
1416 
1417 	/*
1418 	 * Now, flush as required.
1419 	 */
1420 	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1421 		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1422 		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1423 		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1424 		/*
1425 		 * If the page clean was interrupted, fail the invalidation.
1426 		 * Not doing so, we run the risk of losing dirty pages in the
1427 		 * vinvalbuf() call below.
1428 		 */
1429 		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1430 			goto out;
1431 	}
1432 
1433 	error = vinvalbuf(vp, flags, slpflag, 0);
1434 	while (error) {
1435 		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1436 			goto out;
1437 		error = vinvalbuf(vp, flags, 0, slptimeo);
1438 	}
1439 	mtx_lock(&np->n_mtx);
1440 	if (np->n_directio_asyncwr == 0)
1441 		np->n_flag &= ~NMODIFIED;
1442 	mtx_unlock(&np->n_mtx);
1443 out:
1444 	ncl_downgrade_vnlock(vp, old_lock);
1445 	return (error);
1446 }
1447 
1448 /*
1449  * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1450  * This is mainly to avoid queueing async I/O requests when the nfsiods
1451  * are all hung on a dead server.
1452  *
1453  * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1454  * is eventually dequeued by the async daemon, ncl_doio() *will*.
1455  */
1456 int
1457 ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1458 {
1459 	int iod;
1460 	int gotiod;
1461 	int slpflag = 0;
1462 	int slptimeo = 0;
1463 	int error, error2;
1464 
1465 	/*
1466 	 * Unless iothreadcnt is set > 0, don't bother with async I/O
1467 	 * threads. For LAN environments, they don't buy any significant
1468 	 * performance improvement that you can't get with large block
1469 	 * sizes.
1470 	 */
1471 	if (nmp->nm_readahead == 0)
1472 		return (EPERM);
1473 
1474 	/*
1475 	 * Commits are usually short and sweet so let's save some cpu and
1476 	 * leave the async daemons for more important rpc's (such as reads
1477 	 * and writes).
1478 	 */
1479 	mtx_lock(&ncl_iod_mutex);
1480 	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1481 	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
1482 		mtx_unlock(&ncl_iod_mutex);
1483 		return (EIO);
1484 	}
1485 again:
1486 	if (nmp->nm_flag & NFSMNT_INT)
1487 		slpflag = NFS_PCATCH;
1488 	gotiod = FALSE;
1489 
1490 	/*
1491 	 * Find a free iod to process this request.
1492 	 */
1493 	for (iod = 0; iod < ncl_numasync; iod++)
1494 		if (ncl_iodwant[iod]) {
1495 			gotiod = TRUE;
1496 			break;
1497 		}
1498 
1499 	/*
1500 	 * Try to create one if none are free.
1501 	 */
1502 	if (!gotiod) {
1503 		iod = ncl_nfsiodnew();
1504 		if (iod != -1)
1505 			gotiod = TRUE;
1506 	}
1507 
1508 	if (gotiod) {
1509 		/*
1510 		 * Found one, so wake it up and tell it which
1511 		 * mount to process.
1512 		 */
1513 		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1514 		    iod, nmp));
1515 		ncl_iodwant[iod] = NULL;
1516 		ncl_iodmount[iod] = nmp;
1517 		nmp->nm_bufqiods++;
1518 		wakeup(&ncl_iodwant[iod]);
1519 	}
1520 
1521 	/*
1522 	 * If none are free, we may already have an iod working on this mount
1523 	 * point.  If so, it will process our request.
1524 	 */
1525 	if (!gotiod) {
1526 		if (nmp->nm_bufqiods > 0) {
1527 			NFS_DPF(ASYNCIO,
1528 				("ncl_asyncio: %d iods are already processing mount %p\n",
1529 				 nmp->nm_bufqiods, nmp));
1530 			gotiod = TRUE;
1531 		}
1532 	}
1533 
1534 	/*
1535 	 * If we have an iod which can process the request, then queue
1536 	 * the buffer.
1537 	 */
1538 	if (gotiod) {
1539 		/*
1540 		 * Ensure that the queue never grows too large.  We still want
1541 		 * to asynchronize so we block rather than return EIO.
1542 		 */
1543 		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1544 			NFS_DPF(ASYNCIO,
1545 				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1546 			nmp->nm_bufqwant = TRUE;
1547  			error = ncl_msleep(td, &nmp->nm_bufq, &ncl_iod_mutex,
1548 					   slpflag | PRIBIO,
1549  					   "nfsaio", slptimeo);
1550 			if (error) {
1551 				error2 = newnfs_sigintr(nmp, td);
1552 				if (error2) {
1553 					mtx_unlock(&ncl_iod_mutex);
1554 					return (error2);
1555 				}
1556 				if (slpflag == NFS_PCATCH) {
1557 					slpflag = 0;
1558 					slptimeo = 2 * hz;
1559 				}
1560 			}
1561 			/*
1562 			 * We might have lost our iod while sleeping,
1563 			 * so check and loop if necessary.
1564 			 */
1565 			if (nmp->nm_bufqiods == 0) {
1566 				NFS_DPF(ASYNCIO,
1567 					("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1568 				goto again;
1569 			}
1570 		}
1571 
1572 		/* We might have lost our nfsiod */
1573 		if (nmp->nm_bufqiods == 0) {
1574 			NFS_DPF(ASYNCIO,
1575 				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1576 			goto again;
1577 		}
1578 
1579 		if (bp->b_iocmd == BIO_READ) {
1580 			if (bp->b_rcred == NOCRED && cred != NOCRED)
1581 				bp->b_rcred = crhold(cred);
1582 		} else {
1583 			if (bp->b_wcred == NOCRED && cred != NOCRED)
1584 				bp->b_wcred = crhold(cred);
1585 		}
1586 
1587 		if (bp->b_flags & B_REMFREE)
1588 			bremfreef(bp);
1589 		BUF_KERNPROC(bp);
1590 		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1591 		nmp->nm_bufqlen++;
1592 		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1593 			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1594 			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1595 			VTONFS(bp->b_vp)->n_directio_asyncwr++;
1596 			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1597 		}
1598 		mtx_unlock(&ncl_iod_mutex);
1599 		return (0);
1600 	}
1601 
1602 	mtx_unlock(&ncl_iod_mutex);
1603 
1604 	/*
1605 	 * All the iods are busy on other mounts, so return EIO to
1606 	 * force the caller to process the i/o synchronously.
1607 	 */
1608 	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1609 	return (EIO);
1610 }
1611 
1612 void
1613 ncl_doio_directwrite(struct buf *bp)
1614 {
1615 	int iomode, must_commit;
1616 	struct uio *uiop = (struct uio *)bp->b_caller1;
1617 	char *iov_base = uiop->uio_iov->iov_base;
1618 
1619 	iomode = NFSWRITE_FILESYNC;
1620 	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1621 	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1622 	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1623 	free(iov_base, M_NFSDIRECTIO);
1624 	free(uiop->uio_iov, M_NFSDIRECTIO);
1625 	free(uiop, M_NFSDIRECTIO);
1626 	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1627 		struct nfsnode *np = VTONFS(bp->b_vp);
1628 		mtx_lock(&np->n_mtx);
1629 		np->n_directio_asyncwr--;
1630 		if (np->n_directio_asyncwr == 0) {
1631 			np->n_flag &= ~NMODIFIED;
1632 			if ((np->n_flag & NFSYNCWAIT)) {
1633 				np->n_flag &= ~NFSYNCWAIT;
1634 				wakeup((caddr_t)&np->n_directio_asyncwr);
1635 			}
1636 		}
1637 		mtx_unlock(&np->n_mtx);
1638 	}
1639 	bp->b_vp = NULL;
1640 	relpbuf(bp, &ncl_pbuf_freecnt);
1641 }
1642 
1643 /*
1644  * Do an I/O operation to/from a cache block. This may be called
1645  * synchronously or from an nfsiod.
1646  */
1647 int
1648 ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1649 {
1650 	struct uio *uiop;
1651 	struct nfsnode *np;
1652 	struct nfsmount *nmp;
1653 	int error = 0, iomode, must_commit = 0;
1654 	struct uio uio;
1655 	struct iovec io;
1656 	struct proc *p = td ? td->td_proc : NULL;
1657 	uint8_t	iocmd;
1658 
1659 	np = VTONFS(vp);
1660 	nmp = VFSTONFS(vp->v_mount);
1661 	uiop = &uio;
1662 	uiop->uio_iov = &io;
1663 	uiop->uio_iovcnt = 1;
1664 	uiop->uio_segflg = UIO_SYSSPACE;
1665 	uiop->uio_td = td;
1666 
1667 	/*
1668 	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1669 	 * do this here so we do not have to do it in all the code that
1670 	 * calls us.
1671 	 */
1672 	bp->b_flags &= ~B_INVAL;
1673 	bp->b_ioflags &= ~BIO_ERROR;
1674 
1675 	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1676 	iocmd = bp->b_iocmd;
1677 	if (iocmd == BIO_READ) {
1678 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1679 	    io.iov_base = bp->b_data;
1680 	    uiop->uio_rw = UIO_READ;
1681 
1682 	    switch (vp->v_type) {
1683 	    case VREG:
1684 		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1685 		NFSINCRGLOBAL(newnfsstats.read_bios);
1686 		error = ncl_readrpc(vp, uiop, cr);
1687 
1688 		if (!error) {
1689 		    if (uiop->uio_resid) {
1690 			/*
1691 			 * If we had a short read with no error, we must have
1692 			 * hit a file hole.  We should zero-fill the remainder.
1693 			 * This can also occur if the server hits the file EOF.
1694 			 *
1695 			 * Holes used to be able to occur due to pending
1696 			 * writes, but that is not possible any longer.
1697 			 */
1698 			int nread = bp->b_bcount - uiop->uio_resid;
1699 			int left  = uiop->uio_resid;
1700 
1701 			if (left > 0)
1702 				bzero((char *)bp->b_data + nread, left);
1703 			uiop->uio_resid = 0;
1704 		    }
1705 		}
1706 		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1707 		if (p && (vp->v_vflag & VV_TEXT)) {
1708 			mtx_lock(&np->n_mtx);
1709 			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1710 				mtx_unlock(&np->n_mtx);
1711 				PROC_LOCK(p);
1712 				killproc(p, "text file modification");
1713 				PROC_UNLOCK(p);
1714 			} else
1715 				mtx_unlock(&np->n_mtx);
1716 		}
1717 		break;
1718 	    case VLNK:
1719 		uiop->uio_offset = (off_t)0;
1720 		NFSINCRGLOBAL(newnfsstats.readlink_bios);
1721 		error = ncl_readlinkrpc(vp, uiop, cr);
1722 		break;
1723 	    case VDIR:
1724 		NFSINCRGLOBAL(newnfsstats.readdir_bios);
1725 		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1726 		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1727 			error = ncl_readdirplusrpc(vp, uiop, cr, td);
1728 			if (error == NFSERR_NOTSUPP)
1729 				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1730 		}
1731 		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1732 			error = ncl_readdirrpc(vp, uiop, cr, td);
1733 		/*
1734 		 * end-of-directory sets B_INVAL but does not generate an
1735 		 * error.
1736 		 */
1737 		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1738 			bp->b_flags |= B_INVAL;
1739 		break;
1740 	    default:
1741 		ncl_printf("ncl_doio:  type %x unexpected\n", vp->v_type);
1742 		break;
1743 	    }
1744 	    if (error) {
1745 		bp->b_ioflags |= BIO_ERROR;
1746 		bp->b_error = error;
1747 	    }
1748 	} else {
1749 	    /*
1750 	     * If we only need to commit, try to commit
1751 	     */
1752 	    if (bp->b_flags & B_NEEDCOMMIT) {
1753 		    int retv;
1754 		    off_t off;
1755 
1756 		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1757 		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1758 			bp->b_wcred, td);
1759 		    if (retv == 0) {
1760 			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1761 			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1762 			    bp->b_resid = 0;
1763 			    bufdone(bp);
1764 			    return (0);
1765 		    }
1766 		    if (retv == NFSERR_STALEWRITEVERF) {
1767 			    ncl_clearcommit(vp->v_mount);
1768 		    }
1769 	    }
1770 
1771 	    /*
1772 	     * Setup for actual write
1773 	     */
1774 	    mtx_lock(&np->n_mtx);
1775 	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1776 		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1777 	    mtx_unlock(&np->n_mtx);
1778 
1779 	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1780 		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1781 		    - bp->b_dirtyoff;
1782 		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1783 		    + bp->b_dirtyoff;
1784 		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1785 		uiop->uio_rw = UIO_WRITE;
1786 		NFSINCRGLOBAL(newnfsstats.write_bios);
1787 
1788 		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1789 		    iomode = NFSWRITE_UNSTABLE;
1790 		else
1791 		    iomode = NFSWRITE_FILESYNC;
1792 
1793 		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit);
1794 
1795 		/*
1796 		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1797 		 * to cluster the buffers needing commit.  This will allow
1798 		 * the system to submit a single commit rpc for the whole
1799 		 * cluster.  We can do this even if the buffer is not 100%
1800 		 * dirty (relative to the NFS blocksize), so we optimize the
1801 		 * append-to-file-case.
1802 		 *
1803 		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1804 		 * cleared because write clustering only works for commit
1805 		 * rpc's, not for the data portion of the write).
1806 		 */
1807 
1808 		if (!error && iomode == NFSWRITE_UNSTABLE) {
1809 		    bp->b_flags |= B_NEEDCOMMIT;
1810 		    if (bp->b_dirtyoff == 0
1811 			&& bp->b_dirtyend == bp->b_bcount)
1812 			bp->b_flags |= B_CLUSTEROK;
1813 		} else {
1814 		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1815 		}
1816 
1817 		/*
1818 		 * For an interrupted write, the buffer is still valid
1819 		 * and the write hasn't been pushed to the server yet,
1820 		 * so we can't set BIO_ERROR and report the interruption
1821 		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1822 		 * is not relevant, so the rpc attempt is essentially
1823 		 * a noop.  For the case of a V3 write rpc not being
1824 		 * committed to stable storage, the block is still
1825 		 * dirty and requires either a commit rpc or another
1826 		 * write rpc with iomode == NFSWRITE_FILESYNC before
1827 		 * the block is reused. This is indicated by setting
1828 		 * the B_DELWRI and B_NEEDCOMMIT flags.
1829 		 *
1830 		 * If the buffer is marked B_PAGING, it does not reside on
1831 		 * the vp's paging queues so we cannot call bdirty().  The
1832 		 * bp in this case is not an NFS cache block so we should
1833 		 * be safe. XXX
1834 		 *
1835 		 * The logic below breaks up errors into recoverable and
1836 		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1837 		 * and keep the buffer around for potential write retries.
1838 		 * For the latter (e.g., ESTALE), we toss the buffer away (B_INVAL)
1839 		 * and save the error in the nfsnode. This is less than ideal
1840 		 * but necessary. Keeping such buffers around could potentially
1841 		 * cause buffer exhaustion eventually (they can never be written
1842 		 * out, so they will constantly be re-dirtied). It also causes
1843 		 * all sorts of vfs panics. For non-recoverable write errors,
1844 		 * also invalidate the attrcache, so we'll be forced to go over
1845 		 * the wire for this object, returning an error to user on next
1846 		 * call (most of the time).
1847 		 */
1848     		if (error == EINTR || error == EIO || error == ETIMEDOUT
1849 		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1850 			int s;
1851 
1852 			s = splbio();
1853 			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1854 			if ((bp->b_flags & B_PAGING) == 0) {
1855 			    bdirty(bp);
1856 			    bp->b_flags &= ~B_DONE;
1857 			}
1858 			if (error && (bp->b_flags & B_ASYNC) == 0)
1859 			    bp->b_flags |= B_EINTR;
1860 			splx(s);
1861 	    	} else {
1862 		    if (error) {
1863 			bp->b_ioflags |= BIO_ERROR;
1864 			bp->b_flags |= B_INVAL;
1865 			bp->b_error = np->n_error = error;
1866 			mtx_lock(&np->n_mtx);
1867 			np->n_flag |= NWRITEERR;
1868 			np->n_attrstamp = 0;
1869 			mtx_unlock(&np->n_mtx);
1870 		    }
1871 		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1872 		}
1873 	    } else {
1874 		bp->b_resid = 0;
1875 		bufdone(bp);
1876 		return (0);
1877 	    }
1878 	}
1879 	bp->b_resid = uiop->uio_resid;
1880 	if (must_commit)
1881 	    ncl_clearcommit(vp->v_mount);
1882 	bufdone(bp);
1883 	return (error);
1884 }
1885 
1886 /*
1887  * Used to aid in handling ftruncate() operations on the NFS client side.
1888  * Truncation creates a number of special problems for NFS.  We have to
1889  * throw away VM pages and buffer cache buffers that are beyond EOF, and
1890  * we have to properly handle VM pages or (potentially dirty) buffers
1891  * that straddle the truncation point.
1892  */
1893 
1894 int
1895 ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1896 {
1897 	struct nfsnode *np = VTONFS(vp);
1898 	u_quad_t tsize;
1899 	int biosize = vp->v_mount->mnt_stat.f_iosize;
1900 	int error = 0;
1901 
1902 	mtx_lock(&np->n_mtx);
1903 	tsize = np->n_size;
1904 	np->n_size = nsize;
1905 	mtx_unlock(&np->n_mtx);
1906 
1907 	if (nsize < tsize) {
1908 		struct buf *bp;
1909 		daddr_t lbn;
1910 		int bufsize;
1911 
1912 		/*
1913 		 * vtruncbuf() doesn't get the buffer overlapping the
1914 		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1915 		 * buffer that now needs to be truncated.
1916 		 */
1917 		error = vtruncbuf(vp, cred, td, nsize, biosize);
1918 		lbn = nsize / biosize;
1919 		bufsize = nsize & (biosize - 1);
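		/*
		 * Grab the buffer straddling the new EOF and clip its dirty
		 * region so stale data past the truncation point is not
		 * written back.
		 */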
1920 		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1921  		if (!bp)
1922 			return (EINTR);
1923 		if (bp->b_dirtyoff > bp->b_bcount)
1924 			bp->b_dirtyoff = bp->b_bcount;
1925 		if (bp->b_dirtyend > bp->b_bcount)
1926 			bp->b_dirtyend = bp->b_bcount;
1927 		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1928 		brelse(bp);
1929 	} else {
1930 		vnode_pager_setsize(vp, nsize);
1931 	}
1932 	return(error);
1933 	return (error);
1934 
1935