xref: /freebsd/sys/fs/smbfs/smbfs_io.c (revision e32fecd0c2c3ee37c47ee100f169e7eb0282a873)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern uma_zone_t smbfs_pbuf_zone;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE	(sizeof(struct dirent))

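/*
 * Fill a directory read request.  The directory is presented to the
 * caller as an array of fixed-size dirent slots (DE_SIZE bytes each):
 * slots 0 and 1 are the synthesized "." and ".." entries, and slots
 * 2..n map to the entries of a server-side search, so uio_offset is
 * always interpreted in DE_SIZE units.
 */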
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
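	/*
	 * Synthesize the "." and ".." entries locally; the entries at
	 * slots 2 and up come from the server-side search below.
	 */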
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		/* readdir consumers skip entries with d_fileno == 0; fake one. */
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_off = offset + 1;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
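	/*
	 * A server-side search can only be consumed sequentially.  If the
	 * requested slot does not match the cached search position (or no
	 * search is open), restart the search from slot 2; the catch-up
	 * loop below then walks it forward to the requested slot.
	 */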
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
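	/*
	 * SMB searches cannot seek, so catch up with the requested offset
	 * by fetching and discarding intermediate entries.
	 */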
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			/* Running off the end of the directory is not an error. */
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_off = offset + 1;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
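		/*
		 * Each search entry carries the file's attributes, so with
		 * fastlookup enabled we can instantiate the vnode now and
		 * prime the name cache, saving a round trip on a later
		 * lookup of this name.
		 */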
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against I/O methods that are not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
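	/*
	 * Keep cached data coherent with the server before reading: if we
	 * wrote through this node, refresh the attribute cache; otherwise
	 * compare the server's mtime with our cached copy and toss local
	 * buffers when the file changed behind our back.
	 */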
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
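	/*
	 * For append or synchronous writes, flush any locally modified
	 * data first so the write lands at a consistent offset.  For
	 * O_APPEND, position the uio at the cached end of file; n_size
	 * may lag the true size if another client extended the file (see
	 * the "notyet" block below).
	 */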
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	error = vn_rlimit_fsize(vp, uiop, td);
	if (error != 0)
		return (error);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

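	/*
	 * The buffer cache addresses file data in DEV_BSIZE (512-byte)
	 * blocks, so the wire offset is b_blkno * DEV_BSIZE, plus the
	 * dirty-region offset within the buffer in the write case.
	 */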
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			/* Short read: zero-fill the tail of the buffer. */
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR; instead, report the
		 * interruption by setting B_EINTR. For the B_ASYNC case,
		 * B_EINTR is not relevant, so the rpc attempt is
		 * essentially a noop.  For the case of a V3 write rpc not
		 * being committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * (The above appears to have been carried over from the
		 * NFS client; SMB has no commit rpc, so the B_NEEDCOMMIT
		 * case looks vestigial here.)
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

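	/*
	 * Borrow a pager buffer purely for its kernel virtual address
	 * range, map the target pages into it, and let smb_read() fill
	 * them through a single SYSSPACE uio.
	 */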
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it is
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

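	/*
	 * Every slot was preset to VM_PAGER_ERROR above; on a successful
	 * write, vnode_pager_undirty_pages() below rewrites the slots
	 * that were actually pushed to the server.
	 */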
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;

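	/*
	 * NFLUSHINPROG serializes flushers; NFLUSHWANT asks the current
	 * flusher for a wakeup.  The sleep is bounded (2 * hz) so a
	 * pending signal is still noticed via smb_td_intr().
	 */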
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}