xref: /freebsd/sys/fs/smbfs/smbfs_io.c (revision 5e3190f700637fcfc1a52daeaa4a031fdd2557c7)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern uma_zone_t smbfs_pbuf_zone;

static int smbfs_fastlookup = 1;

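/*
 * vfs.smbfs.fastlookup enables priming of the name cache with entries
 * returned by directory searches (see smbfs_readvdir() below).  Being
 * CTLFLAG_RW it can be toggled at run time, e.g.
 * "sysctl vfs.smbfs.fastlookup=0".
 */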
SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE	(sizeof(struct dirent))

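/*
 * Fill a readdir request from an SMB directory search.  Directory
 * offsets are measured in units of DE_SIZE (one fixed-size dirent per
 * slot), so entry N lives at uio_offset N * DE_SIZE.  Slots 0 and 1
 * are the synthetic "." and ".." entries; real entries start at
 * offset 2.
 */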
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
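	/*
	 * Synthesize "." and "..".  d_namlen is offset + 1, i.e. 1 for
	 * slot 0 and 2 for slot 1, so the same two-character name buffer
	 * yields both "." and "..".  The 0x7ffffffd fallback fileno is
	 * an (apparently arbitrary) placeholder used when the real inode
	 * number is unknown.
	 */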
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_off = offset + 1;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
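	/*
	 * SMB directory searches are sequential-only: there is no seek
	 * operation, so catch up to the requested offset by fetching
	 * and discarding intermediate entries.
	 */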
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_off = offset + 1;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
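		/*
		 * With fastlookup enabled, use the attributes returned
		 * with the search entry to instantiate the vnode now and
		 * prime the name cache, saving a separate lookup later.
		 */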
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method which is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
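	/*
	 * Keep cached data coherent with the server: if we dirtied the
	 * node ourselves (NMODIFIED), refresh the cached attributes;
	 * otherwise compare the server's mtime with the last one seen
	 * and discard cached buffers if another client changed the file.
	 */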
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	error = vn_rlimit_fsize(vp, uiop, td);
	if (error != 0)
		return (error);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
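	/*
	 * If the write extended the file, grow the cached size and tell
	 * the VM system so the vnode's pages track the new EOF.
	 */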
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

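	/*
	 * The buffer cache addresses blocks by b_blkno in DEV_BSIZE
	 * (512-byte) units, so a block's byte offset in the file is
	 * b_blkno * DEV_BSIZE; e.g. b_blkno 8 maps to offset 4096.
	 */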
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
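		/*
		 * A short read means we hit EOF within the block; zero
		 * the tail so no stale data leaks into the buffer.
		 */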
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
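	    /*
	     * Never push data past EOF: clamp the dirty region to the
	     * file size, since the last block is usually only partially
	     * backed by file data.
	     */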
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Ideally we would get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

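	/*
	 * Borrow a pager buffer for its preallocated KVA window, map the
	 * target pages into it, and fill the whole run with a single
	 * contiguous smb_read().
	 */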
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.   We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

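	/*
	 * As in smbfs_getpages(), map the pages into a pager buffer's
	 * KVA and push them to the server with one synchronous
	 * smb_write().  rtvals[] was preset to VM_PAGER_ERROR above and
	 * is only upgraded if the write succeeds.
	 */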
	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;

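	/*
	 * NFLUSHINPROG serializes flushers; waiters sleep on n_flag and
	 * are woken via NFLUSHWANT.  The tsleep() timeout below bounds
	 * the wait so smb_td_intr() can be polled for a pending signal.
	 */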
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}
664