xref: /freebsd/sys/fs/smbfs/smbfs_io.c (revision 430f7286a566b1407c7b32ce13585caf5aa59b92)
/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

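/*
 * Emit the contents of directory vp into uio as a stream of fixed-size
 * struct dirent records.  The first two slots are the synthetic "." and
 * ".." entries; the rest are fetched from the server through the
 * findopen/findnext search that is kept cached in np->n_dirseq.
 */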
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
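	/*
	 * Produce the "." and ".." entries locally, before consulting the
	 * server-side search for the real directory members.
	 */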
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
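	/*
	 * SMB directory searches are strictly sequential, so a read that
	 * does not resume exactly where the cached search left off forces
	 * us to reopen the search and step forward entry by entry.
	 */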
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			/* Running off the end of the directory is not an error. */
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

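/*
 * Read from a regular file or a directory.  Directory reads are handed
 * off to smbfs_readvdir(); regular-file reads first validate the
 * attribute cache so that a file changed by another client is noticed,
 * then go straight to the server with smb_read().
 */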
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
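	/*
	 * If we wrote through this vnode (NMODIFIED), our cached
	 * attributes are suspect, so refetch them.  Otherwise compare the
	 * server's modification time with the one we saw last: a mismatch
	 * means another client changed the file, and our cached buffers
	 * must be invalidated before reading.
	 */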
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

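/*
 * Write to a regular file.  IO_APPEND and IO_SYNC writes flush any
 * locally modified buffers first; appends then position the uio at the
 * cached file size.  The data itself goes to the server synchronously
 * via smb_write(), and a successful extending write updates both the
 * cached size and the vnode pager's idea of it.
 */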
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
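/*
 * Notes: bp->b_blkno is expressed in DEV_BSIZE units.  A short read
 * near EOF zero-fills the rest of the buffer so no stale data is left
 * behind; interrupted or uncommitted writes are redirtied rather than
 * discarded (error handling borrowed from the NFS client).
 */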
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * This error handling is inherited from the NFS client.
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0 && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

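	/*
	 * Borrow a pager buffer and map the pages into its KVA window so
	 * that smb_read() can treat them as one contiguous system-space
	 * buffer.
	 */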
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

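	/* Describe the mapped region with a single-segment uio. */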
	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page.
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

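	/*
	 * As in getpages, map the pages into a pager buffer's KVA and
	 * push them to the server with one synchronous smb_write().
	 */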
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

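	/*
	 * On success, mark the pages clean for the range that actually
	 * reached the server; rtvals[] carries the per-page status back
	 * to the pager.
	 */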
	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

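	/*
	 * Clean the VM object's pages first so dirty mmap'ed data reaches
	 * the buffer cache, then flush and invalidate the buffers
	 * themselves.
	 */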
	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}
677