/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

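/*
 * Read directory entries from vp into uio.  The first two slots are the
 * synthesized "." and ".." entries; the rest come from an SMB search
 * (findopen/findnext) whose context is cached on the smbnode across calls.
 */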
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	np = VTOSMB(vp);
	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
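	/*
	 * Offsets 0 and 1 are the synthetic "." and ".." entries; fabricate
	 * them locally instead of asking the server.
	 */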
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
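	/*
	 * If the cached search context is missing or positioned at the wrong
	 * offset, close it and open a new findfirst for "*".
	 */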
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
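	/*
	 * Skip forward through the search results until the context is
	 * positioned at the entry the caller asked for.
	 */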
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
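		/*
		 * With fastlookup enabled, instantiate a vnode for the entry
		 * and prime the name cache so a later lookup can avoid a
		 * round trip to the server.
		 */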
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

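/*
 * Read from a regular file or directory vnode.  Directory reads are handed
 * to smbfs_readvdir(); regular-file reads validate the cached attributes
 * and then go straight to the server with smb_read().
 */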
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
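	/*
	 * If the node was modified locally, drop the cached attributes and
	 * refetch them.  Otherwise compare the server's modification time
	 * with the cached one and invalidate our buffers if the file changed
	 * behind our back.
	 */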
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

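/*
 * Write to a regular file vnode.  Handles IO_APPEND/IO_SYNC semantics and
 * grows the cached file size (and the VM object) when the write extends
 * the file.
 */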
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
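	/*
	 * For synchronous or appending writes, flush any locally modified
	 * buffers first; for IO_APPEND, position the write at the cached
	 * end of file.
	 */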
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

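	/*
	 * Reads fill the whole buffer from the server and zero any tail the
	 * server did not return; writes push only the dirty region of the
	 * buffer.
	 */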
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
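	    /*
	     * Clamp the dirty region so it does not extend past the cached
	     * end of file.
	     */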
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish we could get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

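	/*
	 * Borrow a pager buffer for its KVA, map the pages into it, and read
	 * the whole run from the server with a single request.
	 */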
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (reqpage != i) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

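	/*
	 * Mark each page fully or partially valid according to how much data
	 * the read actually returned.
	 */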
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

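	/*
	 * Map the pages into a pager buffer and push them to the server in
	 * one synchronous write.
	 */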
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

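	/*
	 * If another thread is already flushing this node, wait for it to
	 * finish, bailing out early if the caller is interrupted.
	 */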
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}
694