/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");
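/*
 * smbfs_fastlookup is a read-write tunable (sysctl vfs.smbfs.fastlookup).
 * When non-zero, smbfs_readvdir() also creates vnodes and name-cache
 * entries for the directory entries it returns; setting
 * "sysctl vfs.smbfs.fastlookup=0" disables that behaviour.
 */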


#define DE_SIZE	(sizeof(struct dirent))

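/*
 * Read the contents of a directory into a sequence of struct dirent
 * records: the first two slots are synthesized "." and ".." entries,
 * the rest come from an SMB search (findfirst/findnext) context that is
 * cached in the smbnode and reopened when the caller seeks backwards.
 */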
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
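	/*
	 * Synthesize the "." and ".." entries by hand; the SMB server does
	 * not return them.  ".." uses the parent's inode number when one is
	 * known, otherwise a fake, stable value is made up below.
	 */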
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
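	/*
	 * The search context only moves forward; advance it entry by entry
	 * until it catches up with the offset the caller asked for.
	 */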
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

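/*
 * Plain (uncached) read path: validate the request, keep the attribute
 * cache honest, and hand the uio to smb_read().  Directory reads are
 * redirected to smbfs_readvdir() above.
 */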
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

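/*
 * Plain write path: honor IO_APPEND/IO_SYNC semantics, enforce the file
 * size resource limit, push the data with smb_write(), and grow the
 * cached file size (and the VM object) if the write extended the file.
 */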
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
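 * For BIO_READ the buffer's b_blkno/b_bcount select the file range and any
 * shortfall from the server is zero-filled; for writes only the dirty
 * region (b_dirtyoff..b_dirtyend) is pushed with smb_write().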
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

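	/*
	 * Read side: issue one smb_read() for the whole buffer and zero any
	 * tail the server did not return (e.g. a block straddling EOF).
	 */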
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
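	    /*
	     * Write side: never push data past the cached file size, and
	     * only send the dirty region of the buffer.
	     */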
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	smb_makescred(&scred, td, cred);

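	/*
	 * Map the pages into a borrowed pageout buffer's KVA and fill them
	 * with a single contiguous smb_read() starting at the first page's
	 * file offset.
	 */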
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (reqpage != i) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED) {
					vm_page_lock(m);
					vm_page_activate(m);
					vm_page_unlock(m);
				} else {
					vm_page_lock(m);
					vm_page_deactivate(m);
					vm_page_unlock(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it is
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

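	/*
	 * As in getpages, map the pages into a pageout buffer's KVA and
	 * push them to the server with one synchronous smb_write().
	 */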
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

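	/*
	 * Only one flusher at a time: NFLUSHINPROG marks the flush in
	 * progress and NFLUSHWANT asks for a wakeup when it finishes.
	 */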
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}