/* xref: /freebsd/sys/fs/smbfs/smbfs_io.c (revision 7661de35d15f582ab33e3bd6b8d909601557e436) */
/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

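/*
 * Read directory entries into a uio.  The first two slots are the
 * synthetic "." and ".." entries; the remaining entries come from an
 * SMB search that is kept open across calls in np->n_dirseq.
 */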
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
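	/*
	 * Offsets 0 and 1 are the synthetic "." and ".." entries; they are
	 * generated locally rather than requested from the server.
	 */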
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
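	/*
	 * If the requested offset does not match where the cached search
	 * handle sits, or no search is open yet, start a new wildcard
	 * search; the loop below then skips forward to the right entry.
	 */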
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
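	/*
	 * Emit one dirent per entry returned by the server until the uio
	 * is full or the search is exhausted.
	 */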
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

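/*
 * Read from a regular file or directory vnode.  Directory reads are handed
 * to smbfs_readvdir(); for regular files the cached data is validated
 * against the server's modification time before the SMB read is issued.
 */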
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a segment flag that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
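	/*
	 * Make sure locally cached data is still usable: if we have dirty
	 * data, just refresh the cached mtime; otherwise, if the server-side
	 * mtime changed behind our back, throw away the cached buffers.
	 */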
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

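/*
 * Write to a regular file vnode.  For append or synchronous writes any
 * locally cached data is flushed first so the write lands at the current
 * end of file.
 */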
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
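	/*
	 * For append or synchronous writes, push out and invalidate any
	 * dirty cached data first; for append, start the write at the
	 * cached file size.
	 */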
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

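	/*
	 * BIO_READ fills the whole buffer from the server and zero-fills
	 * any short read; writes only push the dirty region of the buffer.
	 */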
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_WLOCK(object);
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return 0;
	}
	VM_OBJECT_WUNLOCK(object);

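	/*
	 * Map the pages into the KVA of a pager buffer and read the data
	 * directly from the server into them with a single SMB read.
	 */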
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_WLOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (reqpage != i) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

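	/*
	 * Mark each page according to how much of it the read actually
	 * filled: fully read pages become fully valid, a partially read
	 * page is validated up to the read boundary, and anything past the
	 * end of the transfer is left invalid.
	 */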
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_WUNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

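	/*
	 * Map the pages into a pager buffer's KVA and push them to the
	 * server with a single synchronous SMB write; pages actually
	 * written are then marked clean via vnode_pager_undirty_pages().
	 */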
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error)
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

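	/*
	 * vinvalbuf() sleeps with PCATCH, so it can be interrupted by a
	 * signal.  In that case clear the in-progress flag, wake any
	 * waiters and report EINTR; otherwise retry until all buffers
	 * are flushed.
	 */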
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}
695