/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");


#define DE_SIZE	(sizeof(struct dirent))

static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
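	/*
	 * Synthesize the "." and ".." entries first; the SMB server does
	 * not return them, so they are generated locally for directory
	 * offsets 0 and 1.
	 */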
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
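	/*
	 * The server-side search context is only usable at its current
	 * position (n_dirofs).  If the caller wants a different offset,
	 * or there is no open search, start a new one from the top.
	 */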
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
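	/*
	 * Wind the search forward, discarding entries, until it is
	 * positioned at the requested directory offset.
	 */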
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported !\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
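	/*
	 * Keep the cached data honest: if the node was modified locally,
	 * refresh the attribute cache; otherwise, if the server-side
	 * modification time changed, invalidate cached buffers before
	 * reading so stale data is not returned.
	 */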
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported !\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
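	/*
	 * For append or synchronous writes, flush locally modified data
	 * first; an append then starts at the file size currently known
	 * for the node.
	 */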
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

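	/*
	 * Translate the buffer into a uio and perform an ordinary SMB
	 * read or write against the node's file handle.
	 */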
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
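		/*
		 * A short read means EOF was reached; zero the remainder
		 * of the buffer so no stale data is exposed.
		 */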
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
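	    /*
	     * Clamp the dirty region so it does not extend past the
	     * current end of file.
	     */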
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * This logic is inherited from the NFS client.  For an
		 * interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we
		 * can't set BIO_ERROR and report the interruption by
		 * setting B_EINTR.  For the B_ASYNC case, B_EINTR is
		 * not relevant, so the rpc attempt is essentially a
		 * noop.  In NFS, a V3 write rpc not yet committed to
		 * stable storage leaves the block dirty and requires
		 * either a commit rpc or another write rpc with
		 * iomode == NFSV3WRITE_FILESYNC before the block is
		 * reused; that state is indicated by the B_DELWRI and
		 * B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * It would be nice to get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0 && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

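	/*
	 * Borrow a pbuf just for its KVA window: map the target pages
	 * into it and read from the server directly into them.
	 */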
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n",error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.   We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it is
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

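	/*
	 * Assume failure for every page up front; on success the pages
	 * actually written are marked clean below by
	 * vnode_pager_undirty_pages().
	 */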
	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

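	/*
	 * Write dirty pages back through the VM object before
	 * invalidating buffers, so vinvalbuf(V_SAVE) does not lose data
	 * held only in the page cache.
	 */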
	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}
681