/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern uma_zone_t smbfs_pbuf_zone;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE	(sizeof(struct dirent))

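/*
 * Translate an SMB directory search into a stream of struct dirent
 * records for the VOP_READDIR path: the first two slots are synthesized
 * "." and ".." entries, the rest come from smbfs_findopen()/smbfs_findnext().
 */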
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred *scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error/*, *eofflag = ap->a_eofflag*/;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	scred = smbfs_malloc_scred();
	smb_makescred(scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
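	/*
	 * Directory slots 0 and 1 are the synthetic "." and ".." entries;
	 * emit them before consulting the server-side search.
	 */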
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? np->n_parentino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_type = DT_DIR;
		dirent_terminate(&de);
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			goto out;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0) {
		error = 0;
		goto out;
	}
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			goto out;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
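	/*
	 * Advance the open search until it catches up with the requested
	 * directory offset.
	 */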
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, scred);
			np->n_dirseq = NULL;
			error = (error == ENOENT) ? 0 : error;
			goto out;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		dirent_terminate(&de);
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
out:
	smbfs_free_scred(scred);
	return error;
}

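/*
 * Read from a regular file or a directory.  Directory reads are handed to
 * smbfs_readvdir(); regular files go straight to smb_read() once the cached
 * attributes have been validated against the server.
 */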
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred *scred;
	int error, lks;

	/*
	 * Protect against I/O methods that are not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	return (error);
}

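/*
 * Write to a regular file on the SMB share.  Handles IO_APPEND and IO_SYNC
 * and extends the cached file size when the write grows the file.
 */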
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred *scred;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported !\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
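	/*
	 * For append or synchronous writes, drop cached attributes and flush
	 * dirty buffers first; IO_APPEND then positions the write at the
	 * cached end of file.
	 */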
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;

	if (vn_rlimit_fsize(vp, uiop, td))
		return (EFBIG);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, scred);
	smbfs_free_scred(scred);
	SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio *uiop;
	struct iovec io;
	struct smb_cred *scred;
	int error = 0;

	uiop = malloc(sizeof(struct uio), M_SMBFSDATA, M_WAITOK);
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cr);

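	/*
	 * Reads pull the whole buffer from the server and zero-fill any
	 * shortfall; writes push only the dirty region of the buffer.
	 */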
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		free(uiop, M_SMBFSDATA);
		smbfs_free_scred(scred);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	free(uiop, M_SMBFSDATA);
	smbfs_free_scred(scred);
	return error;
}

/*
 * Vnode op for VM getpages.
 * XXX: it would be nice to get rid of the multiple I/O routines.
 */
int
smbfs_getpages(struct vop_getpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int *a_rbehind;
	int *a_rahead;
} */ *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_object_t object;
	vm_page_t *pages;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;				/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for SMB filesystem?
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);

	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n",error);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.   We simply
			 * leave valid set to 0.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it is
 * not necessary to open the vnode.
 */
int
smbfs_putpages(struct vop_putpages_args /* {
	struct vnode *a_vp;
	vm_page_t *a_m;
	int a_count;
	int a_sync;
	int *a_rtvals;
} */ *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred *scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_ERROR;
	}
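	/*
	 * rtvals[] starts out all VM_PAGER_ERROR; pages actually written are
	 * marked clean via vnode_pager_undirty_pages() once smb_write()
	 * succeeds below.
	 */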

	bp = uma_zalloc(smbfs_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uio.uio_offset,
	    uio.uio_resid);

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, scred);
	smbfs_free_scred(scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	uma_zfree(smbfs_pbuf_zone, bp);

	if (error == 0) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    npages * PAGE_SIZE, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (VN_IS_DOOMED(vp))
		return 0;

	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}