xref: /freebsd/sys/fs/smbfs/smbfs_io.c (revision d8b878873e7aa8df1972cc6a642804b17eb61087)
/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0,
    "Use fast lookups");

#define DE_SIZE	(sizeof(struct dirent))

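/*
 * Translate one SMB directory search into a stream of struct dirent
 * records.  The "." and ".." entries are synthesized locally; the rest
 * come from a search context cached on the smbnode (n_dirseq), which is
 * reopened whenever the caller's offset no longer matches our position.
 */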
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
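	/* Synthesize the "." and ".." entries locally. */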
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
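	/*
	 * The search handle cannot seek backwards; if the caller's offset
	 * does not match our cached position, reopen the search and wind
	 * it forward one entry at a time.
	 */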
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

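/*
 * Read from a vnode.  Directories are decoded by smbfs_readvdir(); regular
 * files are read from the server directly, after the cached attributes are
 * revalidated so that changes made by other clients invalidate stale
 * buffers first.
 */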
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a segment flag that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
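	/*
	 * Revalidate cached data: a node we modified only needs fresh
	 * attributes, but a server-side mtime change means another client
	 * wrote the file, so the local buffers must be invalidated.
	 */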
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

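/*
 * Write to a regular file.  Append and synchronous writes flush any
 * locally cached data first; the resulting offset is also checked against
 * the process file-size resource limit before the data goes to the server.
 */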
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct proc *p;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	p = td->td_proc;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, td);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (p != NULL) {
		PROC_LOCK(p);
		if (uiop->uio_offset + uiop->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			return EFBIG;
		}
		PROC_UNLOCK(p);
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

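	/*
	 * b_blkno is expressed in DEV_BSIZE units, so the byte offset of
	 * this buffer within the file is b_blkno * DEV_BSIZE.
	 */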
	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
		if (error)
			break;
		if (uiop->uio_resid) {
			int left = uiop->uio_resid;
			int nread = bp->b_bcount - left;
			if (left > 0)
			    bzero((char *)bp->b_data + nread, left);
		}
		break;
	    default:
		printf("smbfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_error = error;
		bp->b_ioflags |= BIO_ERROR;
	    }
	} else { /* write */
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

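	    /*
	     * Only the dirty region of the buffer (b_dirtyoff..b_dirtyend),
	     * clamped to the file size above, is sent to the server.
	     */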
	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			splx(s);
		} else {
			if (error) {
				bp->b_ioflags |= BIO_ERROR;
				bp->b_error = error;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * We wish we could get rid of the multiple I/O routines.
 */
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	VM_OBJECT_LOCK(object);
	if (m->valid != 0) {
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return 0;
	}
	VM_OBJECT_UNLOCK(object);

	smb_makescred(&scred, td, cred);

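	/*
	 * Borrow a pageout buffer so the pages can be mapped into
	 * contiguous kernel virtual memory and filled with a single
	 * uio-based read from the server.
	 */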
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

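	/*
	 * size is the number of bytes actually read; pages beyond it are
	 * at best partially valid.
	 */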
	size = count - uio.uio_resid;

	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("smbfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

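	/*
	 * As in smbfs_getpages(), borrow a pageout buffer to map the pages
	 * into kernel virtual memory, then push them with one synchronous
	 * write.
	 */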
	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		vm_page_lock_queues();
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		vm_page_unlock_queues();
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

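	/*
	 * NFLUSHINPROG serializes concurrent flushers; sleep (waking up
	 * periodically to check for signals) until any flush in progress
	 * completes.
	 */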
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz);
		error = smb_td_intr(td);
		if (error == EINTR)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
	}

	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}