/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <fs/smbfs/smbfs.h>
#include <fs/smbfs/smbfs_node.h>
#include <fs/smbfs/smbfs_subr.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

#define DE_SIZE	(sizeof(struct dirent))

static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
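	/*
	 * First synthesize "." and ".." locally: the SMB search below
	 * covers only real entries, so directory offsets 0 and 1 are
	 * manufactured here and the server-side search is consumed
	 * starting at offset 2.  When no parent inode number is
	 * available, fall back to a fabricated one (0x7ffffffd + offset)
	 * so no entry reaches userland with d_fileno == 0.
	 */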
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}

int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct thread *td;
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a method that is not supported for now.
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vnode types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(vp->v_vnlock, td); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
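		/*
		 * No locally modified data.  Refetch the attributes and
		 * compare the remote modification time against our cached
		 * copy; if another client changed the file behind our
		 * back, flush the buffer cache before reading.
		 */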
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
	struct ucred *cred, int ioflag)
{
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct proc *p;
	struct thread *td;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vnode types other than VREG are unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	p = td->td_proc;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#if notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, cred, td);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (p && uiop->uio_offset + uiop->uio_resid > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return EFBIG;
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.
 */
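/*
 * The buffer cache addresses blocks by b_blkno in DEV_BSIZE (512 byte)
 * units, so a block's file offset is b_blkno * DEV_BSIZE.  Reads that
 * come up short are zero-filled to the end of the buffer; writes push
 * only the dirty region (b_dirtyoff..b_dirtyend), clipped to the
 * current file size.
 */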
int
smbfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct vnode *vp = bp->b_vp;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
			if (error)
				break;
			if (uiop->uio_resid) {
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_ioflags |= BIO_ERROR;
		}
	} else { /* write */
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			bp->b_flags |= B_WRITEINPROG;
			error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
			bp->b_flags &= ~B_WRITEINPROG;

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 */
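/*
 * The pages are mapped into kernel space with a borrowed physical
 * buffer (getpbuf()/pmap_qenter()) so that the whole request can be
 * issued as a single contiguous smb_read() through bp->b_data.
 */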
int
smbfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, nextoff, size, toff, npages, count, reqpage;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_object_t object;
	vm_page_t *pages, m;

	vp = ap->a_vp;
	if ((object = vp->v_object) == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	npages = btoc(count);
	reqpage = ap->a_reqpage;

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	m = pages[reqpage];

	if (m->valid != 0) {
		/* handled by vm_fault now */
		/* vm_page_zero_invalid(m, TRUE); */
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return 0;
	}

	smb_makescred(&scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error)
		VM_OBJECT_LOCK(object);
	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		vm_page_lock_queues();
		for (i = 0; i < npages; i++) {
			if (reqpage != i)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}

		if (i != reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */
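
			/*
			 * Only the requested page is handed back to the
			 * caller; every other page is either recycled onto
			 * a page queue below or, on a failed read, freed.
			 */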

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	if (error)
		VM_OBJECT_UNLOCK(object);
	return 0;
#endif /* SMBFS_RWGENERIC */
}

/*
 * Vnode op for VM putpages.
 * possible bug: all IO done in sync mode
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 */
int
smbfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td;
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, td, -1);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	td = curthread;			/* XXX */
	cred = td->td_ucred;		/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, td, -1); */
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td); */
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		vm_page_lock_queues();
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		vm_page_unlock_queues();
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
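/*
 * NFLUSHINPROG/NFLUSHWANT implement a simple hand-rolled lock: the
 * first flusher sets NFLUSHINPROG, later callers set NFLUSHWANT and
 * sleep on np->n_flag until woken, and an interrupted caller bails
 * out with EINTR when intrflg is set.
 */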
int
smbfs_vinvalbuf(vp, flags, cred, td, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int intrflg;
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0, slpflag, slptimeo;

	VI_LOCK(vp);
	if (vp->v_iflag & VI_XLOCK) {
		VI_UNLOCK(vp);
		return 0;
	}
	VI_UNLOCK(vp);

	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		/* the tsleep() result is ignored; interruption is detected
		   via smb_td_intr() */
		(void)tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", slptimeo);
		error = smb_td_intr(td);
		if (error == EINTR && intrflg)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (error);
}