/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 *	$Id: vnode_pager.c,v 1.70 1997/03/08 04:33:47 dyson Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
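 *	(Until then, the VOP_GETPAGES/VOP_PUTPAGES calls below fall back
 *	to the BMAP-based leaf routines whenever a filesystem returns
 *	EOPNOTSUPP.)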
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
                                         int *run));
static void vnode_pager_iodone __P((struct buf *bp));
static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
static void vnode_pager_dealloc __P((vm_object_t));
static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static int vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));

struct pagerops vnodepagerops = {
        NULL,
        vnode_pager_alloc,
        vnode_pager_dealloc,
        vnode_pager_getpages,
        vnode_pager_putpages,
        vnode_pager_haspage,
        NULL
};

static int vnode_pager_leaf_getpages __P((vm_object_t object, vm_page_t *m,
                                          int count, int reqpage));
static int vnode_pager_leaf_putpages __P((vm_object_t object, vm_page_t *m,
                                          int count, boolean_t sync,
                                          int *rtvals));

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(handle, size, prot, offset)
        void *handle;
        vm_size_t size;
        vm_prot_t prot;
        vm_ooffset_t offset;
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        vp = (struct vnode *) handle;

        /*
         * Prevent race condition when allocating the object.  This
         * can happen with NFS vnodes since the nfsnode isn't locked.
         */
        while (vp->v_flag & VOLOCK) {
                vp->v_flag |= VOWANT;
                tsleep(vp, PVM, "vnpobj", 0);
        }
        vp->v_flag |= VOLOCK;

        /*
         * If the object is being terminated, wait for it to
         * go away.
         */
        while (((object = vp->v_object) != NULL) &&
            (object->flags & OBJ_DEAD)) {
                tsleep(object, PVM, "vadead", 0);
        }

        if (object == NULL) {
                /*
                 * Allocate an object of the appropriate size.
                 */
                object = vm_object_allocate(OBJT_VNODE, size);
                if (vp->v_type == VREG)
                        object->flags = OBJ_CANPERSIST;
                else
                        object->flags = 0;

                if (vp->v_usecount == 0)
                        panic("vnode_pager_alloc: no vnode reference");
                /*
                 * Hold a reference to the vnode and initialize object data.
                 */
                vp->v_usecount++;
                object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;

                object->handle = handle;
                vp->v_object = object;
        } else {
                /*
                 * vm_object_reference() will remove the object from the
                 * cache if found and gain a reference to the object.
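                 * The reference taken here is what keeps a cached object
                 * from being reclaimed while this caller is still using it.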
                 */
                vm_object_reference(object);
        }

        if (vp->v_type == VREG)
                vp->v_flag |= VVMIO;

        vp->v_flag &= ~VOLOCK;
        if (vp->v_flag & VOWANT) {
                vp->v_flag &= ~VOWANT;
                wakeup(vp);
        }
        return (object);
}

static void
vnode_pager_dealloc(object)
        vm_object_t object;
{
        register struct vnode *vp = object->handle;

        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        if (object->paging_in_progress) {
                int s = splbio();
                while (object->paging_in_progress) {
                        object->flags |= OBJ_PIPWNT;
                        tsleep(object, PVM, "vnpdea", 0);
                }
                splx(s);
        }

        object->handle = NULL;

        vp->v_object = NULL;
        vp->v_flag &= ~(VTEXT | VVMIO);
        vp->v_flag |= VAGE;
        vrele(vp);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
        vm_object_t object;
        vm_pindex_t pindex;
        int *before;
        int *after;
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;

        /*
         * If the filesystem is no longer mounted, or the offset is beyond
         * end of file, we do not have the page.
         */
        if ((vp->v_mount == NULL) ||
            (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
        err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
            after, before);
        if (err)
                return TRUE;
        if (bn == -1)
                return FALSE;
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        int numafter;
                        *after *= pagesperblock;
                        numafter = pagesperblock - (poff + 1);
                        if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
                                numafter = OFF_TO_IDX(object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex));
                        }
                        *after += numafter;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
        struct vnode *vp;
        vm_ooffset_t nsize;
{
        vm_object_t object = vp->v_object;

        if (object == NULL)
                return;

        /*
         * Hasn't changed size.
         */
        if (nsize == object->un_pager.vnp.vnp_size)
                return;

        /*
         * File has shrunk.  Toss any cached pages beyond the new EOF.
         */
        if (nsize < object->un_pager.vnp.vnp_size) {
                vm_ooffset_t nsizerounded;
                nsizerounded = IDX_TO_OFF(OFF_TO_IDX(nsize + PAGE_MASK));
                if (nsizerounded < object->un_pager.vnp.vnp_size) {
                        vm_object_page_remove(object,
                            OFF_TO_IDX(nsize + PAGE_MASK),
                            OFF_TO_IDX(object->un_pager.vnp.vnp_size),
                            FALSE);
                }
                /*
                 * This gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode...
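                 * (The lookup/bzero below clears the region between the
                 * new EOF and the end of that page, so the truncated tail
                 * cannot be read back through an existing mapping.)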
                 */
                if (nsize & PAGE_MASK) {
                        vm_offset_t kva;
                        vm_page_t m;

                        m = vm_page_lookup(object, OFF_TO_IDX(nsize));
                        if (m) {
                                kva = vm_pager_map_page(m);
                                bzero((caddr_t) kva + (nsize & PAGE_MASK),
                                    (int) (round_page(nsize) - nsize));
                                vm_pager_unmap_page(kva);
                        }
                }
        }
        object->un_pager.vnp.vnp_size = nsize;
        object->size = OFF_TO_IDX(nsize + PAGE_MASK);
}

void
vnode_pager_umount(mp)
        register struct mount *mp;
{
        struct proc *p = curproc;       /* XXX */
        struct vnode *vp, *nvp;

loop:
        for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
                /*
                 * Vnode can be reclaimed by getnewvnode() while we
                 * traverse the list.
                 */
                if (vp->v_mount != mp)
                        goto loop;

                /*
                 * Save the next pointer now since uncaching may terminate
                 * the object and render the vnode invalid.
                 */
                nvp = vp->v_mntvnodes.le_next;

                if (vp->v_object != NULL) {
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
                        vnode_pager_uncache(vp, p);
                        VOP_UNLOCK(vp, 0, p);
                }
        }
}

/*
 * Remove vnode associated object from the object cache.
 * This routine must be called with the vnode locked.
 *
 * XXX unlock the vnode.
 * We must do this since uncaching the object may result in its
 * destruction, which may initiate paging activity, which may necessitate
 * re-locking the vnode.
 */
void
vnode_pager_uncache(vp, p)
        struct vnode *vp;
        struct proc *p;
{
        vm_object_t object;

        /*
         * Not a mapped vnode.
         */
        object = vp->v_object;
        if (object == NULL)
                return;

        vm_object_reference(object);

        /*
         * XXX We really should handle locking on
         * VBLK devices...
         */
        if (vp->v_type != VBLK)
                VOP_UNLOCK(vp, 0, p);
        pager_cache(object, FALSE);
        if (vp->v_type != VBLK)
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
        return;
}

void
vnode_pager_freepage(m)
        vm_page_t m;
{
        PAGE_WAKEUP(m);
        vm_page_free(m);
}

/*
 * Calculate the linear (byte) disk address of the specified virtual
 * file address.
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
        struct vnode *vp;
        vm_ooffset_t address;
        int *run;
{
        int rtaddress;
        int bsize;
        daddr_t block;
        struct vnode *rtvp;
        int err;
        daddr_t vblock;
        int voffset;

        if ((int) address < 0)
                return -1;

        if (vp->v_mount == NULL)
                return -1;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

        if (err || (block == -1))
                rtaddress = -1;
        else {
                rtaddress = block + voffset / DEV_BSIZE;
                if (run) {
                        *run += 1;
                        *run *= bsize / PAGE_SIZE;
                        *run -= voffset / PAGE_SIZE;
                }
        }

        return rtaddress;
}

/*
 * Interrupt routine for I/O completion.
 */
static void
vnode_pager_iodone(bp)
        struct buf *bp;
{
        bp->b_flags |= B_DONE;
        wakeup(bp);
}

/*
 * Small block filesystem vnode pager input.
 */
static int
vnode_pager_input_smlfs(object, m)
        vm_object_t object;
        vm_page_t m;
{
        int i;
        int s;
        struct vnode *dp, *vp;
        struct buf *bp;
        vm_offset_t kva;
        int fileaddr;
        vm_offset_t bsize;
        int error = 0;

        vp = object->handle;
        if (vp->v_mount == NULL)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;
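
        /*
         * Read the page one filesystem block at a time: chunks that are
         * already valid are skipped, the rest are bmapped and read raw
         * through the underlying device vnode returned by VOP_BMAP();
         * unbacked (hole) blocks are simply zeroed.
         */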
        VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

        kva = vm_pager_map_page(m);

        for (i = 0; i < PAGE_SIZE / bsize; i++) {

                if ((vm_page_bits(IDX_TO_OFF(m->pindex) + i * bsize, bsize) & m->valid))
                        continue;

                fileaddr = vnode_pager_addr(vp,
                    IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
                if (fileaddr != -1) {
                        bp = getpbuf();

                        /* build a minimal buffer header */
                        bp->b_flags = B_BUSY | B_READ | B_CALL;
                        bp->b_iodone = vnode_pager_iodone;
                        bp->b_proc = curproc;
                        bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
                        if (bp->b_rcred != NOCRED)
                                crhold(bp->b_rcred);
                        if (bp->b_wcred != NOCRED)
                                crhold(bp->b_wcred);
                        bp->b_un.b_addr = (caddr_t) kva + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetvp(dp, bp);
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;

                        /* do the input */
                        VOP_STRATEGY(bp);

                        /* we definitely need to be at splbio here */
                        s = splbio();
                        while ((bp->b_flags & B_DONE) == 0) {
                                tsleep(bp, PVM, "vnsrd", 0);
                        }
                        splx(s);
                        if ((bp->b_flags & B_ERROR) != 0)
                                error = EIO;

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        relpbuf(bp);
                        if (error)
                                break;

                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                } else {
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        bzero((caddr_t) kva + i * bsize, bsize);
                }
        }
        vm_pager_unmap_page(kva);
        pmap_clear_modify(VM_PAGE_TO_PHYS(m));
        m->flags &= ~PG_ZERO;
        if (error) {
                return VM_PAGER_ERROR;
        }
        return VM_PAGER_OK;
}

/*
 * Old style vnode pager input routine.
 */
static int
vnode_pager_input_old(object, m)
        vm_object_t object;
        vm_page_t m;
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        vm_offset_t kva;

        error = 0;

        /*
         * Return failure if beyond current EOF.
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return VM_PAGER_BAD;
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                kva = vm_pager_map_page(m);

                aiov.iov_base = (caddr_t) kva;
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_procp = (struct proc *) 0;

                error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
                if (!error) {
                        register int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t) kva + count, PAGE_SIZE - count);
                }
                vm_pager_unmap_page(kva);
        }
        pmap_clear_modify(VM_PAGE_TO_PHYS(m));
        m->dirty = 0;
        m->flags &= ~PG_ZERO;
        return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * Generic vnode pager input routine.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
        vm_object_t object;
        vm_page_t *m;
        int count;
        int reqpage;
{
        int rtval;
        struct vnode *vp;

        if (object->flags & OBJ_VNODE_GONE)
                return VM_PAGER_ERROR;
        vp = object->handle;
        rtval = VOP_GETPAGES(vp, m, count * PAGE_SIZE, reqpage, 0);
        if (rtval == EOPNOTSUPP)
                return vnode_pager_leaf_getpages(object, m, count, reqpage);
        else
                return rtval;
}

static int
vnode_pager_leaf_getpages(object, m, count, reqpage)
        vm_object_t object;
        vm_page_t *m;
        int count;
        int reqpage;
{
        vm_offset_t kva;
        off_t foff;
        int i, size, bsize, first, firstaddr;
        struct vnode *dp, *vp;
        int runpg;
        int runend;
        struct buf *bp;
        int s;
        int error = 0;

        vp = object->handle;
        if (vp->v_mount == NULL)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        /* get the UNDERLYING device for the file with VOP_BMAP() */

        /*
         * Originally, we did not check for an error return value -- assuming
         * an fs always has a bmap entry point -- that assumption is wrong!!!
         */
        foff = IDX_TO_OFF(m[reqpage]->pindex);

        /*
         * If we can't bmap, use old VOP code.
         */
        if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
                for (i = 0; i < count; i++) {
                        if (i != reqpage) {
                                vnode_pager_freepage(m[i]);
                        }
                }
                cnt.v_vnodein++;
                cnt.v_vnodepgsin++;
                return vnode_pager_input_old(object, m[reqpage]);

                /*
                 * If the blocksize is smaller than a page size, then use
                 * special small filesystem code.  NFS sometimes has a small
                 * blocksize, but it can handle large reads itself.
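                 * (For example, a filesystem with 512-byte blocks on a
                 * machine with 4K pages needs eight block reads to fill
                 * one page, which is what vnode_pager_input_smlfs() does.)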
679 */ 680 } else if ((PAGE_SIZE / bsize) > 1 && 681 (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) { 682 683 for (i = 0; i < count; i++) { 684 if (i != reqpage) { 685 vnode_pager_freepage(m[i]); 686 } 687 } 688 cnt.v_vnodein++; 689 cnt.v_vnodepgsin++; 690 return vnode_pager_input_smlfs(object, m[reqpage]); 691 } 692 /* 693 * if ANY DEV_BSIZE blocks are valid on a large filesystem block 694 * then, the entire page is valid -- 695 * XXX no it isn't 696 */ 697 698 if (m[reqpage]->valid != VM_PAGE_BITS_ALL) 699 m[reqpage]->valid = 0; 700 701 if (m[reqpage]->valid) { 702 m[reqpage]->valid = VM_PAGE_BITS_ALL; 703 for (i = 0; i < count; i++) { 704 if (i != reqpage) 705 vnode_pager_freepage(m[i]); 706 } 707 return VM_PAGER_OK; 708 } 709 710 /* 711 * here on direct device I/O 712 */ 713 714 firstaddr = -1; 715 /* 716 * calculate the run that includes the required page 717 */ 718 for(first = 0, i = 0; i < count; i = runend) { 719 firstaddr = vnode_pager_addr(vp, 720 IDX_TO_OFF(m[i]->pindex), &runpg); 721 if (firstaddr == -1) { 722 if (i == reqpage && foff < object->un_pager.vnp.vnp_size) { 723 panic("vnode_pager_putpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d", 724 firstaddr, foff, object->un_pager.vnp.vnp_size); 725 } 726 vnode_pager_freepage(m[i]); 727 runend = i + 1; 728 first = runend; 729 continue; 730 } 731 runend = i + runpg; 732 if (runend <= reqpage) { 733 int j; 734 for (j = i; j < runend; j++) { 735 vnode_pager_freepage(m[j]); 736 } 737 } else { 738 if (runpg < (count - first)) { 739 for (i = first + runpg; i < count; i++) 740 vnode_pager_freepage(m[i]); 741 count = first + runpg; 742 } 743 break; 744 } 745 first = runend; 746 } 747 748 /* 749 * the first and last page have been calculated now, move input pages 750 * to be zero based... 
751 */ 752 if (first != 0) { 753 for (i = first; i < count; i++) { 754 m[i - first] = m[i]; 755 } 756 count -= first; 757 reqpage -= first; 758 } 759 760 /* 761 * calculate the file virtual address for the transfer 762 */ 763 foff = IDX_TO_OFF(m[0]->pindex); 764 765 /* 766 * calculate the size of the transfer 767 */ 768 size = count * PAGE_SIZE; 769 if ((foff + size) > object->un_pager.vnp.vnp_size) 770 size = object->un_pager.vnp.vnp_size - foff; 771 772 /* 773 * round up physical size for real devices 774 */ 775 if (dp->v_type == VBLK || dp->v_type == VCHR) 776 size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 777 778 bp = getpbuf(); 779 kva = (vm_offset_t) bp->b_data; 780 781 /* 782 * and map the pages to be read into the kva 783 */ 784 pmap_qenter(kva, m, count); 785 786 /* build a minimal buffer header */ 787 bp->b_flags = B_BUSY | B_READ | B_CALL; 788 bp->b_iodone = vnode_pager_iodone; 789 /* B_PHYS is not set, but it is nice to fill this in */ 790 bp->b_proc = curproc; 791 bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; 792 if (bp->b_rcred != NOCRED) 793 crhold(bp->b_rcred); 794 if (bp->b_wcred != NOCRED) 795 crhold(bp->b_wcred); 796 bp->b_blkno = firstaddr; 797 pbgetvp(dp, bp); 798 bp->b_bcount = size; 799 bp->b_bufsize = size; 800 801 cnt.v_vnodein++; 802 cnt.v_vnodepgsin += count; 803 804 /* do the input */ 805 VOP_STRATEGY(bp); 806 807 s = splbio(); 808 /* we definitely need to be at splbio here */ 809 810 while ((bp->b_flags & B_DONE) == 0) { 811 tsleep(bp, PVM, "vnread", 0); 812 } 813 splx(s); 814 if ((bp->b_flags & B_ERROR) != 0) 815 error = EIO; 816 817 if (!error) { 818 if (size != count * PAGE_SIZE) 819 bzero((caddr_t) kva + size, PAGE_SIZE * count - size); 820 } 821 pmap_qremove(kva, count); 822 823 /* 824 * free the buffer header back to the swap buffer pool 825 */ 826 relpbuf(bp); 827 828 for (i = 0; i < count; i++) { 829 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 830 m[i]->dirty = 0; 831 m[i]->valid = VM_PAGE_BITS_ALL; 832 m[i]->flags &= ~PG_ZERO; 833 if (i != reqpage) { 834 835 /* 836 * whether or not to leave the page activated is up in 837 * the air, but we should put the page on a page queue 838 * somewhere. (it already is in the object). Result: 839 * It appears that emperical results show that 840 * deactivating pages is best. 841 */ 842 843 /* 844 * just in case someone was asking for this page we 845 * now tell them that it is ok to use 846 */ 847 if (!error) { 848 vm_page_deactivate(m[i]); 849 PAGE_WAKEUP(m[i]); 850 } else { 851 vnode_pager_freepage(m[i]); 852 } 853 } 854 } 855 if (error) { 856 printf("vnode_pager_getpages: I/O read error\n"); 857 } 858 return (error ? 
VM_PAGER_ERROR : VM_PAGER_OK); 859 } 860 861 static int 862 vnode_pager_putpages(object, m, count, sync, rtvals) 863 vm_object_t object; 864 vm_page_t *m; 865 int count; 866 boolean_t sync; 867 int *rtvals; 868 { 869 int rtval; 870 struct vnode *vp; 871 872 if (object->flags & OBJ_VNODE_GONE) 873 return VM_PAGER_ERROR; 874 875 vp = object->handle; 876 rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0); 877 if (rtval == EOPNOTSUPP) 878 return vnode_pager_leaf_putpages(object, m, count, sync, rtvals); 879 else 880 return rtval; 881 } 882 883 /* 884 * generic vnode pager output routine 885 */ 886 static int 887 vnode_pager_leaf_putpages(object, m, count, sync, rtvals) 888 vm_object_t object; 889 vm_page_t *m; 890 int count; 891 boolean_t sync; 892 int *rtvals; 893 { 894 int i; 895 896 struct vnode *vp; 897 int maxsize, ncount; 898 vm_ooffset_t poffset; 899 struct uio auio; 900 struct iovec aiov; 901 int error; 902 903 vp = object->handle;; 904 for (i = 0; i < count; i++) 905 rtvals[i] = VM_PAGER_AGAIN; 906 907 if ((int) m[0]->pindex < 0) { 908 printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->pindex, m[0]->dirty); 909 rtvals[0] = VM_PAGER_BAD; 910 return VM_PAGER_BAD; 911 } 912 913 maxsize = count * PAGE_SIZE; 914 ncount = count; 915 916 poffset = IDX_TO_OFF(m[0]->pindex); 917 if (maxsize + poffset > object->un_pager.vnp.vnp_size) { 918 if (object->un_pager.vnp.vnp_size > poffset) 919 maxsize = object->un_pager.vnp.vnp_size - poffset; 920 else 921 maxsize = 0; 922 ncount = btoc(maxsize); 923 if (ncount < count) { 924 for (i = ncount; i < count; i++) { 925 rtvals[i] = VM_PAGER_BAD; 926 } 927 #ifdef BOGUS 928 if (ncount == 0) { 929 printf("vnode_pager_putpages: write past end of file: %d, %lu\n", 930 poffset, 931 (unsigned long) object->un_pager.vnp.vnp_size); 932 return rtvals[0]; 933 } 934 #endif 935 } 936 } 937 938 for (i = 0; i < count; i++) { 939 m[i]->busy++; 940 m[i]->flags &= ~PG_BUSY; 941 } 942 943 aiov.iov_base = (caddr_t) 0; 944 aiov.iov_len = maxsize; 945 auio.uio_iov = &aiov; 946 auio.uio_iovcnt = 1; 947 auio.uio_offset = poffset; 948 auio.uio_segflg = UIO_NOCOPY; 949 auio.uio_rw = UIO_WRITE; 950 auio.uio_resid = maxsize; 951 auio.uio_procp = (struct proc *) 0; 952 error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred); 953 cnt.v_vnodeout++; 954 cnt.v_vnodepgsout += ncount; 955 956 if (error) { 957 printf("vnode_pager_putpages: I/O error %d\n", error); 958 } 959 if (auio.uio_resid) { 960 printf("vnode_pager_putpages: residual I/O %d at %ld\n", 961 auio.uio_resid, m[0]->pindex); 962 } 963 for (i = 0; i < count; i++) { 964 m[i]->busy--; 965 if (i < ncount) { 966 rtvals[i] = VM_PAGER_OK; 967 } 968 if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED)) 969 wakeup(m[i]); 970 } 971 return rtvals[0]; 972 } 973 974 struct vnode * 975 vnode_pager_lock(object) 976 vm_object_t object; 977 { 978 struct proc *p = curproc; /* XXX */ 979 980 for (; object != NULL; object = object->backing_object) { 981 if (object->type != OBJT_VNODE) 982 continue; 983 984 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE, p); 985 return object->handle; 986 } 987 return NULL; 988 } 989