/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 *	$Id: vnode_pager.c,v 1.50 1995/10/19 21:35:03 davidg Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/buf.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

struct pagerops vnodepagerops = {
	NULL,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

static int vnode_pager_leaf_getpages();
static int vnode_pager_leaf_putpages();

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(handle, size, prot, offset)
	void *handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t offset;
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (object == NULL) {
		/*
		 * And an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, round_page(size));
		object->flags = OBJ_CANPERSIST;

		/*
		 * Hold a reference to the vnode and initialize object data.
		 */
		VREF(vp);
		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
	} else {
		/*
		 * vm_object_reference() will remove the object from the cache if
		 * found and gain a reference to the object.
		 */
		vm_object_reference(object);
	}

	if (vp->v_type == VREG)
		vp->v_flag |= VVMIO;

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}

void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	register struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	if (object->paging_in_progress) {
		int s = splbio();
		while (object->paging_in_progress) {
			object->flags |= OBJ_PIPWNT;
			tsleep(object, PVM, "vnpdea", 0);
		}
		splx(s);
	}

	object->handle = NULL;

	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT | VVMIO);
	vp->v_flag |= VAGE;
	vrele(vp);
}

boolean_t
vnode_pager_haspage(object, offset, before, after)
	vm_object_t object;
	vm_offset_t offset;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err, run;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock;

	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	reqblock = offset / bsize;
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
	    after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	poff = (offset - (reqblock * bsize)) / PAGE_SIZE;
	if (before) {
		*before *= pagesperblock;
		*before += poff;
	}
	if (after) {
		int numafter;

		*after *= pagesperblock;
		numafter = pagesperblock - (poff + 1);
		if (offset + numafter * PAGE_SIZE > object->un_pager.vnp.vnp_size) {
			numafter = (object->un_pager.vnp.vnp_size - offset) / PAGE_SIZE;
		}
		*after += numafter;
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	vm_object_t object = vp->v_object;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		if (round_page((vm_offset_t) nsize) < object->un_pager.vnp.vnp_size) {
			vm_object_page_remove(object,
			    round_page((vm_offset_t) nsize),
			    object->un_pager.vnp.vnp_size, FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize));
			if (m) {
				kva = vm_pager_map_page(m);
				bzero((caddr_t) kva + (nsize & PAGE_MASK),
				    round_page(nsize) - nsize);
				vm_pager_unmap_page(kva);
			}
		}
	}
	object->un_pager.vnp.vnp_size = (vm_offset_t) nsize;
	object->size = round_page(nsize);
}

/*
 * Uncache the object behind every vnode on a mount point; called at
 * unmount time.
 */
void
vnode_pager_umount(mp)
	register struct mount *mp;
{
	struct vnode *vp, *nvp;

loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * Vnode can be reclaimed by getnewvnode() while we
		 * traverse the list.
		 */
		if (vp->v_mount != mp)
			goto loop;

		/*
		 * Save the next pointer now since uncaching may terminate the
		 * object and render the vnode invalid.
		 */
		nvp = vp->v_mntvnodes.le_next;

		if (vp->v_object != NULL) {
			VOP_LOCK(vp);
			vnode_pager_uncache(vp);
			VOP_UNLOCK(vp);
		}
	}
}

/*
 * Remove vnode associated object from the object cache.
 * This routine must be called with the vnode locked.
 *
 * XXX unlock the vnode.
 * We must do this since uncaching the object may result in its
 * destruction which may initiate paging activity which may necessitate
 * re-locking the vnode.
 */
void
vnode_pager_uncache(vp)
	struct vnode *vp;
{
	vm_object_t object;

	/*
	 * Not a mapped vnode
	 */
	object = vp->v_object;
	if (object == NULL)
		return;

	vm_object_reference(object);
	VOP_UNLOCK(vp);
	pager_cache(object, FALSE);
	VOP_LOCK(vp);
	return;
}

/*
 * Wake up anyone sleeping on the page and free it.
 */
void
vnode_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_offset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	vm_offset_t block;
	struct vnode *rtvp;
	int err;
	int vblock, voffset;

	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

/*
 * small block file system vnode pager input
 */
int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {

		if ((vm_page_bits(m->offset + i * bsize, bsize) & m->valid))
			continue;

		fileaddr = vnode_pager_addr(vp, m->offset + i * bsize, (int *) 0);
		if (fileaddr != -1) {
			bp = getpbuf();

			/* build a minimal buffer header */
			bp->b_flags = B_BUSY | B_READ | B_CALL;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_proc = curproc;
			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_un.b_addr = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;

			/* do the input */
			VOP_STRATEGY(bp);

			/* we definitely need to be at splbio here */
			s = splbio();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_flags & B_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & (PAGE_SIZE - 1), bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & (PAGE_SIZE - 1), bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->flags &= ~PG_ZERO;
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;

	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (m->offset >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (m->offset + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - m->offset;

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = m->offset;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->dirty = 0;
	m->flags &= ~PG_ZERO;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */
int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;

	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count * PAGE_SIZE, reqpage, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_getpages(object, m, count, reqpage);
	else
		return rtval;
}

static int
vnode_pager_leaf_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	vm_offset_t kva, foff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp, *vp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = m[reqpage]->offset;

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {

		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}
	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid --
	 */
	if (m[reqpage]->valid) {
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(m[i]);
		}
		return VM_PAGER_OK;
	}

	/*
	 * here on direct device I/O
	 */

	firstaddr = -1;
	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
				    firstaddr, foff, object->un_pager.vnp.vnp_size);
			}
			vnode_pager_freepage(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;

			for (j = i; j < runend; j++) {
				vnode_pager_freepage(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vnode_pager_freepage(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = m[0]->offset;

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR)
		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	bp = getpbuf();
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_flags = B_BUSY | B_READ | B_CALL;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	bp->b_proc = curproc;
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	VOP_STRATEGY(bp);

	/* we definitely need to be at splbio here */
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_flags & B_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp);

	for (i = 0; i < count; i++) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
		m[i]->dirty = 0;
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->flags &= ~PG_ZERO;
		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				vm_page_deactivate(m[i]);
				PAGE_WAKEUP(m[i]);
			} else {
				vnode_pager_freepage(m[i]);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

int
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;

	vp = object->handle;
	rtval = VOP_PUTPAGES(vp, m, count * PAGE_SIZE, sync, rtvals, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_putpages(object, m, count, sync, rtvals);
	else
		return rtval;
}

/*
 * generic vnode pager output routine
 */
static int
vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	struct vnode *vp;
	int maxsize, ncount;
	struct uio auio;
	struct iovec aiov;
	int error;

	vp = object->handle;
	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->offset < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n",
		    m[0]->offset, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	if (maxsize + m[0]->offset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > m[0]->offset)
			maxsize = object->un_pager.vnp.vnp_size - m[0]->offset;
		else
			maxsize = 0;
		ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE;
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
			if (ncount == 0) {
				printf("vnode_pager_putpages: write past end of file: %d, %d\n",
				    m[0]->offset, object->un_pager.vnp.vnp_size);
				return rtvals[0];
			}
		}
	}

	for (i = 0; i < count; i++) {
		m[i]->busy++;
		m[i]->flags &= ~PG_BUSY;
	}

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = m[0]->offset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_procp = (struct proc *) 0;
	error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %d\n",
		    auio.uio_resid, m[0]->offset);
	}
	for (i = 0; i < count; i++) {
		m[i]->busy--;
		if (i < ncount) {
			rtvals[i] = VM_PAGER_OK;
		}
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED))
			wakeup(m[i]);
	}
	return rtvals[0];
}

/*
 * Walk down the shadow chain and lock the vnode backing the first
 * vnode object found; return that vnode, or NULL if there is none.
 */
struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;

		VOP_LOCK(object->handle);
		return object->handle;
	}
	return NULL;
}