/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 *	$Id: vnode_pager.c,v 1.78 1997/12/29 00:25:11 dyson Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  This
 *	will greatly simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
					 int *run));
static void vnode_pager_iodone __P((struct buf *bp));
static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
static void vnode_pager_dealloc __P((vm_object_t));
static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static int vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));

struct pagerops vnodepagerops = {
	NULL,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

static int vnode_pager_leaf_getpages __P((vm_object_t object, vm_page_t *m,
					  int count, int reqpage));
static int vnode_pager_leaf_putpages __P((vm_object_t object, vm_page_t *m,
					  int count, boolean_t sync,
					  int *rtvals));

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
		  vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) &&
	    (object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Allocate an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, size);
		object->flags = 0;

		object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;

		object->handle = handle;
		vp->v_object = object;
		vp->v_usecount++;
	} else {
		object->ref_count++;
		vp->v_usecount++;
	}

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}

/*
 * Tear down the pager/object association for a vnode.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	register struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	if (object->paging_in_progress) {
		int s = splbio();
		while (object->paging_in_progress) {
			object->flags |= OBJ_PIPWNT;
			tsleep(object, PVM, "vnpdea", 0);
		}
		splx(s);
	}

	object->flags |= OBJ_DEAD;
	object->handle = NULL;
	object->type = OBJT_DEFAULT;
	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT|VOBJBUF);
}

/*
 * Return whether the requested page is backed by the file (via VOP_BMAP),
 * and optionally how many contiguous pages before and after it are too.
 */
static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
	    after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
				numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object = vp->v_object;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		vm_ooffset_t nsizerounded;
		nsizerounded = IDX_TO_OFF(OFF_TO_IDX(nsize + PAGE_MASK));
		if (nsizerounded < object->un_pager.vnp.vnp_size) {
			vm_pindex_t st, end;
			st = OFF_TO_IDX(nsize + PAGE_MASK);
			end = OFF_TO_IDX(object->un_pager.vnp.vnp_size);

			vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
			vm_object_page_remove(object, st, end, FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, OFF_TO_IDX(nsize));
			if (m) {
				kva = vm_pager_map_page(m);
				bzero((caddr_t) kva + (nsize & PAGE_MASK),
				    (int) (round_page(nsize) - nsize));
				vm_pager_unmap_page(kva);
			}
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = OFF_TO_IDX(nsize + PAGE_MASK);
}

/*
 * Wake up any sleepers on the page and free it.
 */
void
vnode_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	struct vnode *rtvp;
	int err;
	daddr_t vblock;
	int voffset;

	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
static void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

/*
 * small block file system vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {

		if ((vm_page_bits(IDX_TO_OFF(m->pindex) + i * bsize, bsize) & m->valid))
			continue;

		fileaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
		if (fileaddr != -1) {
			bp = getpbuf();

			/* build a minimal buffer header */
			bp->b_flags = B_BUSY | B_READ | B_CALL;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_proc = curproc;
			bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_data = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;

			/* do the input */
			VOP_STRATEGY(bp);

			/* we definitely need to be at splbio here */

			s = splbio();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_flags & B_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->flags &= ~PG_ZERO;
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;

	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = (struct proc *) 0;

		error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	m->dirty = 0;
	m->flags &= ~PG_ZERO;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;

	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count*PAGE_SIZE, reqpage, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_getpages(object, m, count, reqpage);
	else
		return rtval;
}

static int
vnode_pager_leaf_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	vm_offset_t kva;
	off_t foff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp, *vp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {

		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}
	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block
	 * then, the entire page is valid --
	 * XXX no it isn't
	 */

	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
		m[reqpage]->valid = 0;

	if (m[reqpage]->valid) {
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(m[i]);
		}
		return VM_PAGER_OK;
	}

	/*
	 * here on direct device I/O
	 */

	firstaddr = -1;
	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
				    firstaddr, foff, object->un_pager.vnp.vnp_size);
			}
			vnode_pager_freepage(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;
			for (j = i; j < runend; j++) {
				vnode_pager_freepage(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vnode_pager_freepage(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR)
		size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	bp = getpbuf();
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_flags = B_BUSY | B_READ | B_CALL;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	bp->b_proc = curproc;
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	VOP_STRATEGY(bp);

	s = splbio();
	/* we definitely need to be at splbio here */

	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_flags & B_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp);

	for (i = 0; i < count; i++) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
		m[i]->dirty = 0;
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->flags &= ~PG_ZERO;
		if (i != reqpage) {

			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating the pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				vm_page_deactivate(m[i]);
				PAGE_WAKEUP(m[i]);
			} else {
				vnode_pager_freepage(m[i]);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * Vnode pager output: try VOP_PUTPAGES first, falling back to the
 * generic leaf routine if the filesystem returns EOPNOTSUPP.
 */
static int
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;

	vp = object->handle;
	rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0);
	if (rtval == EOPNOTSUPP)
		return vnode_pager_leaf_putpages(object, m, count, sync, rtvals);
	else
		return rtval;
}

/*
 * generic vnode pager output routine
 */
static int
vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;

	struct vnode *vp;
	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;

	vp = object->handle;
	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->pindex, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset)
			maxsize = object->un_pager.vnp.vnp_size - poffset;
		else
			maxsize = 0;
		ncount = btoc(maxsize);
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
#ifdef BOGUS
			if (ncount == 0) {
				printf("vnode_pager_putpages: write past end of file: %d, %lu\n",
				    poffset,
				    (unsigned long) object->un_pager.vnp.vnp_size);
				return rtvals[0];
			}
#endif
		}
	}

	for (i = 0; i < count; i++) {
		m[i]->busy++;
		m[i]->flags &= ~PG_BUSY;
	}

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_procp = (struct proc *) 0;
	error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %ld\n",
		    auio.uio_resid, m[0]->pindex);
	}
	for (i = 0; i < count; i++) {
		m[i]->busy--;
		if (i < ncount) {
			rtvals[i] = VM_PAGER_OK;
		}
		if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED))
			wakeup(m[i]);
	}
	return rtvals[0];
}

/*
 * Walk the backing-object chain and shared-lock the vnode of the first
 * vnode-backed object found, returning that vnode (or NULL if none).
 */
struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	struct proc *p = curproc;	/* XXX */

	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;

		vn_lock(object->handle,
		    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p);
		return object->handle;
	}
	return NULL;
}
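
/*
 * Example (a minimal sketch, not part of this file): per the TODO at the
 * top, filesystems are expected to grow their own VOP_GETPAGES/PUTPAGES
 * implementations.  A filesystem with no fast path can simply return
 * EOPNOTSUPP from its entry point, which makes vnode_pager_getpages()
 * above fall back to vnode_pager_leaf_getpages().  The "myfs_getpages"
 * name below is hypothetical, for illustration only:
 *
 *	static int
 *	myfs_getpages(ap)
 *		struct vop_getpages_args *ap;
 *	{
 *		return (EOPNOTSUPP);
 *	}
 */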