/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 * $FreeBSD$
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
					 int *run));
static void vnode_pager_iodone __P((struct buf *bp));
static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
static void vnode_pager_dealloc __P((vm_object_t));
static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));

struct pagerops vnodepagerops = {
	NULL,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

int vnode_pbuf_freecnt = -1;	/* start out unlimited */

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	/*
	 * XXX hack - This initialization should be put somewhere else.
	 */
	if (vnode_pbuf_freecnt < 0) {
		vnode_pbuf_freecnt = nswbuf / 2 + 1;
	}

	vp = (struct vnode *) handle;

	/*
	 * Prevent race condition when allocating the object.  This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	while (vp->v_flag & VOLOCK) {
		vp->v_flag |= VOWANT;
		tsleep(vp, PVM, "vnpobj", 0);
	}
	vp->v_flag |= VOLOCK;
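
	/*
	 * VOLOCK/VOWANT form a hand-rolled sleep lock: VOLOCK is the
	 * lock bit, VOWANT records that a sleeper must be woken on
	 * release.  The matching release and wakeup are at the bottom
	 * of this function.  The unlocked test-and-set above relies on
	 * the kernel not preempting this code between the flag test
	 * and the flag set.
	 */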

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while (((object = vp->v_object) != NULL) &&
	    (object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size.
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
		object->flags = 0;

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
		vp->v_usecount++;
	} else {
		object->ref_count++;
		vp->v_usecount++;
	}

	vp->v_flag &= ~VOLOCK;
	if (vp->v_flag & VOWANT) {
		vp->v_flag &= ~VOWANT;
		wakeup(vp);
	}
	return (object);
}

static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	register struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	vp->v_object = NULL;
	vp->v_flag &= ~(VTEXT | VOBJBUF);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if ((vp == NULL) || (vp->v_flag & VDOOMED))
		return FALSE;

	/*
	 * If the filesystem is no longer mounted, or the offset is beyond
	 * end of file, we do not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
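
	/*
	 * Worked example (illustrative, assuming 4K pages): with an 8K
	 * filesystem block, pagesperblock is 2, so page index 5 maps to
	 * fs block 2 at page offset 1 within the block.  With a 2K
	 * block, blocksperpage is 2 and page index 5 maps to fs block
	 * 10.  VOP_BMAP reports its before/after runs in fs blocks, so
	 * they are rescaled to pages below.
	 */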
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
	    after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;

			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
				numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_pindex_t nobjsize;
	vm_object_t object = vp->v_object;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

	/*
	 * File has shrunk.  Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
		vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
		if (nobjsize < object->size) {
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode...
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup(object, OFF_TO_IDX(nsize));
			if (m) {
				int base = (int)nsize & PAGE_MASK;
				int size = PAGE_SIZE - base;

				/*
				 * Clear out partial-page garbage in case
				 * the page has been mapped.
				 */
				kva = vm_pager_map_page(m);
				bzero((caddr_t)kva + base, size);
				vm_pager_unmap_page(kva);

				/*
				 * Clear out partial-page dirty bits.  This
				 * has the side effect of setting the valid
				 * bits, but that is ok.  There are a bunch
				 * of places in the VM system where we expect
				 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
				 * case is one of them.  If the page is still
				 * partially dirty, make it fully dirty.
				 */
				vm_page_set_validclean(m, base, size);
				if (m->dirty != 0)
					m->dirty = VM_PAGE_BITS_ALL;
			}
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
}

void
vnode_pager_freepage(m)
	vm_page_t m;
{
	vm_page_free(m);
}

/*
 * calculate the disk address (in DEV_BSIZE units) backing the
 * specified byte offset within the file
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	struct vnode *rtvp;
	int err;
	daddr_t vblock;
	int voffset;

	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;
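
	/*
	 * Illustrative numbers (assuming an 8K fs block and the usual
	 * 512-byte DEV_BSIZE): a file offset of 10000 splits into
	 * vblock 1 and voffset 1808; once VOP_BMAP translates vblock
	 * to a device block, the byte remainder contributes
	 * voffset / DEV_BSIZE == 3 additional device blocks.
	 */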
	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
static void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

/*
 * small block file system vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {

		if (vm_page_bits(i * bsize, bsize) & m->valid)
			continue;

		fileaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_iodone;
			bp->b_rcred = bp->b_wcred = curproc->p_ucred;
			if (bp->b_rcred != NOCRED)
				crhold(bp->b_rcred);
			if (bp->b_wcred != NOCRED)
				crhold(bp->b_wcred);
			bp->b_data = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			runningbufspace += bp->b_runningbufspace;

			/* do the input */
			BUF_STRATEGY(bp);

			/* we definitely need to be at splvm here */
			s = splvm();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;

	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_procp = curproc;

		error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
		if (!error) {
			register int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_GETPAGES, their VOP_GETPAGES should call to
 * vnode_pager_generic_getpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
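
/*
 * For reference, a minimal VOP_GETPAGES for a local media FS is just a
 * thin wrapper (a sketch only; "foofs" is hypothetical, argument names
 * per the vop_getpages_args template):
 *
 *	static int
 *	foofs_getpages(struct vop_getpages_args *ap)
 *	{
 *		return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_reqpage));
 *	}
 */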
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	vp = object->handle;
	/*
	 * XXX temporary diagnostic message to help track stale FS code;
	 * returning EOPNOTSUPP from here may make things unhappy.
	 */
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	if (rtval == EOPNOTSUPP) {
		printf("vnode_pager: *** WARNING *** stale FS getpages\n");
		rtval = vnode_pager_generic_getpages(vp, m, bytes, reqpage);
	}
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, size, bsize, first, firstaddr;
	struct vnode *dp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int count;
	int error = 0;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

	/*
	 * if the blocksize is smaller than a page size, then use
	 * special small filesystem code.  NFS sometimes has a small
	 * blocksize, but it can handle large reads itself.
	 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vnode_pager_freepage(m[i]);
			}
		}
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(m[i]);
		}
		return VM_PAGER_OK;
	}
	m[reqpage]->valid = 0;

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
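	/*
	 * Each pass asks VOP_BMAP (via vnode_pager_addr()) for the
	 * length of the contiguous run starting at m[i].  Pages that
	 * turn out to be unbacked (holes), or that lie in runs entirely
	 * before the requested page, are freed; once a run containing
	 * reqpage is found, any pages beyond it are freed and count is
	 * clipped, leaving a single contiguous transfer.  first marks
	 * the start of the chosen run, and the surviving pages are
	 * shifted to the front of m[] just below.
	 */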
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				/* XXX no %qd in kernel. */
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%lx%08lx, vnp_size: 0x%lx%08lx",
				    firstaddr, (u_long)(foff >> 32),
				    (u_long)(u_int32_t)foff,
				    (u_long)(u_int32_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (u_long)(u_int32_t)
				    object->un_pager.vnp.vnp_size);
			}
			vnode_pager_freepage(m[i]);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			int j;

			for (j = i; j < runend; j++) {
				vnode_pager_freepage(m[j]);
			}
		} else {
			if (runpg < (count - first)) {
				for (i = first + runpg; i < count; i++)
					vnode_pager_freepage(m[i]);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices.
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR) {
		int secmask = dp->v_rdev->si_bsize_phys - 1;

		KASSERT(secmask < PAGE_SIZE,
		    ("vnode_pager_generic_getpages: sector size %d too large\n",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}
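
	/*
	 * For instance, with 512-byte sectors secmask is 0x1ff, and a
	 * transfer clipped to 700 bytes by EOF above is padded back up
	 * to 1024 so the device always sees whole sectors.  Any space
	 * in the mapped run beyond the padded size is zeroed once the
	 * read completes.
	 */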
	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	bp->b_rcred = bp->b_wcred = curproc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	BUF_STRATEGY(bp);

	/* we definitely need to be at splvm here */
	s = splvm();
	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp, &vnode_pbuf_freecnt);

	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
			pmap_clear_modify(mt);
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(mt, FALSE); */
		}

		vm_page_flag_clear(mt, PG_ZERO);
		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating the pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				if (mt->flags & PG_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vnode_pager_freepage(mt);
			}
		}
	}
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call to
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed.  XXX
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;
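
	/*
	 * Note that this relies on OBJPC_SYNC and VM_PAGER_PUT_SYNC
	 * sharing the same bit value, since the combined flags end up
	 * being interpreted as VM_PAGER_PUT_* flags by
	 * vnode_pager_generic_putpages() below.
	 */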
	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	if (vp->v_type != VREG)
		mp = NULL;
	(void)vn_start_write(vp, &mp, V_WAIT);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	if (rtval == EOPNOTSUPP) {
		printf("vnode_pager: *** WARNING *** stale FS putpages\n");
		rtval = vnode_pager_generic_putpages(vp, m, bytes, sync, rtvals);
	}
	vn_finished_write(mp);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;
	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
		    (long)m[0]->pindex, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset)
			maxsize = object->un_pager.vnp.vnp_size - poffset;
		else
			maxsize = 0;
		ncount = btoc(maxsize);
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.
	 */
	ioflags = IO_VMIO;
	ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC : IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL : 0;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_procp = (struct proc *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %lu\n",
		    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	struct proc *p = curproc;	/* XXX */

	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;
		if (object->flags & OBJ_DEAD)
			return NULL;

		while (vget(object->handle,
		    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
			if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
				return NULL;
			printf("vnode_pager_lock: retrying\n");
		}
		return object->handle;
	}
	return NULL;
}