/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c      7.5 (Berkeley) 4/20/91
 * $FreeBSD$
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *      greatly simplify the vnode_pager.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
                                         int *run));
static void vnode_pager_iodone __P((struct buf *bp));
static int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
static int vnode_pager_input_old __P((vm_object_t object, vm_page_t m));
static void vnode_pager_dealloc __P((vm_object_t));
static int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
static boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));

struct pagerops vnodepagerops = {
        NULL,
        vnode_pager_alloc,
        vnode_pager_dealloc,
        vnode_pager_getpages,
        vnode_pager_putpages,
        vnode_pager_haspage,
        NULL
};

int vnode_pbuf_freecnt = -1;    /* start out unlimited */
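/*
 * Note on the NULL slots above: assuming the struct pagerops layout of
 * this era (pgo_init, pgo_alloc, pgo_dealloc, pgo_getpages, pgo_putpages,
 * pgo_haspage, pgo_pageunswapped), the vnode pager needs neither a global
 * init hook nor a pageunswapped hook, so those entries are left NULL.
 */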
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
                  vm_ooffset_t offset)
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        /*
         * XXX hack - This initialization should be put somewhere else.
         */
        if (vnode_pbuf_freecnt < 0) {
                vnode_pbuf_freecnt = nswbuf / 2 + 1;
        }

        vp = (struct vnode *) handle;

        /*
         * Prevent race condition when allocating the object.  This
         * can happen with NFS vnodes since the nfsnode isn't locked.
         */
        while (vp->v_flag & VOLOCK) {
                vp->v_flag |= VOWANT;
                tsleep(vp, PVM, "vnpobj", 0);
        }
        vp->v_flag |= VOLOCK;

        /*
         * If the object is being terminated, wait for it to
         * go away.
         */
        while (((object = vp->v_object) != NULL) &&
            (object->flags & OBJ_DEAD)) {
                tsleep(object, PVM, "vadead", 0);
        }

        if (vp->v_usecount == 0)
                panic("vnode_pager_alloc: no vnode reference");

        if (object == NULL) {
                /*
                 * Allocate a new object of the appropriate size.
                 */
                object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
                object->flags = 0;

                object->un_pager.vnp.vnp_size = size;

                object->handle = handle;
                vp->v_object = object;
                vp->v_usecount++;
        } else {
                object->ref_count++;
                vp->v_usecount++;
        }

        vp->v_flag &= ~VOLOCK;
        if (vp->v_flag & VOWANT) {
                vp->v_flag &= ~VOWANT;
                wakeup(vp);
        }
        return (object);
}

static void
vnode_pager_dealloc(object)
        vm_object_t object;
{
        register struct vnode *vp = object->handle;

        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        vm_object_pip_wait(object, "vnpdea");

        object->handle = NULL;
        object->type = OBJT_DEAD;
        vp->v_object = NULL;
        vp->v_flag &= ~(VTEXT | VOBJBUF);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
        vm_object_t object;
        vm_pindex_t pindex;
        int *before;
        int *after;
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;

        /*
         * If no vp or vp is doomed or marked transparent to VM, we do not
         * have the page.
         */
        if ((vp == NULL) || (vp->v_flag & VDOOMED))
                return FALSE;

        /*
         * If the filesystem is no longer mounted or the offset is beyond
         * end of file, we do not have the page.
         */
        if ((vp->v_mount == NULL) ||
            (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
        err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
            after, before);
        if (err)
                return TRUE;
        if (bn == -1)
                return FALSE;
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        int numafter;
                        *after *= pagesperblock;
                        numafter = pagesperblock - (poff + 1);
                        if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
                                numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
                        }
                        *after += numafter;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return TRUE;
}
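/*
 * Worked example of the before/after scaling above (illustrative,
 * assuming 4K pages and an 8K filesystem block): pagesperblock is 2,
 * so for pindex 5, reqblock is 2 and poff is 1.  If VOP_BMAP() reports
 * one contiguous block before and one after, the counts are scaled to
 * pages: *before becomes 1*2 + 1 = 3, and *after becomes 1*2 plus the
 * 2 - (1+1) = 0 remaining pages of the request's own block, clipped so
 * it never extends past the object size.  With a 512 byte block size,
 * blocksperpage is 8 instead and the block counts are simply divided
 * down to whole pages.
 */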
/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
        struct vnode *vp;
        vm_ooffset_t nsize;
{
        vm_pindex_t nobjsize;
        vm_object_t object = vp->v_object;

        if (object == NULL)
                return;

        /*
         * Size hasn't changed; nothing to do.
         */
        if (nsize == object->un_pager.vnp.vnp_size)
                return;

        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

        /*
         * File has shrunk.  Toss any cached pages beyond the new EOF.
         */
        if (nsize < object->un_pager.vnp.vnp_size) {
                vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
                if (nobjsize < object->size) {
                        vm_object_page_remove(object, nobjsize, object->size,
                            FALSE);
                }
                /*
                 * This gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.
                 */
                if (nsize & PAGE_MASK) {
                        vm_offset_t kva;
                        vm_page_t m;

                        m = vm_page_lookup(object, OFF_TO_IDX(nsize));
                        if (m) {
                                int base = (int)nsize & PAGE_MASK;
                                int size = PAGE_SIZE - base;

                                /*
                                 * Clear out partial-page garbage in case
                                 * the page has been mapped.
                                 */
                                kva = vm_pager_map_page(m);
                                bzero((caddr_t)kva + base, size);
                                vm_pager_unmap_page(kva);

                                /*
                                 * Clear out partial-page dirty bits.  This
                                 * has the side effect of setting the valid
                                 * bits, but that is ok.  There are a bunch
                                 * of places in the VM system where we expect
                                 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
                                 * case is one of them.  If the page is still
                                 * partially dirty, make it fully dirty.
                                 */
                                vm_page_set_validclean(m, base, size);
                                if (m->dirty != 0)
                                        m->dirty = VM_PAGE_BITS_ALL;
                        }
                }
        }
        object->un_pager.vnp.vnp_size = nsize;
        object->size = nobjsize;
}

/*
 * Calculate the linear (byte) disk address of the specified virtual
 * file address.
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
        struct vnode *vp;
        vm_ooffset_t address;
        int *run;
{
        int rtaddress;
        int bsize;
        daddr_t block;
        struct vnode *rtvp;
        int err;
        daddr_t vblock;
        int voffset;

        if ((int) address < 0)
                return -1;

        if (vp->v_mount == NULL)
                return -1;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

        if (err || (block == -1))
                rtaddress = -1;
        else {
                rtaddress = block + voffset / DEV_BSIZE;
                if (run) {
                        *run += 1;
                        *run *= bsize / PAGE_SIZE;
                        *run -= voffset / PAGE_SIZE;
                }
        }

        return rtaddress;
}
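/*
 * Illustrative example of the math above (assuming 512 byte DEV_BSIZE
 * disk addresses and an 8K f_iosize): for byte offset 12288, vblock is
 * 1 and voffset is 4096.  If VOP_BMAP() maps logical block 1 to
 * physical block N, the returned address is N + 4096/512 = N + 8, a
 * DEV_BSIZE-unit disk address, and *run is converted from contiguous
 * filesystem blocks to contiguous pages starting at the request.
 */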
/*
 * Interrupt routine for I/O completion.
 */
static void
vnode_pager_iodone(bp)
        struct buf *bp;
{
        bp->b_flags |= B_DONE;
        wakeup(bp);
}

/*
 * Small block filesystem vnode pager input.
 */
static int
vnode_pager_input_smlfs(object, m)
        vm_object_t object;
        vm_page_t m;
{
        int i;
        int s;
        struct vnode *dp, *vp;
        struct buf *bp;
        vm_offset_t kva;
        int fileaddr;
        vm_offset_t bsize;
        int error = 0;

        vp = object->handle;
        if (vp->v_mount == NULL)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

        kva = vm_pager_map_page(m);

        for (i = 0; i < PAGE_SIZE / bsize; i++) {

                if (vm_page_bits(i * bsize, bsize) & m->valid)
                        continue;

                fileaddr = vnode_pager_addr(vp,
                    IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
                if (fileaddr != -1) {
                        bp = getpbuf(&vnode_pbuf_freecnt);

                        /* build a minimal buffer header */
                        bp->b_iocmd = BIO_READ;
                        bp->b_iodone = vnode_pager_iodone;
                        bp->b_rcred = bp->b_wcred = curproc->p_ucred;
                        if (bp->b_rcred != NOCRED)
                                crhold(bp->b_rcred);
                        if (bp->b_wcred != NOCRED)
                                crhold(bp->b_wcred);
                        bp->b_data = (caddr_t) kva + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetvp(dp, bp);
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;
                        bp->b_runningbufspace = bp->b_bufsize;
                        runningbufspace += bp->b_runningbufspace;

                        /* do the input */
                        BUF_STRATEGY(bp);

                        /* we definitely need to be at splvm here */

                        s = splvm();
                        while ((bp->b_flags & B_DONE) == 0) {
                                tsleep(bp, PVM, "vnsrd", 0);
                        }
                        splx(s);
                        if ((bp->b_ioflags & BIO_ERROR) != 0)
                                error = EIO;

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        relpbuf(bp, &vnode_pbuf_freecnt);
                        if (error)
                                break;

                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                } else {
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        bzero((caddr_t) kva + i * bsize, bsize);
                }
        }
        vm_pager_unmap_page(kva);
        pmap_clear_modify(m);
        vm_page_flag_clear(m, PG_ZERO);
        if (error) {
                return VM_PAGER_ERROR;
        }
        return VM_PAGER_OK;
}

/*
 * Old style vnode pager input routine.
 */
static int
vnode_pager_input_old(object, m)
        vm_object_t object;
        vm_page_t m;
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        vm_offset_t kva;

        error = 0;

        /*
         * Return failure if beyond current EOF.
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return VM_PAGER_BAD;
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                kva = vm_pager_map_page(m);

                aiov.iov_base = (caddr_t) kva;
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_procp = curproc;

                error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred);
                if (!error) {
                        register int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t) kva + count, PAGE_SIZE - count);
                }
                vm_pager_unmap_page(kva);
        }
        pmap_clear_modify(m);
        vm_page_undirty(m);
        vm_page_flag_clear(m, PG_ZERO);
        if (!error)
                m->valid = VM_PAGE_BITS_ALL;
        return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
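/*
 * Summary of the three input paths: vnode_pager_input_old() is the
 * fallback used when VOP_BMAP() fails, reading through VOP_READ() into
 * a transient mapping of the page; vnode_pager_input_smlfs() handles
 * filesystems whose block size is smaller than a page, one block at a
 * time; the generic getpages path below does clustered direct device
 * I/O against the underlying device vnode.
 */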
/*
 * Generic vnode pager input routine.
 */

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_GETPAGES, their VOP_GETPAGES should call to
 * vnode_pager_generic_getpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
        vm_object_t object;
        vm_page_t *m;
        int count;
        int reqpage;
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;

        vp = object->handle;
        /*
         * XXX temporary diagnostic message to help track stale FS code;
         * returning EOPNOTSUPP from here may make things unhappy.
         */
        rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
        if (rtval == EOPNOTSUPP) {
                printf("vnode_pager: *** WARNING *** stale FS getpages\n");
                rtval = vnode_pager_generic_getpages(vp, m, bytes, reqpage);
        }
        return rtval;
}
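/*
 * Illustrative sketch (not part of this file; the vop_getpages_args
 * field names are assumed): a local media filesystem that does not
 * implement its own getpages would provide a VOP_GETPAGES method that
 * simply forwards to the generic routine, e.g.:
 *
 *      static int
 *      xxx_getpages(struct vop_getpages_args *ap)
 *      {
 *              return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
 *                  ap->a_count, ap->a_reqpage));
 *      }
 */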
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
        struct vnode *vp;
        vm_page_t *m;
        int bytecount;
        int reqpage;
{
        vm_object_t object;
        vm_offset_t kva;
        off_t foff, tfoff, nextoff;
        int i, size, bsize, first, firstaddr;
        struct vnode *dp;
        int runpg;
        int runend;
        struct buf *bp;
        int s;
        int count;
        int error = 0;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        if (vp->v_mount == NULL)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        /* get the UNDERLYING device for the file with VOP_BMAP() */

        /*
         * Originally, we did not check for an error return value --
         * assuming an fs always has a bmap entry point -- but that
         * assumption is wrong!
         */
        foff = IDX_TO_OFF(m[reqpage]->pindex);

        /*
         * If we can't bmap, use old VOP code.
         */
        if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
                for (i = 0; i < count; i++) {
                        if (i != reqpage) {
                                vm_page_free(m[i]);
                        }
                }
                cnt.v_vnodein++;
                cnt.v_vnodepgsin++;
                return vnode_pager_input_old(object, m[reqpage]);

                /*
                 * If the blocksize is smaller than a page size, then use
                 * special small filesystem code.  NFS sometimes has a small
                 * blocksize, but it can handle large reads itself.
                 */
        } else if ((PAGE_SIZE / bsize) > 1 &&
            (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
                for (i = 0; i < count; i++) {
                        if (i != reqpage) {
                                vm_page_free(m[i]);
                        }
                }
                cnt.v_vnodein++;
                cnt.v_vnodepgsin++;
                return vnode_pager_input_smlfs(object, m[reqpage]);
        }

        /*
         * If we have a completely valid page available to us, we can
         * clean up and return.  Otherwise we have to re-read the
         * media.
         */
        if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
                for (i = 0; i < count; i++) {
                        if (i != reqpage)
                                vm_page_free(m[i]);
                }
                return VM_PAGER_OK;
        }
        m[reqpage]->valid = 0;

        /*
         * here on direct device I/O
         */
        firstaddr = -1;

        /*
         * calculate the run that includes the required page
         */
        for (first = 0, i = 0; i < count; i = runend) {
                firstaddr = vnode_pager_addr(vp,
                    IDX_TO_OFF(m[i]->pindex), &runpg);
                if (firstaddr == -1) {
                        if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
                                /* XXX no %qd in kernel. */
                                panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%lx%08lx, vnp_size: 0x%lx%08lx",
                                    firstaddr, (u_long)(foff >> 32),
                                    (u_long)(u_int32_t)foff,
                                    (u_long)(u_int32_t)
                                    (object->un_pager.vnp.vnp_size >> 32),
                                    (u_long)(u_int32_t)
                                    object->un_pager.vnp.vnp_size);
                        }
                        vm_page_free(m[i]);
                        runend = i + 1;
                        first = runend;
                        continue;
                }
                runend = i + runpg;
                if (runend <= reqpage) {
                        int j;
                        for (j = i; j < runend; j++) {
                                vm_page_free(m[j]);
                        }
                } else {
                        if (runpg < (count - first)) {
                                for (i = first + runpg; i < count; i++)
                                        vm_page_free(m[i]);
                                count = first + runpg;
                        }
                        break;
                }
                first = runend;
        }

        /*
         * The first and last page have been calculated now; move the
         * input pages to be zero based.
         */
        if (first != 0) {
                for (i = first; i < count; i++) {
                        m[i - first] = m[i];
                }
                count -= first;
                reqpage -= first;
        }
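        /*
         * Worked example of the run logic above (illustrative): with
         * count = 4 and reqpage = 2, if the run starting at page 0
         * covers only pages 0-1, those pages are freed because the run
         * ends before the requested page; if the run starting at page
         * 2 then covers pages 2-3, the transfer becomes exactly those
         * two pages, and the shift just performed makes the surviving
         * pages zero based with reqpage adjusted to 0.
         */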
        /*
         * calculate the file virtual address for the transfer
         */
        foff = IDX_TO_OFF(m[0]->pindex);

        /*
         * calculate the size of the transfer
         */
        size = count * PAGE_SIZE;
        if ((foff + size) > object->un_pager.vnp.vnp_size)
                size = object->un_pager.vnp.vnp_size - foff;

        /*
         * round up physical size for real devices.
         */
        if (dp->v_type == VBLK || dp->v_type == VCHR) {
                int secmask = dp->v_rdev->si_bsize_phys - 1;
                KASSERT(secmask < PAGE_SIZE, ("vnode_pager_generic_getpages: sector size %d too large\n", secmask + 1));
                size = (size + secmask) & ~secmask;
        }

        bp = getpbuf(&vnode_pbuf_freecnt);
        kva = (vm_offset_t) bp->b_data;

        /*
         * and map the pages to be read into the kva
         */
        pmap_qenter(kva, m, count);

        /* build a minimal buffer header */
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = vnode_pager_iodone;
        /* B_PHYS is not set, but it is nice to fill this in */
        bp->b_rcred = bp->b_wcred = curproc->p_ucred;
        if (bp->b_rcred != NOCRED)
                crhold(bp->b_rcred);
        if (bp->b_wcred != NOCRED)
                crhold(bp->b_wcred);
        bp->b_blkno = firstaddr;
        pbgetvp(dp, bp);
        bp->b_bcount = size;
        bp->b_bufsize = size;
        bp->b_runningbufspace = bp->b_bufsize;
        runningbufspace += bp->b_runningbufspace;

        cnt.v_vnodein++;
        cnt.v_vnodepgsin += count;

        /* do the input */
        BUF_STRATEGY(bp);

        s = splvm();
        /* we definitely need to be at splvm here */

        while ((bp->b_flags & B_DONE) == 0) {
                tsleep(bp, PVM, "vnread", 0);
        }
        splx(s);
        if ((bp->b_ioflags & BIO_ERROR) != 0)
                error = EIO;

        if (!error) {
                if (size != count * PAGE_SIZE)
                        bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
        }
        pmap_qremove(kva, count);

        /*
         * free the buffer header back to the swap buffer pool
         */
        relpbuf(bp, &vnode_pbuf_freecnt);

        for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
                vm_page_t mt;

                nextoff = tfoff + PAGE_SIZE;
                mt = m[i];

                if (nextoff <= object->un_pager.vnp.vnp_size) {
                        /*
                         * Read filled up entire page.
                         */
                        mt->valid = VM_PAGE_BITS_ALL;
                        vm_page_undirty(mt);    /* should be an assert? XXX */
                        pmap_clear_modify(mt);
                } else {
                        /*
                         * Read did not fill up entire page.  Since this
                         * is getpages, the page may be mapped, so we have
                         * to zero the invalid portions of the page even
                         * though we aren't setting them valid.
                         *
                         * Currently we do not set the entire page valid,
                         * we just try to clear the piece that we couldn't
                         * read.
                         */
                        vm_page_set_validclean(mt, 0,
                            object->un_pager.vnp.vnp_size - tfoff);
                        /* handled by vm_fault now */
                        /* vm_page_zero_invalid(mt, FALSE); */
                }

                vm_page_flag_clear(mt, PG_ZERO);
                if (i != reqpage) {

                        /*
                         * Whether or not to leave the page activated is up in
                         * the air, but we should put the page on a page queue
                         * somewhere (it already is in the object).  Empirical
                         * results show that deactivating the pages is best.
                         */

                        /*
                         * Just in case someone was asking for this page we
                         * now tell them that it is ok to use.
                         */
                        if (!error) {
                                if (mt->flags & PG_WANTED)
                                        vm_page_activate(mt);
                                else
                                        vm_page_deactivate(mt);
                                vm_page_wakeup(mt);
                        } else {
                                vm_page_free(mt);
                        }
                }
        }
        if (error) {
                printf("vnode_pager_getpages: I/O read error\n");
        }
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call to
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
        vm_object_t object;
        vm_page_t *m;
        int count;
        boolean_t sync;
        int *rtvals;
{
        int rtval;
        struct vnode *vp;
        struct mount *mp;
        int bytes = count * PAGE_SIZE;

        /*
         * Force synchronous operation if we are extremely low on memory
         * to prevent a low-memory deadlock.  VOP operations often need to
         * allocate more memory to initiate the I/O (i.e. do a BMAP
         * operation).  The swapper handles the case by limiting the amount
         * of asynchronous I/O, but that sort of solution doesn't scale well
         * for the vnode pager without a lot of work.
         *
         * Also, the backing vnode's iodone routine may not wake the pageout
         * daemon up.  This should probably be addressed.  XXX
         */
        if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
                sync |= OBJPC_SYNC;

        /*
         * Call device-specific putpages function.
         */
        vp = object->handle;
        if (vp->v_type != VREG)
                mp = NULL;
        (void)vn_start_write(vp, &mp, V_WAIT);
        rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
        if (rtval == EOPNOTSUPP) {
                printf("vnode_pager: *** WARNING *** stale FS putpages\n");
                rtval = vnode_pager_generic_putpages(vp, m, bytes, sync, rtvals);
        }
        vn_finished_write(mp);
}
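/*
 * Illustrative sketch (not part of this file; the vop_putpages_args
 * field names are assumed): a local media filesystem without its own
 * putpages implementation would forward to the generic routine, e.g.:
 *
 *      static int
 *      xxx_putpages(struct vop_putpages_args *ap)
 *      {
 *              return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *                  ap->a_count, ap->a_sync, ap->a_rtvals));
 *      }
 */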
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon, and
 * clustering has typically already occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
        struct vnode *vp;
        vm_page_t *m;
        int bytecount;
        int flags;
        int *rtvals;
{
        int i;
        vm_object_t object;
        int count;

        int maxsize, ncount;
        vm_ooffset_t poffset;
        struct uio auio;
        struct iovec aiov;
        int error;
        int ioflags;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;

        if ((int) m[0]->pindex < 0) {
                printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
                    (long)m[0]->pindex, m[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return VM_PAGER_BAD;
        }

        maxsize = count * PAGE_SIZE;
        ncount = count;

        poffset = IDX_TO_OFF(m[0]->pindex);
        if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
                if (object->un_pager.vnp.vnp_size > poffset)
                        maxsize = object->un_pager.vnp.vnp_size - poffset;
                else
                        maxsize = 0;
                ncount = btoc(maxsize);
                if (ncount < count) {
                        for (i = ncount; i < count; i++) {
                                rtvals[i] = VM_PAGER_BAD;
                        }
                }
        }

        /*
         * Pageouts are already clustered, so use IO_ASYNC to force a
         * bawrite() rather than a bdwrite() to prevent paging I/O from
         * saturating the buffer cache.
         */
        ioflags = IO_VMIO;
        ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC : IO_ASYNC;
        ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL : 0;

        aiov.iov_base = (caddr_t) 0;
        aiov.iov_len = maxsize;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = poffset;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_WRITE;
        auio.uio_resid = maxsize;
        auio.uio_procp = (struct proc *) 0;
        error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
        cnt.v_vnodeout++;
        cnt.v_vnodepgsout += ncount;

        if (error) {
                printf("vnode_pager_putpages: I/O error %d\n", error);
        }
        if (auio.uio_resid) {
                printf("vnode_pager_putpages: residual I/O %d at %lu\n",
                    auio.uio_resid, (u_long)m[0]->pindex);
        }
        for (i = 0; i < ncount; i++) {
                rtvals[i] = VM_PAGER_OK;
        }
        return rtvals[0];
}

/*
 * Walk down the backing object chain and return a referenced, locked
 * vnode for the first vnode object found, or NULL if there is none or
 * it is being torn down.
 */
struct vnode *
vnode_pager_lock(object)
        vm_object_t object;
{
        struct proc *p = curproc;       /* XXX */

        for (; object != NULL; object = object->backing_object) {
                if (object->type != OBJT_VNODE)
                        continue;
                if (object->flags & OBJ_DEAD)
                        return NULL;

                while (vget(object->handle,
                    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
                        if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
                                return NULL;
                        printf("vnode_pager_lock: retrying\n");
                }
                return object->handle;
        }
        return NULL;
}