/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 * $FreeBSD$
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
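 *
 *	(For now, filesystems without a native implementation are expected to
 *	have their VOP_GETPAGES/VOP_PUTPAGES call vnode_pager_generic_getpages()
 *	and vnode_pager_generic_putpages() below.)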
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static void vnode_pager_init(void);
static vm_offset_t vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
					 int *run);
static void vnode_pager_iodone(struct buf *bp);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);

struct pagerops vnodepagerops = {
	vnode_pager_init,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpages,
	vnode_pager_putpages,
	vnode_pager_haspage,
	NULL
};

int vnode_pbuf_freecnt;

void
vnode_pager_init(void)
{

	vnode_pbuf_freecnt = nswbuf / 2 + 1;
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	mtx_lock(&Giant);
	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	VI_LOCK(vp);
	while (vp->v_iflag & VI_OLOCK) {
		vp->v_iflag |= VI_OWANT;
		msleep(vp, VI_MTX(vp), PVM, "vnpobj", 0);
	}
	vp->v_iflag |= VI_OLOCK;
	VI_UNLOCK(vp);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
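	 * (OBJ_DEAD marks an object that is still being torn down; once it
	 * is gone, vp->v_object is re-checked on the next pass of the loop.)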
	 */
	while (((object = vp->v_object) != NULL) &&
	    (object->flags & OBJ_DEAD)) {
		tsleep(object, PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
	} else {
		object->ref_count++;
	}
	VI_LOCK(vp);
	vp->v_usecount++;
	vp->v_iflag &= ~VI_OLOCK;
	if (vp->v_iflag & VI_OWANT) {
		vp->v_iflag &= ~VI_OWANT;
		wakeup(vp);
	}
	VI_UNLOCK(vp);
	mtx_unlock(&Giant);
	return (object);
}

static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp = object->handle;

	GIANT_REQUIRED;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_dealloc");
	vp->v_object = NULL;
	vp->v_vflag &= ~(VV_TEXT | VV_OBJBUF);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	GIANT_REQUIRED;
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL)
		return FALSE;

	mp_fixme("Unlocked iflags access");
	if (vp->v_iflag & VI_DOOMED)
		return FALSE;
	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
	    after, before);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
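 *
 * Filesystems typically call this from their truncate and extend paths so
 * that the object's idea of EOF stays in sync with the file size.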
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_pindex_t nobjsize;
	vm_object_t object = vp->v_object;

	GIANT_REQUIRED;

	if (object == NULL)
		return;

	/*
	 * Hasn't changed size
	 */
	if (nsize == object->un_pager.vnp.vnp_size)
		return;

	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

	/*
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
#ifdef ENABLE_VFS_IOOPT
		vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
#endif
		if (nobjsize < object->size) {
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		}
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (nsize & PAGE_MASK) {
			vm_page_t m;

			m = vm_page_lookup(object, OFF_TO_IDX(nsize));
			if (m && m->valid) {
				int base = (int)nsize & PAGE_MASK;
				int size = PAGE_SIZE - base;

				/*
				 * Clear out partial-page garbage in case
				 * the page has been mapped.
				 */
				vm_page_zero_fill_area(m, base, size);

				/*
				 * XXX work around SMP data integrity race
				 * by unmapping the page from user processes.
				 * The garbage we just cleared may be mapped
				 * to a user process running on another cpu
				 * and this code is not running through normal
				 * I/O channels which handle SMP issues for
				 * us, so unmap page to synchronize all cpus.
				 *
				 * XXX should vm_pager_unmap_page() have
				 * dealt with this?
				 */
				vm_page_protect(m, VM_PROT_NONE);

				/*
				 * Clear out partial-page dirty bits.  This
				 * has the side effect of setting the valid
				 * bits, but that is ok.  There are a bunch
				 * of places in the VM system where we expected
				 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
				 * case is one of them.  If the page is still
				 * partially dirty, make it fully dirty.
				 *
				 * note that we do not clear out the valid
				 * bits.  This would prevent bogus_page
				 * replacement from working properly.
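				 *
				 * (Illustrative numbers, assuming 4K pages:
				 * nsize = 0x1801 gives base = 0x801 and
				 * size = 0x7ff, so only the tail of the last,
				 * partially backed page is zeroed and
				 * revalidated.)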
				 */
				vm_page_set_validclean(m, base, size);
				if (m->dirty != 0)
					m->dirty = VM_PAGE_BITS_ALL;
			}
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
}

/*
 * Calculate the disk address (in DEV_BSIZE units) that backs the given
 * byte offset within the file; the contiguous run returned through "run"
 * is converted from filesystem blocks to pages.
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	struct vnode *rtvp;
	int err;
	daddr_t vblock;
	int voffset;

	GIANT_REQUIRED;
	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * interrupt routine for I/O completion
 */
static void
vnode_pager_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	wakeup(bp);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	int s;
	struct vnode *dp, *vp;
	struct buf *bp;
	vm_offset_t kva;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	GIANT_REQUIRED;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);

	kva = vm_pager_map_page(m);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		if (vm_page_bits(i * bsize, bsize) & m->valid)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			fileaddr = vnode_pager_addr(vp, address, NULL);
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_iodone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t) kva + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			runningbufspace += bp->b_runningbufspace;

			/* do the input */
			BUF_STRATEGY(bp);

			/* we definitely need to be at splvm here */

			s = splvm();
			while ((bp->b_flags & B_DONE) == 0) {
				tsleep(bp, PVM, "vnsrd", 0);
			}
			splx(s);
			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;

			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
		} else {
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			bzero((caddr_t) kva + i * bsize, bsize);
		}
	}
	vm_pager_unmap_page(kva);
	pmap_clear_modify(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (error) {
		return VM_PAGER_ERROR;
	}
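	/* Every block is now either read in or zeroed; the page is fully valid. */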
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	vm_offset_t kva;
	struct vnode *vp;

	GIANT_REQUIRED;
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		kva = vm_pager_map_page(m);

		vp = object->handle;
		aiov.iov_base = (caddr_t) kva;
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t) kva + count, PAGE_SIZE - count);
		}
		vm_pager_unmap_page(kva);
	}
	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	GIANT_REQUIRED;
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first, firstaddr;
	struct vnode *dp;
	int runpg;
	int runend;
	struct buf *bp;
	int s;
	int count;
	int error = 0;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
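	 * (Hence the VOP_BMAP() check below, which falls back to
	 * vnode_pager_input_old() when the filesystem has no usable bmap.)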
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_old(object, m[reqpage]);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		return VM_PAGER_OK;
	}
	m[reqpage]->valid = 0;

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
		    IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				/* XXX no %qd in kernel. */
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%lx%08lx, vnp_size: 0x%lx%08lx",
				    firstaddr, (u_long)(foff >> 32),
				    (u_long)(u_int32_t)foff,
				    (u_long)(u_int32_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (u_long)(u_int32_t)
				    object->un_pager.vnp.vnp_size);
			}
			vm_page_lock_queues();
			vm_page_free(m[i]);
			vm_page_unlock_queues();
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			vm_page_lock_queues();
			for (j = i; j < runend; j++)
				vm_page_free(m[j]);
			vm_page_unlock_queues();
		} else {
			if (runpg < (count - first)) {
				vm_page_lock_queues();
				for (i = first + runpg; i < count; i++)
					vm_page_free(m[i]);
				vm_page_unlock_queues();
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;

	/*
	 * round up physical size for real devices.
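	 * si_bsize_phys is assumed to be a power of two here, so rounding
	 * "size" up to a sector boundary reduces to a mask operation.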
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR) {
		int secmask = dp->v_rdev->si_bsize_phys - 1;
		KASSERT(secmask < PAGE_SIZE, ("vnode_pager_generic_getpages: sector size %d too large\n", secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = vnode_pager_iodone;
	/* B_PHYS is not set, but it is nice to fill this in */
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	BUF_STRATEGY(bp);

	s = splvm();
	/* we definitely need to be at splvm here */

	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "vnread", 0);
	}
	splx(s);
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp, &vnode_pbuf_freecnt);

	vm_page_lock_queues();
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
			pmap_clear_modify(mt);
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(mt, FALSE); */
		}

		vm_page_flag_clear(mt, PG_ZERO);
		if (i != reqpage) {

			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirically,
			 * deactivating the pages works best.
			 */

			/*
			 * Just in case someone was asking for this page, we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (mt->flags & PG_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
	}
	vm_page_unlock_queues();
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
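 *
 * A minimal sketch of such a wrapper (illustrative only; the function name
 * is made up and the members follow the usual vop_putpages_args layout):
 *
 *	static int
 *	xxx_putpages(struct vop_putpages_args *ap)
 *	{
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}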
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	GIANT_REQUIRED;
	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed.  XXX
	 */

	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	if (vp->v_type != VREG)
		mp = NULL;
	(void)vn_start_write(vp, &mp, V_WAIT);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	vn_finished_write(mp);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has typically already occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
		    (long)m[0]->pindex, m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file, we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned, in
	 * which the last page is partially invalid.  In this case the
	 * filesystem may not properly clear the dirty bits for the entire
	 * page (which could be VM_PAGE_BITS_ALL due to the page having been
	 * mmap()d).  With the page locked we are free to fix up the dirty
	 * bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
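	 *
	 * (Illustrative numbers, assuming 4K pages: with poffset = 0 and
	 * vnp_size = 0x1801, maxsize is clamped to 0x1801, ncount becomes 2,
	 * and the last 0x7ff bytes of the second page get their dirty bits
	 * cleared below.)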
	 */
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_clear_dirty(m[ncount - 1], pgoff,
				    PAGE_SIZE - pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.
	 */
	ioflags = IO_VMIO;
	ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC: IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %lu\n",
		    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

struct vnode *
vnode_pager_lock(object)
	vm_object_t object;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	for (; object != NULL; object = object->backing_object) {
		if (object->type != OBJT_VNODE)
			continue;
		if (object->flags & OBJ_DEAD) {
			return NULL;
		}

		/* XXX: If object->handle can change, we need to cache it. */
		while (vget(object->handle,
		    LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, td)) {
			if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
				return NULL;
			printf("vnode_pager_lock: retrying\n");
		}
		return object->handle;
	}
	return NULL;
}