/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);

struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_LOCK(object);
	object->ref_count--;
	VM_OBJECT_UNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	vp->v_object = NULL;
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		vp->v_writecount--;
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	vp->v_vflag &= ~VV_TEXT;
	VM_OBJECT_UNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_LOCK(object);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;
	int vfslocked;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || vp->v_iflag & VI_DOOMED)
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
/* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
	VM_OBJECT_LOCK(object);
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_UNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		} else if ((nsize & PAGE_MASK) &&
		    __predict_false(object->cache != NULL)) {
			vm_page_cache_free(object, OFF_TO_IDX(nsize),
			    nobjsize);
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return -1;

	if (vp->v_iflag & VI_DOOMED)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		VM_OBJECT_LOCK(object);
		m->valid |= bits;
		VM_OBJECT_UNLOCK(object);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_LOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;
	int vfslocked;

	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first;
	daddr_t firstaddr, reqblock;
	struct bufobj *bo;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("vnode_pager_generic_getpages does not support devices"));
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
	if (error == EOPNOTSUPP) {
		VM_OBJECT_LOCK(object);

		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_UNLOCK(object);
		return (error);
	} else if (error != 0) {
		VM_OBJECT_LOCK(object);
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		VM_OBJECT_LOCK(object);
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	VM_OBJECT_LOCK(object);
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_OK;
	} else if (reqblock == -1) {
		pmap_zero_page(m[reqpage]);
		KASSERT(m[reqpage]->dirty == 0,
		    ("vnode_pager_generic_getpages: page %p is dirty", m));
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_OK);
	}
	m[reqpage]->valid = 0;
	VM_OBJECT_UNLOCK(object);

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
		    &runpg) != 0) {
			VM_OBJECT_LOCK(object);
			for (; i < count; i++)
				if (i != reqpage) {
					vm_page_lock(m[i]);
					vm_page_free(m[i]);
					vm_page_unlock(m[i]);
				}
			VM_OBJECT_UNLOCK(object);
			return (VM_PAGER_ERROR);
		}
		if (firstaddr == -1) {
			VM_OBJECT_LOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
				    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
				    (uintmax_t)foff,
				    (uintmax_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock(m[i]);
			vm_page_free(m[i]);
			vm_page_unlock(m[i]);
			VM_OBJECT_UNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_LOCK(object);
			for (j = i; j < runend; j++) {
				vm_page_lock(m[j]);
				vm_page_free(m[j]);
				vm_page_unlock(m[j]);
			}
			VM_OBJECT_UNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_LOCK(object);
				for (i = first + runpg; i < count; i++) {
					vm_page_lock(m[i]);
					vm_page_free(m[i]);
					vm_page_unlock(m[i]);
				}
				VM_OBJECT_UNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		m += first;
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
	if (1) {
		int secmask = bo->bo_bsize - 1;
		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = bdone;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_long(&runningbufspace, bp->b_runningbufspace);

	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, count);

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	bwait(bp, PVM, "vnread");

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	bp->b_vp = NULL;
	pbrelbo(bp);
	relpbuf(bp, &vnode_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("vnode_pager_generic_getpages: page %p is dirty",
			    mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("vnode_pager_generic_getpages: page %p is mapped",
			    mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_valid_range(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			KASSERT((mt->dirty & vm_page_bits(0,
			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
			    ("vnode_pager_generic_getpages: page %p is dirty",
			    mt));
		}

		if (i != reqpage) {

			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page, we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (mt->oflags & VPO_WANTED) {
					vm_page_lock(mt);
					vm_page_activate(mt);
					vm_page_unlock(mt);
				} else {
					vm_page_lock(mt);
					vm_page_deactivate(mt);
					vm_page_unlock(mt);
				}
				vm_page_wakeup(mt);
			} else {
				vm_page_lock(mt);
				vm_page_free(mt);
				vm_page_unlock(mt);
			}
		}
	}
	VM_OBJECT_UNLOCK(object);
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O ( i.e. do a BMAP
	 * operation ).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed XXX.
	 */

	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has typically already occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
	int i;
	vm_object_t object;
	vm_page_t m;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;
	int ppscheck = 0;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_ERROR;

	if ((int64_t)ma[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
		    (long)ma[0]->pindex, (u_long)ma[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(ma[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	VM_OBJECT_LOCK(object);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				/*
				 * If the object is locked and the following
				 * conditions hold, then the page's dirty
				 * field cannot be concurrently changed by a
				 * pmap operation.
				 */
				m = ma[ncount - 1];
				KASSERT(m->busy > 0,
				    ("vnode_pager_generic_putpages: page %p is not busy", m));
				KASSERT((m->aflags & PGA_WRITEABLE) == 0,
				    ("vnode_pager_generic_putpages: page %p is not read-only", m));
				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
				    pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, ncount);

	if (error) {
		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
			printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
			printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
			    auio.uio_resid, (u_long)ma[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
{
	vm_object_t obj;
	int i, pos;

	if (written == 0)
		return;
	obj = ma[0]->object;
	VM_OBJECT_LOCK(obj);
	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
		if (pos < trunc_page(written)) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(ma[i]);
		} else {
			/* Partially written page. */
			rtvals[i] = VM_PAGER_AGAIN;
			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
		}
	}
	VM_OBJECT_UNLOCK(obj);
}

void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_LOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_UNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
		vp->v_writecount++;
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
		vp->v_writecount--;
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_UNLOCK(object);
}

void
vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	struct mount *mp;
	vm_offset_t inc;
	int vfslocked;

	VM_OBJECT_LOCK(object);

	/*
	 * First, recheck the object type to account for the race when
	 * the vnode is reclaimed.
	 */
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_UNLOCK(object);
		return;
	}

	/*
	 * Optimize for the case when writemappings is not going to
	 * zero.
	 */
	inc = end - start;
	if (object->un_pager.vnp.writemappings != inc) {
		object->un_pager.vnp.writemappings -= inc;
		VM_OBJECT_UNLOCK(object);
		return;
	}

	vp = object->handle;
	vhold(vp);
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	mp = NULL;
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Decrement the object's writemappings by swapping the start
	 * and end arguments for vnode_pager_update_writecount().  If
	 * there was not a race with vnode reclamation, then the
	 * vnode's v_writecount is decremented.
	 */
	vnode_pager_update_writecount(object, end, start);
	VOP_UNLOCK(vp, 0);
	vdrop(vp);
	if (mp != NULL)
		vn_finished_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);
}