/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
 */
#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>
#include <sys/user.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_getvp(vm_object_t, struct vnode **, bool *);

const struct pagerops vnodepagerops = {
	.pgo_kvme_type = KVME_TYPE_VNODE,
	.pgo_alloc = vnode_pager_alloc,
	.pgo_dealloc = vnode_pager_dealloc,
	.pgo_getpages = vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages = vnode_pager_putpages,
	.pgo_haspage = vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
	.pgo_mightbedirty = vm_object_mightbedirty_,
	.pgo_getvp = vnode_pager_getvp,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;
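
/*
 * Size the pbuf zone used for vnode pager I/O.  The default below scales
 * with nswbuf (more generously on 64-bit kernels), but since the sysctl
 * above is CTLFLAG_RDTUN the count can be overridden from the loader,
 * e.g. with a loader.conf entry such as vm.vnode_pbufs="256" (value
 * purely illustrative).
 */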
static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);

/* Create the VM system backing object for this vnode */
static int
vnode_create_vobject_any(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size;
	bool last;

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (isize == VNODE_NO_SIZE) {
		if (vn_getsize_locked(vp, &size, td->td_ucred) != 0)
			return (0);
	} else {
		size = isize;
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__));

	return (0);
}

int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(!vn_isdisk(vp), vp, ("%s: disk vnode", __func__));
	VNASSERT(isize == VNODE_NO_SIZE || isize >= 0, vp,
	    ("%s: invalid size (%jd)", __func__, (intmax_t)isize));

	if (!vn_canvmio(vp))
		return (0);

	return (vnode_create_vobject_any(vp, isize, td));
}

int
vnode_create_disk_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(isize > 0, vp, ("%s: invalid size (%jd)", __func__,
	    (intmax_t)isize));

	return (vnode_create_vobject_any(vp, isize, td));
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		    ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
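	/*
	 * An object may already exist, or two threads may race to create
	 * one; in the latter case the loser below destroys its freshly
	 * allocated object and retries the lookup.
	 */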
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;
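
	/*
	 * Translate between filesystem blocks and VM pages.  With 4 KB
	 * pages, a filesystem with bsize 32 KB gives pagesperblock = 8, so
	 * page index 19 lives in fs block 19 / 8 = 2; a filesystem with
	 * bsize 512 instead gives blocksperpage = 8, so page index 19
	 * starts at fs block 19 * 8 = 152.  (Sizes here are illustrative.)
	 */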
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t)reqblock, *after,
			    (uintmax_t)object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Internal routine clearing partial-page content
 */
static void
vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
{
	int size;

	KASSERT(end > base && end <= PAGE_SIZE,
	    ("%s: start %d end %d", __func__, base, end));
	size = end - base;

	/*
	 * Clear out partial-page garbage in case
	 * the page has been mapped.
	 */
	pmap_zero_page_area(m, base, size);

	/*
	 * Update the valid bits to reflect the blocks
	 * that have been zeroed.  Some of these valid
	 * bits may have already been set.
	 */
	vm_page_set_valid_range(m, base, size);

	/*
	 * Round up "base" to the next block boundary so
	 * that the dirty bit for a partially zeroed
	 * block is not cleared.
	 */
	base = roundup2(base, DEV_BSIZE);
	end = rounddown2(end, DEV_BSIZE);

	if (end > base) {
		/*
		 * Clear out partial-page dirty bits.
		 *
		 * note that we do not clear out the
		 * valid bits.  This would prevent
		 * bogus_page replacement from working
		 * properly.
		 */
		vm_page_clear_dirty(m, base, end - base);
	}
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef INVARIANTS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m))
			vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK,
			    PAGE_SIZE);
		vm_page_xunbusy(m);
	}
out:
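	/*
	 * vnp_size may be read in some paths without the object lock held,
	 * so store it atomically to avoid torn 64-bit values; 32-bit
	 * powerpc has no atomic_store_64() and keeps the plain store.
	 */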
#if defined(__powerpc__) && !defined(__powerpc64__)
	object->un_pager.vnp.vnp_size = nsize;
#else
	atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * Lets the VM system know about the purged range for a file. We toss away any
 * cached pages in the associated object that are affected by the purge
 * operation.  Partial-page area not aligned to page boundaries will be zeroed
 * and the dirty blocks in DEV_BSIZE unit within a page will not be flushed.
 */
void
vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t start, vm_ooffset_t end)
{
	struct vm_page *m;
	struct vm_object *object;
	vm_pindex_t pi, pistart, piend;
	bool same_page;
	int base, pend;

	ASSERT_VOP_LOCKED(vp, "vnode_pager_purge_range");

	object = vp->v_object;
	pi = start + PAGE_MASK < start ? OBJ_MAX_SIZE :
	    OFF_TO_IDX(start + PAGE_MASK);
	pistart = OFF_TO_IDX(start);
	piend = end == 0 ? OBJ_MAX_SIZE : OFF_TO_IDX(end);
	same_page = pistart == piend;
	if ((end != 0 && end <= start) || object == NULL)
		return;

	VM_OBJECT_WLOCK(object);

	if (pi < piend)
		vm_object_page_remove(object, pi, piend, 0);

	if ((start & PAGE_MASK) != 0) {
		base = (int)start & PAGE_MASK;
		pend = same_page ? (int)end & PAGE_MASK : PAGE_SIZE;
		m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
		if (same_page)
			goto out;
	}
	if ((end & PAGE_MASK) != 0) {
		base = same_page ? (int)start & PAGE_MASK : 0;
		pend = (int)end & PAGE_MASK;
		m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;
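
	/*
	 * VOP_BMAP() returns the device block backing 'vblock' and, in
	 * 'run', the count of contiguous fs blocks that follow it.  Below
	 * the result is converted to DEV_BSIZE units, and the run to whole
	 * pages remaining from 'address' onward: e.g. a run of 1 extra
	 * 16 KB block with 4 KB pages and 'address' one page into the
	 * block yields (1 + 1) * 4 - 1 = 7 pages.  (Block and page sizes
	 * are illustrative.)
	 */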
	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}

static void
vnode_pager_input_bdone(struct buf *bp)
{
	runningbufwakeup(bp);
	bdone(bp);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);
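
	/*
	 * Read the page one fs-sized block at a time, skipping blocks that
	 * are already valid.  Blocks beyond EOF or in holes (bmap'ed to -1)
	 * are zero-filled instead of read.
	 */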
	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_input_bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			(void)runningbufclaim(bp, bp->b_bufsize);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0) {
				KASSERT(bp->b_error != 0,
				    ("%s: buf error but b_error == 0\n", __func__));
				error = bp->b_error;
			}

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
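/*
 * For example (names hypothetical), a local-media filesystem without a
 * private getpages implementation can point its vop_vector directly at
 * these wrappers:
 *
 *	struct vop_vector myfs_vnodeops = {
 *		...
 *		.vop_getpages = vnode_pager_local_getpages,
 *		.vop_getpages_async = vnode_pager_local_getpages_async,
 *	};
 */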
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
	if (error != 0 && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(!VN_ISDEV(vp), ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= atop(maxphys),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}
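
	/* Borrow a pager buf from the vnode pager's pbuf zone; may sleep. */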
	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rahead = min(rahead, after);

	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
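	/*
	 * For example, with 4 KB pages and maxphys of 1 MB (values vary by
	 * platform), atop(maxphys) is 256; a request of count 16 with
	 * rbehind 200 and rahead 100 gives trim = 61 and sum = 300, so
	 * rbehind drops by 61 * 200 / 300 = 40 and rahead by
	 * 61 * 100 / 300 = 20, leaving 160 + 80 + 16 = 256 pages.
	 */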
	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= atop(maxphys),
	    ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
	    rbehind, rahead, count, maxphys));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	if (rbehind != 0 || rahead != 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_prepare_buf_pages(object, bp->b_pages, count,
		    &rbehind, &rahead, m);
		VM_OBJECT_WUNLOCK(object);
	} else {
		for (int j = 0; j < count; j++)
			bp->b_pages[j] = m[j];
	}
	bp->b_blkno -= IDX_TO_OFF(rbehind) / DEV_BSIZE;
	bp->b_pgbefore = rbehind;
	bp->b_pgafter = rahead;
	bp->b_npages = rbehind + count + rahead;

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= atop(maxphys),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		    __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regards to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = ptoa(bp->b_npages);
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}

	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	(void)runningbufclaim(bp, bp->b_bufsize);

	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		uma_zfree(vnode_pbuf_zone, bp);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
	}
}

static void
vnode_pager_generic_getpages_done_async(struct buf *bp)
{
	int error;

	error = vnode_pager_generic_getpages_done(bp);
	/* Run the iodone upon the requested range. */
	bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
	    bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
	for (int i = 0; i < bp->b_npages; i++)
		bp->b_pages[i] = NULL;
	bp->b_vp = NULL;
	pbrelbo(bp);
	uma_zfree(vnode_pbuf_zone, bp);
}

static int
vnode_pager_generic_getpages_done(struct buf *bp)
{
	vm_object_t object;
	off_t tfoff, nextoff;
	int i, error;

	KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0,
	    ("%s: buf error but b_error == 0\n", __func__));
	error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0;
	object = bp->b_vp->v_object;

	runningbufwakeup(bp);
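
	/*
	 * If the request was truncated at EOF, so that it did not cover
	 * the final page completely, zero the remainder of the buffer;
	 * an unmapped buffer must be mapped first to do so.
	 */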
	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
		if (!buf_mapped(bp)) {
			bp->b_data = bp->b_kvabase;
			pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
			    bp->b_npages);
		}
		bzero(bp->b_data + bp->b_bcount,
		    PAGE_SIZE * bp->b_npages - bp->b_bcount);
	}
	if (buf_mapped(bp)) {
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
		bp->b_data = unmapped_buf;
	}

	/*
	 * If the read failed, we must free any read ahead/behind pages here.
	 * The requested pages are freed by the caller (for sync requests)
	 * or by the bp->b_pgiodone callback (for async requests).
	 */
	if (error != 0) {
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < bp->b_pgbefore; i++)
			vm_page_free_invalid(bp->b_pages[i]);
		for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
			vm_page_free_invalid(bp->b_pages[i]);
		VM_OBJECT_WUNLOCK(object);
		return (error);
	}

	/* Read lock to protect size. */
	VM_OBJECT_RLOCK(object);
	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	    i < bp->b_npages; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = bp->b_pages[i];
		if (mt == bogus_page)
			continue;

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			vm_page_valid(mt);
			KASSERT(mt->dirty == 0,
			    ("%s: page %p is dirty", __func__, mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("%s: page %p is mapped", __func__, mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_valid_range(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			KASSERT((mt->dirty & vm_page_bits(0,
			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
			    ("%s: page %p is dirty", __func__, mt));
		}

		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
			vm_page_readahead_finish(mt);
	}
	VM_OBJECT_RUNLOCK(object);

	return (error);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int rtval __diagused;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed. XXX
	 */
	if (vm_page_count_min())
		flags |= VM_PAGER_PUT_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	VM_OBJECT_WLOCK(object);
}
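
/*
 * Convert a byte offset to the index of the DEV_BSIZE block within its
 * page: e.g. with 4 KB pages and 512-byte DEV_BSIZE, offset 0x1200 maps
 * to block index 1 of the page covering 0x1000-0x1fff.
 */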
static int
vn_off2bidx(vm_ooffset_t offset)
{

	return ((offset & PAGE_MASK) / DEV_BSIZE);
}

static bool
vn_dirty_blk(vm_page_t m, vm_ooffset_t offset)
{

	KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
	    offset < IDX_TO_OFF(m->pindex + 1),
	    ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
	    (uintmax_t)offset));
	return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
	vm_object_t object;
	vm_page_t m;
	vm_ooffset_t max_offset, next_offset, poffset, prev_offset;
	struct uio auio;
	struct iovec aiov;
	off_t prev_resid, wrsz;
	int count, error, i, maxsize, ncount, pgoff, ppscheck;
	bool in_hole;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_ERROR;

	if ((int64_t)ma[0]->pindex < 0) {
		printf("vnode_pager_generic_putpages: "
		    "attempt to write meta-data 0x%jx(%lx)\n",
		    (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return (VM_PAGER_BAD);
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(ma[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page busied we are free to fix up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	VM_OBJECT_RLOCK(object);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				pgoff = roundup2(pgoff, DEV_BSIZE);

				/*
				 * If the page is busy and the following
				 * conditions hold, then the page's dirty
				 * field cannot be concurrently changed by a
				 * pmap operation.
				 */
				m = ma[ncount - 1];
				vm_page_assert_sbusied(m);
				KASSERT(!pmap_page_is_write_mapped(m),
				    ("vnode_pager_generic_putpages: page %p is not read-only", m));
				MPASS(m->dirty != 0);
				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
				    pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		for (i = ncount; i < count; i++)
			rtvals[i] = VM_PAGER_BAD;
	}
	VM_OBJECT_RUNLOCK(object);

	auio.uio_iov = &aiov;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = NULL;
	max_offset = roundup2(poffset + maxsize, DEV_BSIZE);
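
	/*
	 * Write out the dirty range as a sequence of UIO_NOCOPY writes:
	 * skip runs of clean DEV_BSIZE blocks, find the longest run of
	 * dirty blocks that follows, and hand that run to VOP_WRITE(),
	 * repeating until the range is exhausted or a write fails.
	 */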
	for (prev_offset = poffset; prev_offset < max_offset;) {
		/* Skip clean blocks. */
		for (in_hole = true; in_hole && prev_offset < max_offset;) {
			m = ma[OFF_TO_IDX(prev_offset - poffset)];
			for (i = vn_off2bidx(prev_offset);
			    i < sizeof(vm_page_bits_t) * NBBY &&
			    prev_offset < max_offset; i++) {
				if (vn_dirty_blk(m, prev_offset)) {
					in_hole = false;
					break;
				}
				prev_offset += DEV_BSIZE;
			}
		}
		if (in_hole)
			goto write_done;

		/* Find longest run of dirty blocks. */
		for (next_offset = prev_offset; next_offset < max_offset;) {
			m = ma[OFF_TO_IDX(next_offset - poffset)];
			for (i = vn_off2bidx(next_offset);
			    i < sizeof(vm_page_bits_t) * NBBY &&
			    next_offset < max_offset; i++) {
				if (!vn_dirty_blk(m, next_offset))
					goto start_write;
				next_offset += DEV_BSIZE;
			}
		}
start_write:
		if (next_offset > poffset + maxsize)
			next_offset = poffset + maxsize;
		if (prev_offset == next_offset)
			goto write_done;

		/*
		 * Getting here requires finding a dirty block in the
		 * 'skip clean blocks' loop.
		 */

		aiov.iov_base = NULL;
		auio.uio_iovcnt = 1;
		auio.uio_offset = prev_offset;
		prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
		    prev_offset;
		error = VOP_WRITE(vp, &auio,
		    vnode_pager_putpages_ioflags(flags), curthread->td_ucred);

		wrsz = prev_resid - auio.uio_resid;
		if (wrsz == 0) {
			if (ppsratecheck(&lastfail, &curfail, 1) != 0) {
				vn_printf(vp, "vnode_pager_putpages: "
				    "zero-length write at %ju resid %zd\n",
				    auio.uio_offset, auio.uio_resid);
			}
			break;
		}

		/* Adjust the starting offset for next iteration. */
		prev_offset += wrsz;
		MPASS(auio.uio_offset == prev_offset);

		ppscheck = 0;
		if (error != 0 && (ppscheck = ppsratecheck(&lastfail,
		    &curfail, 1)) != 0)
			vn_printf(vp, "vnode_pager_putpages: I/O error %d\n",
			    error);
		if (auio.uio_resid != 0 && (ppscheck != 0 ||
		    ppsratecheck(&lastfail, &curfail, 1) != 0))
			vn_printf(vp, "vnode_pager_putpages: residual I/O %zd "
			    "at %ju\n", auio.uio_resid,
			    (uintmax_t)ma[0]->pindex);
		if (error != 0 || auio.uio_resid != 0)
			break;
	}
write_done:
	/* Mark completely processed pages. */
	for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
		rtvals[i] = VM_PAGER_OK;
	/* Mark partial EOF page. */
	if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0)
		rtvals[i++] = VM_PAGER_OK;
	/* Unwritten pages in range, free bonus if the page is clean. */
	for (; i < ncount; i++)
		rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
	VM_CNT_ADD(v_vnodepgsout, i);
	VM_CNT_INC(v_vnodeout);
	return (rtvals[0]);
}

int
vnode_pager_putpages_ioflags(int pager_flags)
{
	int ioflags;

	/*
	 * Pageouts are already clustered, use IO_ASYNC to force a
	 * bawrite() rather than a bdwrite() to prevent paging I/O
	 * from saturating the buffer cache.  Dummy-up the sequential
	 * heuristic to cause large ranges to cluster.  If neither
	 * IO_SYNC or IO_ASYNC is set, the system decides how to
	 * cluster.
	 */
	ioflags = IO_VMIO;
	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
		ioflags |= IO_SYNC;
	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL : 0;
	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
	return (ioflags);
}

/*
 * vnode_pager_undirty_pages().
 *
 * A helper to mark pages as clean after pageout that was possibly
 * done with a short write.  The lpos argument specifies the page run
 * length in bytes, and the written argument specifies how many bytes
 * were actually written.  eof is the offset past the last valid byte
 * in the vnode using the absolute file position of the first byte in
 * the run as the base from which it is computed.
 */
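/*
 * For example, a run of 4 pages (lpos 16384) written short at 6000 bytes
 * leaves page 0 fully clean (VM_PAGER_OK), page 1 partially cleaned
 * (VM_PAGER_AGAIN, so it is retried), and if eof falls at byte 9000 the
 * tail of page 2 past the EOF block is also undirtied.  (Numbers assume
 * 4 KB pages and are illustrative only.)
 */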
void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof,
    int lpos)
{
	int i, pos, pos_devb;

	if (written == 0 && eof >= lpos)
		return;
	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
		if (pos < trunc_page(written)) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(ma[i]);
		} else {
			/* Partially written page. */
			rtvals[i] = VM_PAGER_AGAIN;
			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
		}
	}
	if (eof >= lpos) /* avoid truncation */
		return;
	for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) {
		if (pos != trunc_page(pos)) {
			/*
			 * The page contains the last valid byte in
			 * the vnode, mark the rest of the page as
			 * clean, potentially making the whole page
			 * clean.
			 */
			pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE);
			vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
			    pos_devb);

			/*
			 * If the page was cleaned, report the pageout
			 * on it as successful.  msync() no longer
			 * needs to write out the page, endlessly
			 * creating write requests and dirty buffers.
			 */
			if (ma[i]->dirty == 0)
				rtvals[i] = VM_PAGER_OK;

			pos = round_page(pos);
		} else {
			/* vm_pageout_flush() clears dirty */
			rtvals[i] = VM_PAGER_BAD;
			pos += PAGE_SIZE;
		}
	}
}
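
/*
 * Track writeable mappings of the object's pages in
 * un_pager.vnp.writemappings, and mirror the zero/non-zero transitions
 * into the vnode's v_writecount so the filesystem can tell that the
 * vnode may be written through a mapping.
 */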
static void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_WLOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_LOCKED(vp, "v_writecount inc");
		VOP_ADD_WRITECOUNT_CHECKED(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_LOCKED(vp, "v_writecount dec");
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_WUNLOCK(object);
}

static void
vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	struct mount *mp;
	vm_offset_t inc;

	VM_OBJECT_WLOCK(object);

	/*
	 * First, recheck the object type to account for the race when
	 * the vnode is reclaimed.
	 */
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	/*
	 * Optimize for the case when writemappings is not going to
	 * zero.
	 */
	inc = end - start;
	if (object->un_pager.vnp.writemappings != inc) {
		object->un_pager.vnp.writemappings -= inc;
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	vp = object->handle;
	vhold(vp);
	VM_OBJECT_WUNLOCK(object);
	mp = NULL;
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	/*
	 * Decrement the object's writemappings, by swapping the start
	 * and end arguments for vnode_pager_update_writecount().  If
	 * there was not a race with vnode reclamation, then the
	 * vnode's v_writecount is decremented.
	 */
	vnode_pager_update_writecount(object, end, start);
	VOP_UNLOCK(vp);
	vdrop(vp);
	if (mp != NULL)
		vn_finished_write(mp);
}

static void
vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
	*vpp = object->handle;
}

static void
vnode_pager_clean1(struct vnode *vp, int sync_flags)
{
	struct vm_object *obj;

	ASSERT_VOP_LOCKED(vp, "needs lock for writes");
	obj = vp->v_object;
	if (obj == NULL)
		return;

	VM_OBJECT_WLOCK(obj);
	vm_object_page_clean(obj, 0, 0, sync_flags);
	VM_OBJECT_WUNLOCK(obj);
}

void
vnode_pager_clean_sync(struct vnode *vp)
{
	vnode_pager_clean1(vp, OBJPC_SYNC);
}

void
vnode_pager_clean_async(struct vnode *vp)
{
	vnode_pager_clean1(vp, 0);
}