Lines Matching +full:vp +full:- +full:p (sys/vm/vnode_pager.c)

1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
50 * greatly re-simplify the vnode_pager.
87 static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
150 vnode_create_vobject_any(struct vnode *vp, off_t isize, struct thread *td) in vnode_create_vobject_any() argument
156 object = vp->v_object; in vnode_create_vobject_any()
161 if (vn_getsize_locked(vp, &size, td->td_ucred) != 0) in vnode_create_vobject_any()
167 object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred); in vnode_create_vobject_any()
170 * that the object is associated with the vp. We still have in vnode_create_vobject_any()
175 last = refcount_release(&object->ref_count); in vnode_create_vobject_any()
178 vrele(vp); in vnode_create_vobject_any()
180 VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__)); in vnode_create_vobject_any()
186 vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td) in vnode_create_vobject() argument
188 VNASSERT(!vn_isdisk(vp), vp, ("%s: disk vnode", __func__)); in vnode_create_vobject()
189 VNASSERT(isize == VNODE_NO_SIZE || isize >= 0, vp, in vnode_create_vobject()
192 if (!vn_canvmio(vp)) in vnode_create_vobject()
195 return (vnode_create_vobject_any(vp, isize, td)); in vnode_create_vobject()
199 vnode_create_disk_vobject(struct vnode *vp, off_t isize, struct thread *td) in vnode_create_disk_vobject() argument
201 VNASSERT(isize > 0, vp, ("%s: invalid size (%jd)", __func__, in vnode_create_disk_vobject()
204 return (vnode_create_vobject_any(vp, isize, td)); in vnode_create_disk_vobject()
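
The create_vobject entry points above are what filesystems call to give a regular file's vnode a backing VM object before paging I/O. A minimal sketch of a VOP_OPEN implementation doing this; myfs_open and the attribute fetch are hypothetical, but the VOP_GETATTR/vnode_create_vobject sequence mirrors what in-tree filesystems do on open:

    #include <sys/param.h>
    #include <sys/vnode.h>

    /* Hypothetical VOP_OPEN: make sure the file has a VM object. */
    static int
    myfs_open(struct vop_open_args *ap)
    {
            struct vnode *vp = ap->a_vp;
            struct vattr va;
            int error;

            error = VOP_GETATTR(vp, &va, ap->a_cred);
            if (error != 0)
                    return (error);
            /* No-op when vp->v_object exists or vn_canvmio() fails. */
            vnode_create_vobject(vp, va.va_size, ap->a_td);
            return (0);
    }
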
208 vnode_destroy_vobject(struct vnode *vp) in vnode_destroy_vobject() argument
212 obj = vp->v_object; in vnode_destroy_vobject()
213 if (obj == NULL || obj->handle != vp) in vnode_destroy_vobject()
215 ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject"); in vnode_destroy_vobject()
217 MPASS(obj->type == OBJT_VNODE); in vnode_destroy_vobject()
219 if (obj->ref_count == 0) { in vnode_destroy_vobject()
220 KASSERT((obj->flags & OBJ_DEAD) == 0, in vnode_destroy_vobject()
230 vinvalbuf(vp, V_SAVE, 0, 0); in vnode_destroy_vobject()
232 BO_LOCK(&vp->v_bufobj); in vnode_destroy_vobject()
233 vp->v_bufobj.bo_flag |= BO_DEAD; in vnode_destroy_vobject()
234 BO_UNLOCK(&vp->v_bufobj); in vnode_destroy_vobject()
240 * Woe to the process that tries to page now :-). in vnode_destroy_vobject()
245 KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object)); in vnode_destroy_vobject()
257 struct vnode *vp; in vnode_pager_alloc() local
265 vp = (struct vnode *)handle; in vnode_pager_alloc()
266 ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc"); in vnode_pager_alloc()
267 VNPASS(vp->v_usecount > 0, vp); in vnode_pager_alloc()
269 object = vp->v_object; in vnode_pager_alloc()
278 object->un_pager.vnp.vnp_size = size; in vnode_pager_alloc()
279 object->un_pager.vnp.writemappings = 0; in vnode_pager_alloc()
280 object->domain.dr_policy = vnode_domainset; in vnode_pager_alloc()
281 object->handle = handle; in vnode_pager_alloc()
282 if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) { in vnode_pager_alloc()
287 VI_LOCK(vp); in vnode_pager_alloc()
288 if (vp->v_object != NULL) { in vnode_pager_alloc()
292 VI_UNLOCK(vp); in vnode_pager_alloc()
294 KASSERT(object->ref_count == 1, in vnode_pager_alloc()
295 ("leaked ref %p %d", object, object->ref_count)); in vnode_pager_alloc()
296 object->type = OBJT_DEAD; in vnode_pager_alloc()
297 refcount_init(&object->ref_count, 0); in vnode_pager_alloc()
302 vp->v_object = object; in vnode_pager_alloc()
303 VI_UNLOCK(vp); in vnode_pager_alloc()
304 vrefact(vp); in vnode_pager_alloc()
308 if ((object->flags & OBJ_COLORED) == 0) { in vnode_pager_alloc()
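
vnode_pager_alloc() is normally reached through the pager switch rather than called directly. A minimal sketch of that path, assuming the vnode is locked and use-referenced as the asserts at lines 266-267 require (object_for_vnode is hypothetical; vm_pager_allocate() is the real dispatcher):

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_object.h>
    #include <vm/vm_pager.h>

    /* Sketch: mmap-style lookup of the object backing a locked vnode. */
    static vm_object_t
    object_for_vnode(struct vnode *vp, off_t size, struct ucred *cred)
    {
            /* OBJT_VNODE dispatches to vnode_pager_alloc(). */
            return (vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL,
                0, cred));
    }
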
324 struct vnode *vp; in vnode_pager_dealloc() local
327 vp = object->handle; in vnode_pager_dealloc()
328 if (vp == NULL) in vnode_pager_dealloc()
333 refs = object->ref_count; in vnode_pager_dealloc()
335 object->handle = NULL; in vnode_pager_dealloc()
336 object->type = OBJT_DEAD; in vnode_pager_dealloc()
337 ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc"); in vnode_pager_dealloc()
338 if (object->un_pager.vnp.writemappings > 0) { in vnode_pager_dealloc()
339 object->un_pager.vnp.writemappings = 0; in vnode_pager_dealloc()
340 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vnode_pager_dealloc()
341 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", in vnode_pager_dealloc()
342 __func__, vp, vp->v_writecount); in vnode_pager_dealloc()
344 vp->v_object = NULL; in vnode_pager_dealloc()
345 VI_LOCK(vp); in vnode_pager_dealloc()
349 * following object->handle. Clear all text references now. in vnode_pager_dealloc()
354 if (vp->v_writecount < 0) in vnode_pager_dealloc()
355 vp->v_writecount = 0; in vnode_pager_dealloc()
356 VI_UNLOCK(vp); in vnode_pager_dealloc()
359 vunref(vp); in vnode_pager_dealloc()
367 struct vnode *vp = object->handle; in vnode_pager_haspage() local
378 * If no vp or vp is doomed or marked transparent to VM, we do not in vnode_pager_haspage()
381 if (vp == NULL || VN_IS_DOOMED(vp)) in vnode_pager_haspage()
387 if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size) in vnode_pager_haspage()
390 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_haspage()
400 err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before); in vnode_pager_haspage()
404 if (bn == -1) in vnode_pager_haspage()
407 poff = pindex - (reqblock * pagesperblock); in vnode_pager_haspage()
420 roundup2(object->size, pagesperblock), in vnode_pager_haspage()
423 (uintmax_t)object->size)); in vnode_pager_haspage()
425 *after += pagesperblock - (poff + 1); in vnode_pager_haspage()
426 if (pindex + *after >= object->size) in vnode_pager_haspage()
427 *after = object->size - 1 - pindex; in vnode_pager_haspage()
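
Seen from the caller, vnode_pager_haspage() answers "can a pagein at pindex succeed, and how many neighboring pages would the same disk block cover". A hedged sketch of the consumer side (can_pagein is hypothetical; vm_pager_has_page() is the real wrapper):

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_object.h>
    #include <vm/vm_pager.h>

    /* Sketch: query whether a pagein can satisfy pindex, and its window. */
    static bool
    can_pagein(vm_object_t object, vm_pindex_t pindex)
    {
            int after, before;

            /*
             * For vnode objects this resolves to vnode_pager_haspage().
             * before/after report contiguous pages around pindex that
             * one I/O could also fill, sizing read-behind/read-ahead.
             */
            return (vm_pager_has_page(object, pindex, &before, &after));
    }
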
442 * Internal routine clearing partial-page content
451 size = end - base; in vnode_pager_subpage_purge()
454 * Clear out partial-page garbage in case in vnode_pager_subpage_purge()
476 * Clear out partial-page dirty bits. in vnode_pager_subpage_purge()
483 vm_page_clear_dirty(m, base, end - base); in vnode_pager_subpage_purge()
497 vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize) in vnode_pager_setsize() argument
503 if ((object = vp->v_object) == NULL) in vnode_pager_setsize()
509 mp = vp->v_mount; in vnode_pager_setsize()
510 if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0) in vnode_pager_setsize()
511 assert_vop_elocked(vp, in vnode_pager_setsize()
516 if (object->type == OBJT_DEAD) { in vnode_pager_setsize()
520 KASSERT(object->type == OBJT_VNODE, in vnode_pager_setsize()
521 ("not vnode-backed object %p", object)); in vnode_pager_setsize()
522 if (nsize == object->un_pager.vnp.vnp_size) { in vnode_pager_setsize()
530 if (nsize < object->un_pager.vnp.vnp_size) { in vnode_pager_setsize()
534 if (nobjsize < object->size) in vnode_pager_setsize()
535 vm_object_page_remove(object, nobjsize, object->size, in vnode_pager_setsize()
557 object->un_pager.vnp.vnp_size = nsize; in vnode_pager_setsize()
559 atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); in vnode_pager_setsize()
561 object->size = nobjsize; in vnode_pager_setsize()
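
A worked example of the truncation arithmetic above, assuming 4 KB pages (PAGE_SHIFT 12): shrinking a 16384-byte file to 6000 bytes yields nobjsize = OFF_TO_IDX(6000 + PAGE_MASK) = 2, so whole pages [2, 4) are removed and the tail of page 1 from page offset 1904 is zeroed by the partial-page path. A standalone mirror of the computation:

    #include <stdio.h>

    #define PAGE_SIZE       4096UL          /* assumed */
    #define PAGE_MASK       (PAGE_SIZE - 1)
    #define OFF_TO_IDX(x)   ((unsigned long)(x) >> 12)

    int
    main(void)
    {
            unsigned long osize = 16384, nsize = 6000;
            unsigned long nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);

            printf("object shrinks to %lu pages\n", nobjsize);  /* 2 */
            printf("remove pages [%lu, %lu)\n",
                nobjsize, OFF_TO_IDX(osize));                   /* [2, 4) */
            printf("zero page %lu from offset %lu\n",
                OFF_TO_IDX(nsize), nsize & PAGE_MASK);          /* 1, 1904 */
            return (0);
    }
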
568 * operation. Any partial-page area not aligned to page boundaries will be zeroed
572 vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t start, vm_ooffset_t end) in vnode_pager_purge_range() argument
580 ASSERT_VOP_LOCKED(vp, "vnode_pager_purge_range"); in vnode_pager_purge_range()
582 object = vp->v_object; in vnode_pager_purge_range()
627 vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress, in vnode_pager_addr() argument
635 if (VN_IS_DOOMED(vp)) in vnode_pager_addr()
636 return -1; in vnode_pager_addr()
638 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_addr()
642 err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL); in vnode_pager_addr()
644 if (*rtaddress != -1) in vnode_pager_addr()
649 *run -= voffset / PAGE_SIZE; in vnode_pager_addr()
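
The translation above is plain block arithmetic: vblock = address / bsize is handed to VOP_BMAP(), the returned device block is adjusted by the intra-block offset in DEV_BSIZE units, and BMAP's run of extra contiguous blocks is converted to pages past the address. A runnable userland mirror with example values (32 KB blocks, 4 KB pages; bmapblk stands in for what VOP_BMAP() would return):

    #include <stdio.h>

    #define DEV_BSIZE       512
    #define PAGE_SIZE       4096

    /* Mirror of the vnode_pager_addr() arithmetic (not the kernel code). */
    static long
    file_addr_to_disk(long address, long bsize, long bmapblk, int *run)
    {
            long voffset = address % bsize; /* byte offset inside fs block */

            *run += 1;                      /* count the mapped block too */
            *run *= bsize / PAGE_SIZE;      /* fs blocks -> pages */
            *run -= voffset / PAGE_SIZE;    /* drop pages behind address */
            return (bmapblk + voffset / DEV_BSIZE);
    }

    int
    main(void)
    {
            int run = 3;    /* 3 more contiguous blocks after this one */
            long blk = file_addr_to_disk(0x9000, 32768, 1000, &run);

            printf("blkno %ld run %d\n", blk, run);
            /* blkno 1008 (1000 + 4096/512), run 31 ((3+1)*8 - 1) */
            return (0);
    }
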
669 struct vnode *vp; in vnode_pager_input_smlfs() local
679 vp = object->handle; in vnode_pager_input_smlfs()
680 if (VN_IS_DOOMED(vp)) in vnode_pager_input_smlfs()
683 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_input_smlfs()
685 VOP_BMAP(vp, 0, &bo, 0, NULL, NULL); in vnode_pager_input_smlfs()
693 if (m->valid & bits) in vnode_pager_input_smlfs()
696 address = IDX_TO_OFF(m->pindex) + i * bsize; in vnode_pager_input_smlfs()
697 if (address >= object->un_pager.vnp.vnp_size) { in vnode_pager_input_smlfs()
698 fileaddr = -1; in vnode_pager_input_smlfs()
700 error = vnode_pager_addr(vp, address, &fileaddr, NULL); in vnode_pager_input_smlfs()
704 if (fileaddr != -1) { in vnode_pager_input_smlfs()
708 bp->b_iocmd = BIO_READ; in vnode_pager_input_smlfs()
709 bp->b_iodone = vnode_pager_input_bdone; in vnode_pager_input_smlfs()
710 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); in vnode_pager_input_smlfs()
711 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); in vnode_pager_input_smlfs()
712 bp->b_rcred = crhold(curthread->td_ucred); in vnode_pager_input_smlfs()
713 bp->b_wcred = crhold(curthread->td_ucred); in vnode_pager_input_smlfs()
714 bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize; in vnode_pager_input_smlfs()
715 bp->b_blkno = fileaddr; in vnode_pager_input_smlfs()
717 bp->b_vp = vp; in vnode_pager_input_smlfs()
718 bp->b_bcount = bsize; in vnode_pager_input_smlfs()
719 bp->b_bufsize = bsize; in vnode_pager_input_smlfs()
720 (void)runningbufclaim(bp, bp->b_bufsize); in vnode_pager_input_smlfs()
723 bp->b_iooffset = dbtob(bp->b_blkno); in vnode_pager_input_smlfs()
728 if ((bp->b_ioflags & BIO_ERROR) != 0) { in vnode_pager_input_smlfs()
729 KASSERT(bp->b_error != 0, in vnode_pager_input_smlfs()
731 error = bp->b_error; in vnode_pager_input_smlfs()
737 bp->b_vp = NULL; in vnode_pager_input_smlfs()
744 KASSERT((m->dirty & bits) == 0, in vnode_pager_input_smlfs()
745 ("vnode_pager_input_smlfs: page %p is dirty", m)); in vnode_pager_input_smlfs()
746 vm_page_bits_set(m, &m->valid, bits); in vnode_pager_input_smlfs()
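
vnode_pager_input_smlfs() validates a page one filesystem block at a time, so the `bits` value covers only that block's DEV_BSIZE chunks of the page's valid/dirty masks. A runnable mirror of the vm_page_bits() computation, assuming 4 KB pages and a 1024-byte block size:

    #include <stdint.h>
    #include <stdio.h>

    #define DEV_BSIZE       512

    /* Mirror of vm_page_bits(): one bit per DEV_BSIZE chunk of a page. */
    static uint32_t
    page_bits(int base, int size)
    {
            int first = base / DEV_BSIZE;
            int last = (base + size - 1) / DEV_BSIZE;

            return (((1u << (last + 1)) - 1) & ~((1u << first) - 1));
    }

    int
    main(void)
    {
            /* Block 1 of a bsize=1024 fs: bytes 1024-2047 of the page. */
            printf("0x%02x\n", page_bits(1 * 1024, 1024)); /* 0x0c */
            return (0);
    }
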
766 struct vnode *vp; in vnode_pager_input_old() local
774 if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) { in vnode_pager_input_old()
778 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size) in vnode_pager_input_old()
779 size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex); in vnode_pager_input_old()
780 vp = object->handle; in vnode_pager_input_old()
793 auio.uio_offset = IDX_TO_OFF(m->pindex); in vnode_pager_input_old()
799 error = VOP_READ(vp, &auio, 0, curthread->td_ucred); in vnode_pager_input_old()
801 int count = size - auio.uio_resid; in vnode_pager_input_old()
807 PAGE_SIZE - count); in vnode_pager_input_old()
813 KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m)); in vnode_pager_input_old()
829 * backing vp's VOP_GETPAGES.
835 struct vnode *vp; in vnode_pager_getpages() local
839 vp = object->handle; in vnode_pager_getpages()
840 rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead); in vnode_pager_getpages()
850 struct vnode *vp; in vnode_pager_getpages_async() local
853 vp = object->handle; in vnode_pager_getpages_async()
854 rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg); in vnode_pager_getpages_async()
869 return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, in vnode_pager_local_getpages()
870 ap->a_rbehind, ap->a_rahead, NULL, NULL)); in vnode_pager_local_getpages()
878 error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, in vnode_pager_local_getpages_async()
879 ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg); in vnode_pager_local_getpages_async()
880 if (error != 0 && ap->a_iodone != NULL) in vnode_pager_local_getpages_async()
881 ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error); in vnode_pager_local_getpages_async()
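
These two wrappers let a filesystem plug the generic pagein path directly into its VOP table. A sketch of the wiring; myfs_vnodeops is hypothetical, while the slot assignments mirror in-tree users such as ext2fs:

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <vm/vnode_pager.h>

    /* Hypothetical vop vector routing pageins through the generic pager. */
    struct vop_vector myfs_vnodeops = {
            .vop_default            = &default_vnodeops,
            .vop_getpages           = vnode_pager_local_getpages,
            .vop_getpages_async     = vnode_pager_local_getpages_async,
            /* ... remaining VOPs ... */
    };
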
890 vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count, in vnode_pager_generic_getpages() argument
904 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, in vnode_pager_generic_getpages()
907 if (VN_IS_DOOMED(vp)) in vnode_pager_generic_getpages()
910 object = vp->v_object; in vnode_pager_generic_getpages()
911 foff = IDX_TO_OFF(m[0]->pindex); in vnode_pager_generic_getpages()
912 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_generic_getpages()
915 KASSERT(foff < object->un_pager.vnp.vnp_size, in vnode_pager_generic_getpages()
916 ("%s: page %p offset beyond vp %p size", __func__, m[0], vp)); in vnode_pager_generic_getpages()
925 if (!vm_page_none_valid(m[count - 1]) && --count == 0) { in vnode_pager_generic_getpages()
932 MPASS((bp->b_flags & B_MAXPHYS) != 0); in vnode_pager_generic_getpages()
939 error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before); in vnode_pager_generic_getpages()
977 if (bp->b_blkno == -1) { in vnode_pager_generic_getpages()
979 ("%s: array[%d] request to a sparse file %p", __func__, in vnode_pager_generic_getpages()
980 count, vp)); in vnode_pager_generic_getpages()
983 KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty", in vnode_pager_generic_getpages()
990 blkno0 = bp->b_blkno; in vnode_pager_generic_getpages()
992 bp->b_blkno += (foff % bsize) / DEV_BSIZE; in vnode_pager_generic_getpages()
999 after += pagesperblock - (poff + 1); in vnode_pager_generic_getpages()
1000 if (m[0]->pindex + after >= object->size) in vnode_pager_generic_getpages()
1001 after = object->size - 1 - m[0]->pindex; in vnode_pager_generic_getpages()
1004 after -= count - 1; in vnode_pager_generic_getpages()
1019 trim = rbehind + rahead + count - atop(maxphys) + 1; in vnode_pager_generic_getpages()
1023 rbehind -= roundup(trim * rbehind / sum, pagesperblock); in vnode_pager_generic_getpages()
1027 rbehind -= trim * rbehind / sum; in vnode_pager_generic_getpages()
1028 rahead -= trim * rahead / sum; in vnode_pager_generic_getpages()
1035 * Fill in the bp->b_pages[] array with requested and optional in vnode_pager_generic_getpages()
1043 vm_object_prepare_buf_pages(object, bp->b_pages, count, in vnode_pager_generic_getpages()
1048 bp->b_pages[j] = m[j]; in vnode_pager_generic_getpages()
1050 bp->b_blkno -= IDX_TO_OFF(rbehind) / DEV_BSIZE; in vnode_pager_generic_getpages()
1051 bp->b_pgbefore = rbehind; in vnode_pager_generic_getpages()
1052 bp->b_pgafter = rahead; in vnode_pager_generic_getpages()
1053 bp->b_npages = rbehind + count + rahead; in vnode_pager_generic_getpages()
1057 *a_rbehind = bp->b_pgbefore; in vnode_pager_generic_getpages()
1059 *a_rahead = bp->b_pgafter; in vnode_pager_generic_getpages()
1062 KASSERT(bp->b_npages <= atop(maxphys), in vnode_pager_generic_getpages()
1063 ("%s: buf %p overflowed", __func__, bp)); in vnode_pager_generic_getpages()
1064 for (int j = 1, prev = 0; j < bp->b_npages; j++) { in vnode_pager_generic_getpages()
1065 if (bp->b_pages[j] == bogus_page) in vnode_pager_generic_getpages()
1067 KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex == in vnode_pager_generic_getpages()
1068 j - prev, ("%s: pages array not consecutive, bp %p", in vnode_pager_generic_getpages()
1079 foff = IDX_TO_OFF(bp->b_pages[0]->pindex); in vnode_pager_generic_getpages()
1080 bytecount = ptoa(bp->b_npages); in vnode_pager_generic_getpages()
1081 if ((foff + bytecount) > object->un_pager.vnp.vnp_size) in vnode_pager_generic_getpages()
1082 bytecount = object->un_pager.vnp.vnp_size - foff; in vnode_pager_generic_getpages()
1083 secmask = bo->bo_bsize - 1; in vnode_pager_generic_getpages()
1092 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 && in vnode_pager_generic_getpages()
1094 bp->b_data = unmapped_buf; in vnode_pager_generic_getpages()
1095 bp->b_offset = 0; in vnode_pager_generic_getpages()
1097 bp->b_data = bp->b_kvabase; in vnode_pager_generic_getpages()
1098 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); in vnode_pager_generic_getpages()
1102 bp->b_iocmd = BIO_READ; in vnode_pager_generic_getpages()
1103 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); in vnode_pager_generic_getpages()
1104 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); in vnode_pager_generic_getpages()
1105 bp->b_rcred = crhold(curthread->td_ucred); in vnode_pager_generic_getpages()
1106 bp->b_wcred = crhold(curthread->td_ucred); in vnode_pager_generic_getpages()
1108 bp->b_vp = vp; in vnode_pager_generic_getpages()
1109 bp->b_bcount = bp->b_bufsize = bytecount; in vnode_pager_generic_getpages()
1110 bp->b_iooffset = dbtob(bp->b_blkno); in vnode_pager_generic_getpages()
1111 KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) == in vnode_pager_generic_getpages()
1112 (blkno0 - bp->b_blkno) * DEV_BSIZE + in vnode_pager_generic_getpages()
1113 IDX_TO_OFF(m[0]->pindex) % bsize, in vnode_pager_generic_getpages()
1116 (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex, in vnode_pager_generic_getpages()
1117 (uintmax_t)blkno0, (uintmax_t)bp->b_blkno)); in vnode_pager_generic_getpages()
1119 (void)runningbufclaim(bp, bp->b_bufsize); in vnode_pager_generic_getpages()
1122 VM_CNT_ADD(v_vnodepgsin, bp->b_npages); in vnode_pager_generic_getpages()
1125 bp->b_pgiodone = iodone; in vnode_pager_generic_getpages()
1126 bp->b_caller1 = arg; in vnode_pager_generic_getpages()
1127 bp->b_iodone = vnode_pager_generic_getpages_done_async; in vnode_pager_generic_getpages()
1128 bp->b_flags |= B_ASYNC; in vnode_pager_generic_getpages()
1133 bp->b_iodone = bdone; in vnode_pager_generic_getpages()
1137 for (i = 0; i < bp->b_npages; i++) in vnode_pager_generic_getpages()
1138 bp->b_pages[i] = NULL; in vnode_pager_generic_getpages()
1139 bp->b_vp = NULL; in vnode_pager_generic_getpages()
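
The trim logic at lines 1019-1028 keeps the whole transfer within one buffer of maxphys bytes by shaving the optional pages proportionally. Worked example: with maxphys = 128 KB (32 pages), count = 8 and rbehind = rahead = 20, trim = 48 - 32 + 1 = 17 and each side loses 17*20/40 = 8 pages, leaving a 32-page request. A standalone mirror (simple branch only, without the block-boundary roundup):

    #include <stdio.h>

    /* Mirror of the rbehind/rahead trimming; maxpages = atop(maxphys). */
    static void
    trim_readaround(int count, int *rbehind, int *rahead, int maxpages)
    {
            if (*rbehind + *rahead + count > maxpages) {
                    int trim = *rbehind + *rahead + count - maxpages + 1;
                    int sum = *rbehind + *rahead;

                    *rbehind -= trim * *rbehind / sum;
                    *rahead -= trim * *rahead / sum;
            }
    }

    int
    main(void)
    {
            int rbehind = 20, rahead = 20;

            trim_readaround(8, &rbehind, &rahead, 32);
            printf("rbehind %d rahead %d\n", rbehind, rahead);  /* 12 12 */
            return (0);
    }
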
1153 bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, in vnode_pager_generic_getpages_done_async()
1154 bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); in vnode_pager_generic_getpages_done_async()
1155 for (int i = 0; i < bp->b_npages; i++) in vnode_pager_generic_getpages_done_async()
1156 bp->b_pages[i] = NULL; in vnode_pager_generic_getpages_done_async()
1157 bp->b_vp = NULL; in vnode_pager_generic_getpages_done_async()
1169 KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0, in vnode_pager_generic_getpages_done()
1171 error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0; in vnode_pager_generic_getpages_done()
1172 object = bp->b_vp->v_object; in vnode_pager_generic_getpages_done()
1176 if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { in vnode_pager_generic_getpages_done()
1178 bp->b_data = bp->b_kvabase; in vnode_pager_generic_getpages_done()
1179 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, in vnode_pager_generic_getpages_done()
1180 bp->b_npages); in vnode_pager_generic_getpages_done()
1182 bzero(bp->b_data + bp->b_bcount, in vnode_pager_generic_getpages_done()
1183 PAGE_SIZE * bp->b_npages - bp->b_bcount); in vnode_pager_generic_getpages_done()
1186 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); in vnode_pager_generic_getpages_done()
1187 bp->b_data = unmapped_buf; in vnode_pager_generic_getpages_done()
1193 * or by the bp->b_pgiodone callback (for async requests). in vnode_pager_generic_getpages_done()
1197 for (i = 0; i < bp->b_pgbefore; i++) in vnode_pager_generic_getpages_done()
1198 vm_page_free_invalid(bp->b_pages[i]); in vnode_pager_generic_getpages_done()
1199 for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) in vnode_pager_generic_getpages_done()
1200 vm_page_free_invalid(bp->b_pages[i]); in vnode_pager_generic_getpages_done()
1207 for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); in vnode_pager_generic_getpages_done()
1208 i < bp->b_npages; i++, tfoff = nextoff) { in vnode_pager_generic_getpages_done()
1212 mt = bp->b_pages[i]; in vnode_pager_generic_getpages_done()
1216 if (nextoff <= object->un_pager.vnp.vnp_size) { in vnode_pager_generic_getpages_done()
1221 KASSERT(mt->dirty == 0, in vnode_pager_generic_getpages_done()
1222 ("%s: page %p is dirty", __func__, mt)); in vnode_pager_generic_getpages_done()
1224 ("%s: page %p is mapped", __func__, mt)); in vnode_pager_generic_getpages_done()
1234 object->un_pager.vnp.vnp_size - tfoff); in vnode_pager_generic_getpages_done()
1235 KASSERT((mt->dirty & vm_page_bits(0, in vnode_pager_generic_getpages_done()
1236 object->un_pager.vnp.vnp_size - tfoff)) == 0, in vnode_pager_generic_getpages_done()
1237 ("%s: page %p is dirty", __func__, mt)); in vnode_pager_generic_getpages_done()
1240 if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) in vnode_pager_generic_getpages_done()
1254 * backing vp's VOP_PUTPAGES.
1261 struct vnode *vp; in vnode_pager_putpages() local
1266 * to prevent a low-memory deadlock. VOP operations often need to in vnode_pager_putpages()
1280 * Call device-specific putpages function in vnode_pager_putpages()
1282 vp = object->handle; in vnode_pager_putpages()
1284 rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals); in vnode_pager_putpages()
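
The VOP_PUTPAGES issued here usually resolves to the default implementation, which hands the work back to vnode_pager_generic_putpages(); a filesystem overriding the slot can delegate the same way. This sketch (myfs_putpages is hypothetical) mirrors how the NFS client does it:

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <vm/vnode_pager.h>

    /* Hypothetical VOP_PUTPAGES reusing the generic pageout code. */
    static int
    myfs_putpages(struct vop_putpages_args *ap)
    {
            return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
                ap->a_count, ap->a_sync, ap->a_rtvals));
    }
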
1301 KASSERT(IDX_TO_OFF(m->pindex) <= offset && in vn_dirty_blk()
1302 offset < IDX_TO_OFF(m->pindex + 1), in vn_dirty_blk()
1303 ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, in vn_dirty_blk()
1305 return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); in vn_dirty_blk()
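
vn_dirty_blk() tests a single DEV_BSIZE chunk of the page's dirty mask; the index comes from vn_off2bidx() (not among the matches), which is just (off & PAGE_MASK) / DEV_BSIZE. Worked example, assuming 4 KB pages: offset 0x12345 has page offset 0x345 = 837, chunk 837/512 = 1, so bit 1 of m->dirty is tested. A mirror of the computation:

    #define PAGE_MASK       4095            /* 4 KB pages assumed */
    #define DEV_BSIZE       512

    /* Mirror of vn_off2bidx(): DEV_BSIZE chunk of 'off' within its page. */
    static int
    off2bidx(long long off)
    {
            return ((int)((off & PAGE_MASK) / DEV_BSIZE));
    }
    /* off2bidx(0x12345) == 1 -> vn_dirty_blk() tests bit 1 of m->dirty. */
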
1318 vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount, in vnode_pager_generic_putpages() argument
1332 object = vp->v_object; in vnode_pager_generic_putpages()
1338 if ((int64_t)ma[0]->pindex < 0) { in vnode_pager_generic_putpages()
1340 "attempt to write meta-data 0x%jx(%lx)\n", in vnode_pager_generic_putpages()
1341 (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); in vnode_pager_generic_putpages()
1349 poffset = IDX_TO_OFF(ma[0]->pindex); in vnode_pager_generic_putpages()
1352 * If the page-aligned write is larger than the actual file, we in vnode_pager_generic_putpages()
1354 * there is an edge case where a file may not be page-aligned and in vnode_pager_generic_putpages()
1364 if (maxsize + poffset > object->un_pager.vnp.vnp_size) { in vnode_pager_generic_putpages()
1365 if (object->un_pager.vnp.vnp_size > poffset) { in vnode_pager_generic_putpages()
1366 maxsize = object->un_pager.vnp.vnp_size - poffset; in vnode_pager_generic_putpages()
1377 m = ma[ncount - 1]; in vnode_pager_generic_putpages()
1380 ("vnode_pager_generic_putpages: page %p is not read-only", m)); in vnode_pager_generic_putpages()
1381 MPASS(m->dirty != 0); in vnode_pager_generic_putpages()
1382 vm_page_clear_dirty(m, pgoff, PAGE_SIZE - in vnode_pager_generic_putpages()
1403 m = ma[OFF_TO_IDX(prev_offset - poffset)]; in vnode_pager_generic_putpages()
1419 m = ma[OFF_TO_IDX(next_offset - poffset)]; in vnode_pager_generic_putpages()
1442 prev_resid = auio.uio_resid = aiov.iov_len = next_offset - in vnode_pager_generic_putpages()
1444 error = VOP_WRITE(vp, &auio, in vnode_pager_generic_putpages()
1445 vnode_pager_putpages_ioflags(flags), curthread->td_ucred); in vnode_pager_generic_putpages()
1447 wrsz = prev_resid - auio.uio_resid; in vnode_pager_generic_putpages()
1450 vn_printf(vp, "vnode_pager_putpages: " in vnode_pager_generic_putpages()
1451 "zero-length write at %ju resid %zd\n", in vnode_pager_generic_putpages()
1464 vn_printf(vp, "vnode_pager_putpages: I/O error %d\n", in vnode_pager_generic_putpages()
1468 vn_printf(vp, "vnode_pager_putpages: residual I/O %zd " in vnode_pager_generic_putpages()
1470 (uintmax_t)ma[0]->pindex); in vnode_pager_generic_putpages()
1476 for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) in vnode_pager_generic_putpages()
1483 rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; in vnode_pager_generic_putpages()
1497 * from saturating the buffer cache. Dummy-up the sequential in vnode_pager_putpages_ioflags()
1552 vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - in vnode_pager_undirty_pages()
1561 if (ma[i]->dirty == 0) in vnode_pager_undirty_pages()
1577 struct vnode *vp; in vnode_pager_update_writecount() local
1581 if (object->type != OBJT_VNODE) { in vnode_pager_update_writecount()
1585 old_wm = object->un_pager.vnp.writemappings; in vnode_pager_update_writecount()
1586 object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start; in vnode_pager_update_writecount()
1587 vp = object->handle; in vnode_pager_update_writecount()
1588 if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) { in vnode_pager_update_writecount()
1589 ASSERT_VOP_LOCKED(vp, "v_writecount inc"); in vnode_pager_update_writecount()
1590 VOP_ADD_WRITECOUNT_CHECKED(vp, 1); in vnode_pager_update_writecount()
1591 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", in vnode_pager_update_writecount()
1592 __func__, vp, vp->v_writecount); in vnode_pager_update_writecount()
1593 } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) { in vnode_pager_update_writecount()
1594 ASSERT_VOP_LOCKED(vp, "v_writecount dec"); in vnode_pager_update_writecount()
1595 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vnode_pager_update_writecount()
1596 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", in vnode_pager_update_writecount()
1597 __func__, vp, vp->v_writecount); in vnode_pager_update_writecount()
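
The effect of this accounting is that v_writecount moves only on the 0 <-> nonzero transitions of the object's total writable-mapping bytes: mapping a file writably twice bumps it once, and it drops only when the last writable mapping goes away. A standalone mirror of the transition logic (wm_acct/wm_update are illustrative names):

    /* Mirror of the writemappings/v_writecount transition accounting. */
    struct wm_acct {
            long    writemappings;  /* bytes of writable mappings */
            int     v_writecount;
    };

    static void
    wm_update(struct wm_acct *a, long delta)  /* bytes added or removed */
    {
            long old_wm = a->writemappings;

            a->writemappings += delta;
            if (old_wm == 0 && a->writemappings != 0)
                    a->v_writecount++;  /* first writable mapping appeared */
            else if (old_wm != 0 && a->writemappings == 0)
                    a->v_writecount--;  /* last writable mapping went away */
    }
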
1606 struct vnode *vp; in vnode_pager_release_writecount() local
1616 if (object->type != OBJT_VNODE) { in vnode_pager_release_writecount()
1625 inc = end - start; in vnode_pager_release_writecount()
1626 if (object->un_pager.vnp.writemappings != inc) { in vnode_pager_release_writecount()
1627 object->un_pager.vnp.writemappings -= inc; in vnode_pager_release_writecount()
1632 vp = object->handle; in vnode_pager_release_writecount()
1633 vhold(vp); in vnode_pager_release_writecount()
1636 vn_start_write(vp, &mp, V_WAIT); in vnode_pager_release_writecount()
1637 vn_lock(vp, LK_SHARED | LK_RETRY); in vnode_pager_release_writecount()
1646 VOP_UNLOCK(vp); in vnode_pager_release_writecount()
1647 vdrop(vp); in vnode_pager_release_writecount()
1655 *vpp = object->handle; in vnode_pager_getvp()
1659 vnode_pager_clean1(struct vnode *vp, int sync_flags) in vnode_pager_clean1() argument
1663 ASSERT_VOP_LOCKED(vp, "needs lock for writes"); in vnode_pager_clean1()
1664 obj = vp->v_object; in vnode_pager_clean1()
1674 vnode_pager_clean_sync(struct vnode *vp) in vnode_pager_clean_sync() argument
1676 vnode_pager_clean1(vp, OBJPC_SYNC); in vnode_pager_clean_sync()
1680 vnode_pager_clean_async(struct vnode *vp) in vnode_pager_clean_async() argument
1682 vnode_pager_clean1(vp, 0); in vnode_pager_clean_async()
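
Both entry points funnel into vnode_pager_clean1(), which (in the elided body) runs vm_object_page_clean() over vp->v_object. A hedged usage sketch, flushing a vnode's dirty pages synchronously while holding the lock the assert at line 1663 demands (flush_vnode_pages is hypothetical):

    #include <sys/param.h>
    #include <sys/vnode.h>

    /* Sketch: write back all dirty pages of vp before bypassing the cache. */
    static void
    flush_vnode_pages(struct vnode *vp)
    {
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
            vnode_pager_clean_sync(vp);     /* OBJPC_SYNC: wait for the I/O */
            VOP_UNLOCK(vp);
    }
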