Lines Matching +full:trim +full:- +full:data +full:- +full:valid
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * greatly re-simplify the vnode_pager.
156 object = vp->v_object; in vnode_create_vobject_any()
161 if (vn_getsize_locked(vp, &size, td->td_ucred) != 0) in vnode_create_vobject_any()
167 object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred); in vnode_create_vobject_any()
175 last = refcount_release(&object->ref_count); in vnode_create_vobject_any()
180 VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__)); in vnode_create_vobject_any()
212 obj = vp->v_object; in vnode_destroy_vobject()
213 if (obj == NULL || obj->handle != vp) in vnode_destroy_vobject()
217 MPASS(obj->type == OBJT_VNODE); in vnode_destroy_vobject()
219 if (obj->ref_count == 0) { in vnode_destroy_vobject()
220 KASSERT((obj->flags & OBJ_DEAD) == 0, in vnode_destroy_vobject()
232 BO_LOCK(&vp->v_bufobj); in vnode_destroy_vobject()
233 vp->v_bufobj.bo_flag |= BO_DEAD; in vnode_destroy_vobject()
234 BO_UNLOCK(&vp->v_bufobj); in vnode_destroy_vobject()
240 * Woe to the process that tries to page now :-). in vnode_destroy_vobject()
245 KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object)); in vnode_destroy_vobject()
267 VNPASS(vp->v_usecount > 0, vp); in vnode_pager_alloc()
269 object = vp->v_object; in vnode_pager_alloc()
278 object->un_pager.vnp.vnp_size = size; in vnode_pager_alloc()
279 object->un_pager.vnp.writemappings = 0; in vnode_pager_alloc()
280 object->domain.dr_policy = vnode_domainset; in vnode_pager_alloc()
281 object->handle = handle; in vnode_pager_alloc()
282 if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) { in vnode_pager_alloc()
288 if (vp->v_object != NULL) { in vnode_pager_alloc()
294 KASSERT(object->ref_count == 1, in vnode_pager_alloc()
295 ("leaked ref %p %d", object, object->ref_count)); in vnode_pager_alloc()
296 object->type = OBJT_DEAD; in vnode_pager_alloc()
297 refcount_init(&object->ref_count, 0); in vnode_pager_alloc()
302 vp->v_object = object; in vnode_pager_alloc()
308 if ((object->flags & OBJ_COLORED) == 0) { in vnode_pager_alloc()
327 vp = object->handle; in vnode_pager_dealloc()
333 refs = object->ref_count; in vnode_pager_dealloc()
335 object->handle = NULL; in vnode_pager_dealloc()
336 object->type = OBJT_DEAD; in vnode_pager_dealloc()
338 if (object->un_pager.vnp.writemappings > 0) { in vnode_pager_dealloc()
339 object->un_pager.vnp.writemappings = 0; in vnode_pager_dealloc()
340 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vnode_pager_dealloc()
342 __func__, vp, vp->v_writecount); in vnode_pager_dealloc()
344 vp->v_object = NULL; in vnode_pager_dealloc()
349 * following object->handle. Clear all text references now. in vnode_pager_dealloc()
354 if (vp->v_writecount < 0) in vnode_pager_dealloc()
355 vp->v_writecount = 0; in vnode_pager_dealloc()
367 struct vnode *vp = object->handle; in vnode_pager_haspage()
387 if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size) in vnode_pager_haspage()
390 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_haspage()
404 if (bn == -1) in vnode_pager_haspage()
407 poff = pindex - (reqblock * pagesperblock); in vnode_pager_haspage()
420 roundup2(object->size, pagesperblock), in vnode_pager_haspage()
423 (uintmax_t )object->size)); in vnode_pager_haspage()
425 *after += pagesperblock - (poff + 1); in vnode_pager_haspage()
426 if (pindex + *after >= object->size) in vnode_pager_haspage()
427 *after = object->size - 1 - pindex; in vnode_pager_haspage()
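
The haspage fragment above converts a page index into an FS-block index and clamps the before/after counts. A minimal userland sketch of that arithmetic; the block size, object size, page index, and the fake VOP_BMAP() run counts are all illustrative assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096

int
main(void)
{
	long bsize = 32768;			/* mnt_stat.f_iosize */
	long pagesperblock = bsize / PAGE_SIZE;	/* 8 pages per FS block */
	long objsize = 100;			/* object->size, in pages */
	long pindex = 42;			/* requested page */
	long runp = 1, runb = 0;		/* what VOP_BMAP would say */

	long reqblock = pindex / pagesperblock;
	long poff = pindex - reqblock * pagesperblock;

	long before = runb * pagesperblock + poff;
	long after = runp * pagesperblock + pagesperblock - (poff + 1);
	if (pindex + after >= objsize)		/* never run past the object */
		after = objsize - 1 - pindex;

	printf("pindex %ld: %ld pages before, %ld after\n",
	    pindex, before, after);
	return (0);
}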
442 * Internal routine clearing partial-page content
451 size = end - base; in vnode_pager_subpage_purge()
454 * Clear out partial-page garbage in case in vnode_pager_subpage_purge()
460 * Update the valid bits to reflect the blocks in vnode_pager_subpage_purge()
461 * that have been zeroed. Some of these valid in vnode_pager_subpage_purge()
476 * Clear out partial-page dirty bits. in vnode_pager_subpage_purge()
479 * valid bits. This would prevent in vnode_pager_subpage_purge()
483 vm_page_clear_dirty(m, base, end - base); in vnode_pager_subpage_purge()
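
The subpage purge above ends with vm_page_clear_dirty(m, base, end - base), which operates on a per-DEV_BSIZE bitmask. A minimal sketch of that mask math; the page_bits() helper and sample range are assumptions standing in for the kernel's vm_page_bits():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096
#define DEV_BSIZE	512

typedef uint8_t vm_page_bits_t;	/* 4096 / 512 = 8 chunks per page */

/* Bits covering the DEV_BSIZE chunks touched by [base, base + size). */
static vm_page_bits_t
page_bits(int base, int size)
{
	int first = base / DEV_BSIZE;
	int last = (base + size + DEV_BSIZE - 1) / DEV_BSIZE;

	return ((1 << last) - (1 << first));
}

int
main(void)
{
	vm_page_bits_t dirty = 0xff;	/* whole page dirty */
	int base = 1024, end = 3072;	/* purge [1024, 3072) */

	dirty &= ~page_bits(base, end - base);	/* vm_page_clear_dirty() */
	printf("dirty map after purge: 0x%02x\n", (unsigned)dirty);
	return (0);
}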
503 if ((object = vp->v_object) == NULL) in vnode_pager_setsize()
509 mp = vp->v_mount; in vnode_pager_setsize()
510 if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0) in vnode_pager_setsize()
516 if (object->type == OBJT_DEAD) { in vnode_pager_setsize()
520 KASSERT(object->type == OBJT_VNODE, in vnode_pager_setsize()
521 ("not vnode-backed object %p", object)); in vnode_pager_setsize()
522 if (nsize == object->un_pager.vnp.vnp_size) { in vnode_pager_setsize()
530 if (nsize < object->un_pager.vnp.vnp_size) { in vnode_pager_setsize()
534 if (nobjsize < object->size) in vnode_pager_setsize()
535 vm_object_page_remove(object, nobjsize, object->size, in vnode_pager_setsize()
542 * completely invalid page and mark it partially valid in vnode_pager_setsize()
557 object->un_pager.vnp.vnp_size = nsize; in vnode_pager_setsize()
559 atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); in vnode_pager_setsize()
561 object->size = nobjsize; in vnode_pager_setsize()
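
When the vnode shrinks, the setsize path above removes pages wholly past the new EOF and zeroes the tail of the straddling page. A minimal sketch of the rounding involved; the old and new sizes are assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define OFF_TO_IDX(x)	((x) / PAGE_SIZE)	/* byte offset -> page index */

int
main(void)
{
	long long vnp_size = 9 * PAGE_SIZE + 100;	/* old EOF */
	long long nsize = 2 * PAGE_SIZE + 700;		/* new, smaller EOF */

	/* Pages wholly past the new EOF get removed... */
	long long nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	printf("remove pages [%lld, %lld)\n",
	    nobjsize, OFF_TO_IDX(vnp_size + PAGE_MASK));

	/* ...and the straddling page keeps [0, nsize & PAGE_MASK) valid,
	 * with the rest zeroed. */
	printf("zero tail of page %lld from offset %lld\n",
	    OFF_TO_IDX(nsize), nsize & PAGE_MASK);
	return (0);
}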
568 * operation. Partial-page area not aligned to page boundaries will be zeroed
582 object = vp->v_object; in vnode_pager_purge_range()
636 return -1; in vnode_pager_addr()
638 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_addr()
644 if (*rtaddress != -1) in vnode_pager_addr()
649 *run -= voffset / PAGE_SIZE; in vnode_pager_addr()
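
vnode_pager_addr() above translates a byte offset into a disk address plus a run of readable pages. A minimal sketch of that translation; the fake BMAP result stands in for VOP_BMAP() and all concrete numbers are assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096
#define DEV_BSIZE	512

int
main(void)
{
	long bsize = 32768;		/* mnt_stat.f_iosize */
	long long address = 70000;	/* byte offset into the file */
	long long bn = 5120;		/* disk block VOP_BMAP would return */
	int run = 3;			/* contiguous FS blocks after it */

	long long vblock = address / bsize;	/* logical FS block */
	long long voffset = address % bsize;	/* offset within it */

	long long rtaddress = bn + voffset / DEV_BSIZE;
	run += 1;				/* include the block itself */
	run *= bsize / PAGE_SIZE;		/* blocks -> pages */
	run -= voffset / PAGE_SIZE;		/* pages already behind us */

	printf("offset %lld (block %lld) -> disk block %lld, %d pages\n",
	    address, vblock, rtaddress, run);
	return (0);
}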
679 vp = object->handle; in vnode_pager_input_smlfs()
683 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_input_smlfs()
693 if (m->valid & bits) in vnode_pager_input_smlfs()
696 address = IDX_TO_OFF(m->pindex) + i * bsize; in vnode_pager_input_smlfs()
697 if (address >= object->un_pager.vnp.vnp_size) { in vnode_pager_input_smlfs()
698 fileaddr = -1; in vnode_pager_input_smlfs()
704 if (fileaddr != -1) { in vnode_pager_input_smlfs()
708 bp->b_iocmd = BIO_READ; in vnode_pager_input_smlfs()
709 bp->b_iodone = vnode_pager_input_bdone; in vnode_pager_input_smlfs()
710 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); in vnode_pager_input_smlfs()
711 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); in vnode_pager_input_smlfs()
712 bp->b_rcred = crhold(curthread->td_ucred); in vnode_pager_input_smlfs()
713 bp->b_wcred = crhold(curthread->td_ucred); in vnode_pager_input_smlfs()
714 bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize; in vnode_pager_input_smlfs()
715 bp->b_blkno = fileaddr; in vnode_pager_input_smlfs()
717 bp->b_vp = vp; in vnode_pager_input_smlfs()
718 bp->b_bcount = bsize; in vnode_pager_input_smlfs()
719 bp->b_bufsize = bsize; in vnode_pager_input_smlfs()
720 (void)runningbufclaim(bp, bp->b_bufsize); in vnode_pager_input_smlfs()
723 bp->b_iooffset = dbtob(bp->b_blkno); in vnode_pager_input_smlfs()
728 if ((bp->b_ioflags & BIO_ERROR) != 0) { in vnode_pager_input_smlfs()
729 KASSERT(bp->b_error != 0, in vnode_pager_input_smlfs()
731 error = bp->b_error; in vnode_pager_input_smlfs()
737 bp->b_vp = NULL; in vnode_pager_input_smlfs()
744 KASSERT((m->dirty & bits) == 0, in vnode_pager_input_smlfs()
746 vm_page_bits_set(m, &m->valid, bits); in vnode_pager_input_smlfs()
774 if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) { in vnode_pager_input_old()
778 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size) in vnode_pager_input_old()
779 size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex); in vnode_pager_input_old()
780 vp = object->handle; in vnode_pager_input_old()
793 auio.uio_offset = IDX_TO_OFF(m->pindex); in vnode_pager_input_old()
799 error = VOP_READ(vp, &auio, 0, curthread->td_ucred); in vnode_pager_input_old()
801 int count = size - auio.uio_resid; in vnode_pager_input_old()
807 PAGE_SIZE - count); in vnode_pager_input_old()
813 KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m)); in vnode_pager_input_old()
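
vnode_pager_input_old() above reads what it can and zero-fills the remainder of the page. A minimal userland sketch of that pattern, using plain read(2) in place of VOP_READ(); the file name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define PAGE_SIZE	4096

int
main(void)
{
	char page[PAGE_SIZE];
	int fd = open("/etc/hosts", O_RDONLY);
	if (fd < 0)
		return (1);

	ssize_t count = read(fd, page, PAGE_SIZE);
	if (count < 0)
		count = 0;
	if (count != PAGE_SIZE)	/* short read: zero the page tail */
		memset(page + count, 0, PAGE_SIZE - count);
	printf("read %zd bytes, zeroed %zd\n", count, PAGE_SIZE - count);
	close(fd);
	return (0);
}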
839 vp = object->handle; in vnode_pager_getpages()
853 vp = object->handle; in vnode_pager_getpages_async()
862 * local filesystems, where partially valid pages can only occur at
869 return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, in vnode_pager_local_getpages()
870 ap->a_rbehind, ap->a_rahead, NULL, NULL)); in vnode_pager_local_getpages()
878 error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count, in vnode_pager_local_getpages_async()
879 ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg); in vnode_pager_local_getpages_async()
880 if (error != 0 && ap->a_iodone != NULL) in vnode_pager_local_getpages_async()
881 ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error); in vnode_pager_local_getpages_async()
909 object = vp->v_object; in vnode_pager_generic_getpages()
910 foff = IDX_TO_OFF(m[0]->pindex); in vnode_pager_generic_getpages()
911 bsize = vp->v_mount->mnt_stat.f_iosize; in vnode_pager_generic_getpages()
914 KASSERT(foff < object->un_pager.vnp.vnp_size, in vnode_pager_generic_getpages()
920 * The last page has valid blocks. Invalid part can only in vnode_pager_generic_getpages()
921 * exist at the end of file, and the page is made fully valid in vnode_pager_generic_getpages()
924 if (!vm_page_none_valid(m[count - 1]) && --count == 0) { in vnode_pager_generic_getpages()
931 MPASS((bp->b_flags & B_MAXPHYS) != 0); in vnode_pager_generic_getpages()
938 error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before); in vnode_pager_generic_getpages()
976 if (bp->b_blkno == -1) { in vnode_pager_generic_getpages()
982 KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty", in vnode_pager_generic_getpages()
989 blkno0 = bp->b_blkno; in vnode_pager_generic_getpages()
991 bp->b_blkno += (foff % bsize) / DEV_BSIZE; in vnode_pager_generic_getpages()
998 after += pagesperblock - (poff + 1); in vnode_pager_generic_getpages()
999 if (m[0]->pindex + after >= object->size) in vnode_pager_generic_getpages()
1000 after = object->size - 1 - m[0]->pindex; in vnode_pager_generic_getpages()
1003 after -= count - 1; in vnode_pager_generic_getpages()
1005 /* Trim requested rbehind/rahead to possible values. */ in vnode_pager_generic_getpages()
1012 * Check that total amount of pages fit into buf. Trim rbehind and in vnode_pager_generic_getpages()
1016 int trim, sum; in vnode_pager_generic_getpages() local
1018 trim = rbehind + rahead + count - atop(maxphys) + 1; in vnode_pager_generic_getpages()
1021 /* Round up the rbehind trim to the block size. */ in vnode_pager_generic_getpages()
1022 rbehind -= roundup(trim * rbehind / sum, pagesperblock); in vnode_pager_generic_getpages()
1026 rbehind -= trim * rbehind / sum; in vnode_pager_generic_getpages()
1027 rahead -= trim * rahead / sum; in vnode_pager_generic_getpages()
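
This is the trim block the query matched: when readbehind + readahead + the request would overflow one buf, the excess is shaved off rbehind and rahead proportionally, rounding the rbehind cut up to a block boundary when BMAP did not already clip it. A minimal userland sketch of that arithmetic; maxphys and the request sizes are illustrative assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096
#define atop(x)		((x) / PAGE_SIZE)
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
	long maxphys = 128 * 1024;	/* largest buf: 32 pages */
	int pagesperblock = 8;		/* 32K FS blocks */
	int count = 10, rbehind = 16, rahead = 16;
	int before = 16;		/* BMAP did not clip rbehind */

	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Round the rbehind trim up to block size. */
			rbehind -= roundup(trim * rbehind / sum,
			    pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	printf("trimmed to rbehind %d, rahead %d (count %d)\n",
	    rbehind, rahead, count);
	return (0);
}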
1034 * Fill in the bp->b_pages[] array with requested and optional in vnode_pager_generic_getpages()
1042 vm_object_prepare_buf_pages(object, bp->b_pages, count, in vnode_pager_generic_getpages()
1047 bp->b_pages[j] = m[j]; in vnode_pager_generic_getpages()
1049 bp->b_blkno -= IDX_TO_OFF(rbehind) / DEV_BSIZE; in vnode_pager_generic_getpages()
1050 bp->b_pgbefore = rbehind; in vnode_pager_generic_getpages()
1051 bp->b_pgafter = rahead; in vnode_pager_generic_getpages()
1052 bp->b_npages = rbehind + count + rahead; in vnode_pager_generic_getpages()
1056 *a_rbehind = bp->b_pgbefore; in vnode_pager_generic_getpages()
1058 *a_rahead = bp->b_pgafter; in vnode_pager_generic_getpages()
1061 KASSERT(bp->b_npages <= atop(maxphys), in vnode_pager_generic_getpages()
1063 for (int j = 1, prev = 0; j < bp->b_npages; j++) { in vnode_pager_generic_getpages()
1064 if (bp->b_pages[j] == bogus_page) in vnode_pager_generic_getpages()
1066 KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex == in vnode_pager_generic_getpages()
1067 j - prev, ("%s: pages array not consecutive, bp %p", in vnode_pager_generic_getpages()
1078 foff = IDX_TO_OFF(bp->b_pages[0]->pindex); in vnode_pager_generic_getpages()
1079 bytecount = ptoa(bp->b_npages); in vnode_pager_generic_getpages()
1080 if ((foff + bytecount) > object->un_pager.vnp.vnp_size) in vnode_pager_generic_getpages()
1081 bytecount = object->un_pager.vnp.vnp_size - foff; in vnode_pager_generic_getpages()
1082 secmask = bo->bo_bsize - 1; in vnode_pager_generic_getpages()
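
Before the read is issued, bytecount is clamped at EOF and then rounded up to whole bo_bsize sectors using secmask. A minimal sketch of that clamp; the sizes are assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096

int
main(void)
{
	long long vnp_size = 5 * PAGE_SIZE + 300;	/* file EOF */
	long long foff = 4 * PAGE_SIZE;			/* first page offset */
	long long bytecount = 3 * PAGE_SIZE;		/* ptoa(b_npages) */
	long secmask = 512 - 1;				/* bo_bsize - 1 */

	if (foff + bytecount > vnp_size)		/* don't read past EOF */
		bytecount = vnp_size - foff;
	bytecount = (bytecount + secmask) & ~secmask;	/* whole sectors */
	printf("issue a %lld-byte read at offset %lld\n", bytecount, foff);
	return (0);
}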
1091 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 && in vnode_pager_generic_getpages()
1093 bp->b_data = unmapped_buf; in vnode_pager_generic_getpages()
1094 bp->b_offset = 0; in vnode_pager_generic_getpages()
1096 bp->b_data = bp->b_kvabase; in vnode_pager_generic_getpages()
1097 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); in vnode_pager_generic_getpages()
1101 bp->b_iocmd = BIO_READ; in vnode_pager_generic_getpages()
1102 KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); in vnode_pager_generic_getpages()
1103 KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); in vnode_pager_generic_getpages()
1104 bp->b_rcred = crhold(curthread->td_ucred); in vnode_pager_generic_getpages()
1105 bp->b_wcred = crhold(curthread->td_ucred); in vnode_pager_generic_getpages()
1107 bp->b_vp = vp; in vnode_pager_generic_getpages()
1108 bp->b_bcount = bp->b_bufsize = bytecount; in vnode_pager_generic_getpages()
1109 bp->b_iooffset = dbtob(bp->b_blkno); in vnode_pager_generic_getpages()
1110 KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) == in vnode_pager_generic_getpages()
1111 (blkno0 - bp->b_blkno) * DEV_BSIZE + in vnode_pager_generic_getpages()
1112 IDX_TO_OFF(m[0]->pindex) % bsize, in vnode_pager_generic_getpages()
1115 (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex, in vnode_pager_generic_getpages()
1116 (uintmax_t)blkno0, (uintmax_t)bp->b_blkno)); in vnode_pager_generic_getpages()
1118 (void)runningbufclaim(bp, bp->b_bufsize); in vnode_pager_generic_getpages()
1121 VM_CNT_ADD(v_vnodepgsin, bp->b_npages); in vnode_pager_generic_getpages()
1124 bp->b_pgiodone = iodone; in vnode_pager_generic_getpages()
1125 bp->b_caller1 = arg; in vnode_pager_generic_getpages()
1126 bp->b_iodone = vnode_pager_generic_getpages_done_async; in vnode_pager_generic_getpages()
1127 bp->b_flags |= B_ASYNC; in vnode_pager_generic_getpages()
1132 bp->b_iodone = bdone; in vnode_pager_generic_getpages()
1136 for (i = 0; i < bp->b_npages; i++) in vnode_pager_generic_getpages()
1137 bp->b_pages[i] = NULL; in vnode_pager_generic_getpages()
1138 bp->b_vp = NULL; in vnode_pager_generic_getpages()
1152 bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, in vnode_pager_generic_getpages_done_async()
1153 bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); in vnode_pager_generic_getpages_done_async()
1154 for (int i = 0; i < bp->b_npages; i++) in vnode_pager_generic_getpages_done_async()
1155 bp->b_pages[i] = NULL; in vnode_pager_generic_getpages_done_async()
1156 bp->b_vp = NULL; in vnode_pager_generic_getpages_done_async()
1168 KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0, in vnode_pager_generic_getpages_done()
1170 error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0; in vnode_pager_generic_getpages_done()
1171 object = bp->b_vp->v_object; in vnode_pager_generic_getpages_done()
1175 if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { in vnode_pager_generic_getpages_done()
1177 bp->b_data = bp->b_kvabase; in vnode_pager_generic_getpages_done()
1178 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, in vnode_pager_generic_getpages_done()
1179 bp->b_npages); in vnode_pager_generic_getpages_done()
1181 bzero(bp->b_data + bp->b_bcount, in vnode_pager_generic_getpages_done()
1182 PAGE_SIZE * bp->b_npages - bp->b_bcount); in vnode_pager_generic_getpages_done()
1185 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); in vnode_pager_generic_getpages_done()
1186 bp->b_data = unmapped_buf; in vnode_pager_generic_getpages_done()
1192 * or by the bp->b_pgiodone callback (for async requests). in vnode_pager_generic_getpages_done()
1196 for (i = 0; i < bp->b_pgbefore; i++) in vnode_pager_generic_getpages_done()
1197 vm_page_free_invalid(bp->b_pages[i]); in vnode_pager_generic_getpages_done()
1198 for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) in vnode_pager_generic_getpages_done()
1199 vm_page_free_invalid(bp->b_pages[i]); in vnode_pager_generic_getpages_done()
1206 for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); in vnode_pager_generic_getpages_done()
1207 i < bp->b_npages; i++, tfoff = nextoff) { in vnode_pager_generic_getpages_done()
1211 mt = bp->b_pages[i]; in vnode_pager_generic_getpages_done()
1215 if (nextoff <= object->un_pager.vnp.vnp_size) { in vnode_pager_generic_getpages_done()
1220 KASSERT(mt->dirty == 0, in vnode_pager_generic_getpages_done()
1228 * Currently we do not set the entire page valid, in vnode_pager_generic_getpages_done()
1233 object->un_pager.vnp.vnp_size - tfoff); in vnode_pager_generic_getpages_done()
1234 KASSERT((mt->dirty & vm_page_bits(0, in vnode_pager_generic_getpages_done()
1235 object->un_pager.vnp.vnp_size - tfoff)) == 0, in vnode_pager_generic_getpages_done()
1239 if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) in vnode_pager_generic_getpages_done()
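
After the read completes, each page is marked fully valid, partially valid, or left invalid depending on where it sits relative to EOF, as in the fragment above. A minimal sketch of that per-page decision; the offsets are assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096

int
main(void)
{
	long long vnp_size = 6 * PAGE_SIZE + 1000;	/* file EOF */
	long long tfoff = 4 * PAGE_SIZE;		/* first page offset */
	int npages = 4;

	for (int i = 0; i < npages; i++, tfoff += PAGE_SIZE) {
		long long nextoff = tfoff + PAGE_SIZE;
		if (nextoff <= vnp_size)
			printf("page %d: fully valid\n", i);
		else if (tfoff < vnp_size)
			/* vm_page_set_valid_range(mt, 0, size - tfoff) */
			printf("page %d: valid [0, %lld)\n", i,
			    vnp_size - tfoff);
		else
			printf("page %d: past EOF, left invalid\n", i);
	}
	return (0);
}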
1265 * to prevent a low-memory deadlock. VOP operations often need to in vnode_pager_putpages()
1279 * Call device-specific putpages function in vnode_pager_putpages()
1281 vp = object->handle; in vnode_pager_putpages()
1300 KASSERT(IDX_TO_OFF(m->pindex) <= offset && in vn_dirty_blk()
1301 offset < IDX_TO_OFF(m->pindex + 1), in vn_dirty_blk()
1302 ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, in vn_dirty_blk()
1304 return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); in vn_dirty_blk()
1313 * underlying filesystem to write the data out asynchronously rather
1331 object = vp->v_object; in vnode_pager_generic_putpages()
1337 if ((int64_t)ma[0]->pindex < 0) { in vnode_pager_generic_putpages()
1339 "attempt to write meta-data 0x%jx(%lx)\n", in vnode_pager_generic_putpages()
1340 (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); in vnode_pager_generic_putpages()
1348 poffset = IDX_TO_OFF(ma[0]->pindex); in vnode_pager_generic_putpages()
1351 * If the page-aligned write is larger than the actual file we in vnode_pager_generic_putpages()
1353 * there is an edge case where a file may not be page-aligned where in vnode_pager_generic_putpages()
1359 * We do not under any circumstances truncate the valid bits, as in vnode_pager_generic_putpages()
1363 if (maxsize + poffset > object->un_pager.vnp.vnp_size) { in vnode_pager_generic_putpages()
1364 if (object->un_pager.vnp.vnp_size > poffset) { in vnode_pager_generic_putpages()
1365 maxsize = object->un_pager.vnp.vnp_size - poffset; in vnode_pager_generic_putpages()
1376 m = ma[ncount - 1]; in vnode_pager_generic_putpages()
1379 ("vnode_pager_generic_putpages: page %p is not read-only", m)); in vnode_pager_generic_putpages()
1380 MPASS(m->dirty != 0); in vnode_pager_generic_putpages()
1381 vm_page_clear_dirty(m, pgoff, PAGE_SIZE - in vnode_pager_generic_putpages()
1402 m = ma[OFF_TO_IDX(prev_offset - poffset)]; in vnode_pager_generic_putpages()
1418 m = ma[OFF_TO_IDX(next_offset - poffset)]; in vnode_pager_generic_putpages()
1441 prev_resid = auio.uio_resid = aiov.iov_len = next_offset - in vnode_pager_generic_putpages()
1444 vnode_pager_putpages_ioflags(flags), curthread->td_ucred); in vnode_pager_generic_putpages()
1446 wrsz = prev_resid - auio.uio_resid; in vnode_pager_generic_putpages()
1450 "zero-length write at %ju resid %zd\n", in vnode_pager_generic_putpages()
1469 (uintmax_t)ma[0]->pindex); in vnode_pager_generic_putpages()
1475 for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) in vnode_pager_generic_putpages()
1482 rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; in vnode_pager_generic_putpages()
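
The putpages path above clamps the write at EOF (rounded up to DEV_BSIZE) and discards dirty bits beyond that point in the last page. A minimal sketch of that clamp; the concrete sizes are assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define DEV_BSIZE	512

int
main(void)
{
	long long vnp_size = 10 * PAGE_SIZE + 100;	/* file EOF */
	long long poffset = 8 * PAGE_SIZE;		/* write start */
	long long maxsize = 4 * PAGE_SIZE;		/* 4 dirty pages */
	int ncount;

	if (maxsize + poffset > vnp_size) {
		maxsize = vnp_size - poffset;		/* clamp at EOF */
		ncount = (maxsize + PAGE_MASK) / PAGE_SIZE;	/* btoc() */
		int pgoff = (int)maxsize & PAGE_MASK;
		if (pgoff != 0) {
			/* Keep whole DEV_BSIZE blocks; undirty the rest. */
			pgoff = (pgoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
			printf("last page: clear dirty [%d, %d)\n",
			    pgoff, PAGE_SIZE);
		}
		printf("write %lld bytes in %d pages\n", maxsize, ncount);
	}
	return (0);
}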
1496 * from saturating the buffer cache. Dummy-up the sequential in vnode_pager_putpages_ioflags()
1518 * were actually written. eof is the offset past the last valid byte
1545 * The page contains the last valid byte in in vnode_pager_undirty_pages()
1551 vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - in vnode_pager_undirty_pages()
1560 if (ma[i]->dirty == 0) in vnode_pager_undirty_pages()
1580 if (object->type != OBJT_VNODE) { in vnode_pager_update_writecount()
1584 old_wm = object->un_pager.vnp.writemappings; in vnode_pager_update_writecount()
1585 object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start; in vnode_pager_update_writecount()
1586 vp = object->handle; in vnode_pager_update_writecount()
1587 if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) { in vnode_pager_update_writecount()
1591 __func__, vp, vp->v_writecount); in vnode_pager_update_writecount()
1592 } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) { in vnode_pager_update_writecount()
1594 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vnode_pager_update_writecount()
1596 __func__, vp, vp->v_writecount); in vnode_pager_update_writecount()
1615 if (object->type != OBJT_VNODE) { in vnode_pager_release_writecount()
1624 inc = end - start; in vnode_pager_release_writecount()
1625 if (object->un_pager.vnp.writemappings != inc) { in vnode_pager_release_writecount()
1626 object->un_pager.vnp.writemappings -= inc; in vnode_pager_release_writecount()
1631 vp = object->handle; in vnode_pager_release_writecount()
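
The two fragments above show that only the 0 <-> nonzero transitions of writemappings touch the vnode's v_writecount. A minimal sketch of that bookkeeping, folding the update and release paths into one helper; the stand-in struct and counters are assumptions replacing struct vm_object / struct vnode and VOP_ADD_WRITECOUNT():

#include <stdio.h>

struct wm {
	long long writemappings;	/* un_pager.vnp.writemappings */
	int v_writecount;		/* the vnode-side counter */
};

static void
update_writecount(struct wm *o, long long start, long long end)
{
	long long old_wm = o->writemappings;

	o->writemappings += end - start;
	if (old_wm == 0 && o->writemappings != 0)
		o->v_writecount++;	/* VOP_ADD_WRITECOUNT(vp, 1) */
	else if (old_wm != 0 && o->writemappings == 0)
		o->v_writecount--;	/* VOP_ADD_WRITECOUNT(vp, -1) */
}

int
main(void)
{
	struct wm o = { 0, 0 };

	update_writecount(&o, 0, 8192);	/* first writable mapping: +1 */
	update_writecount(&o, 0, 4096);	/* more mappings: no change */
	update_writecount(&o, 4096, 0);	/* shrink (release path) */
	update_writecount(&o, 8192, 0);	/* last one gone: -1 */
	printf("writemappings %lld, v_writecount %d\n",
	    o.writemappings, o.v_writecount);
	return (0);
}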
1654 *vpp = object->handle; in vnode_pager_getvp()
1663 obj = vp->v_object; in vnode_pager_clean1()