Lines matching "charge-current-limit-mapping" (sys/vm/swap_pager.c)
1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
47 * - The new swapper uses the new radix bitmap code. This should scale
53 * - on the fly reallocation of swap during putpages. The new system
57 * - on the fly deallocation of swap
59 * - No more garbage collection required. Unnecessarily allocated swap
61 * cycled (in a high-load system) by the pager. We also do on-the-fly
123 * The 64-page limit is due to the radix code (kern/subr_blist.c).
137 * SWAP_META_PAGES-aligned and sized range to the address of an
138 * on-disk swap block (or SWAPBLK_NONE). The collection of these
139 * mappings for an entire vm object is implemented as a pc-trie.
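The comment block above pins down the metadata layout precisely enough to sketch it. The following is a reading aid only, inferred from the description and from the sb->p and sb->d[] accesses matched later in this listing; the kernel's actual definition may carry extra details:

	/*
	 * Sketch: one pc-trie node, mapping a SWAP_META_PAGES-aligned and
	 * -sized range of object page indexes to on-disk swap addresses.
	 */
	struct swblk {
		vm_pindex_t	p;			/* base pindex of the range */
		daddr_t		d[SWAP_META_PAGES];	/* swap block or SWAPBLK_NONE */
	};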
214 uip = cred->cr_ruidinfo; in swap_reserve_by_cred_rlimit()
216 prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr); in swap_reserve_by_cred_rlimit()
220 prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr); in swap_reserve_by_cred_rlimit()
222 ("negative vmsize for uid %d\n", uip->ui_uid)); in swap_reserve_by_cred_rlimit()
236 uip = cred->cr_ruidinfo; in swap_release_by_cred_rlimit()
239 prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr); in swap_release_by_cred_rlimit()
241 ("negative vmsize for uid %d\n", uip->ui_uid)); in swap_release_by_cred_rlimit()
243 atomic_subtract_long(&uip->ui_vmsize, pdecr); in swap_release_by_cred_rlimit()
252 uip = cred->cr_ruidinfo; in swap_reserve_force_rlimit()
253 atomic_add_long(&uip->ui_vmsize, pincr); in swap_reserve_force_rlimit()
260 return (swap_reserve_by_cred(incr, curthread->td_ucred)); in swap_reserve()
293 s += vm_cnt.v_page_count - vm_cnt.v_free_reserved - in swap_reserve_by_cred()
298 prev = atomic_fetchadd_long(&swap_reserved, -pincr); in swap_reserve_by_cred()
305 prev = atomic_fetchadd_long(&swap_reserved, -pincr); in swap_reserve_by_cred()
317 cred->cr_ruidinfo->ui_uid, curproc->p_pid, incr); in swap_reserve_by_cred()
347 swap_reserve_force_rlimit(pincr, curthread->td_ucred); in swap_reserve_force()
356 cred = curproc->p_ucred; in swap_release()
374 prev = atomic_fetchadd_long(&swap_reserved, -pdecr); in swap_release_by_cred()
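Lines 214-243 and 293-374 all follow one accounting idiom: optimistically add the increment with atomic_fetchadd_long(), test the returned previous value against the limit, and back the addition out with a negative fetchadd on failure. A minimal sketch of that idiom, with a hypothetical limit parameter standing in for the real RLIMIT_SWAP and overcommit checks:

	/* Sketch: optimistic per-uid swap accounting (hypothetical 'limit'). */
	static bool
	swap_reserve_sketch(struct uidinfo *uip, u_long pincr, u_long limit)
	{
		u_long prev;

		prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
		if (prev + pincr > limit) {
			/* Over the limit: undo the optimistic addition. */
			prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
			KASSERT(prev >= pincr, ("negative vmsize"));
			return (false);
		}
		return (true);
	}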
390 static int nsw_wcount_async; /* limit async write buffers */
413 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
422 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
474 static bool swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
502 range->start = SWAPBLK_NONE; in swp_pager_init_freerange()
503 range->num = 0; in swp_pager_init_freerange()
509 if (range->start + range->num == addr) { in swp_pager_update_freerange()
510 range->num++; in swp_pager_update_freerange()
513 range->start = addr; in swp_pager_update_freerange()
514 range->num = 1; in swp_pager_update_freerange()
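Lines 502-514 contain essentially the whole free-range idiom: a (start, num) pair accumulates a run of contiguous swap addresses, and a non-adjacent address flushes the run to the free routine and starts a new one. Assembled as a sketch (the struct layout is inferred from the field accesses above):

	struct page_range {
		daddr_t	start;	/* first block of the run, or SWAPBLK_NONE */
		daddr_t	num;	/* length of the run in blocks */
	};

	static void
	update_freerange_sketch(struct page_range *range, daddr_t addr)
	{
		if (range->start + range->num == addr) {
			range->num++;	/* addr extends the current run */
		} else {
			/* Not adjacent: free the finished run, start anew. */
			swp_pager_freeswapspace(range);
			range->start = addr;
			range->num = 1;
		}
	}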
536 return (sb == NULL || sb->p >= pindex ? in swblk_start()
545 return (SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, in swblk_lookup()
552 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p); in swblk_lookup_remove()
558 return (pctrie_is_empty(&object->un_pager.swp.swp_blks)); in swblk_is_empty()
572 MPASS((object->flags & OBJ_SWAP) != 0); in swblk_iter_init_only()
573 pctrie_iter_init(blks, &object->un_pager.swp.swp_blks); in swblk_iter_init_only()
596 vm_pindex_t pindex, vm_pindex_t limit) in swblk_iter_limit_init() argument
599 MPASS((object->flags & OBJ_SWAP) != 0); in swblk_iter_limit_init()
600 pctrie_iter_limit_init(blks, &object->un_pager.swp.swp_blks, limit); in swblk_iter_limit_init()
630 * SWP_SIZECHECK() - update swap_pager_full indication
657 * SWAP_PAGER_INIT() - initialize the swap pager!
678 * The nsw_cluster_max is constrained by the bp->b_pages[] in swap_pager_init()
689 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
700 * Number of in-transit swap bp operations. Don't in swap_pager_swap_init()
705 * Currently we hardwire nsw_wcount_async to 4. This limit is in swap_pager_swap_init()
736 NULL, NULL, _Alignof(struct swblk) - 1, 0); in swap_pager_swap_init()
745 n -= ((n + 2) / 3); in swap_pager_swap_init()
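The retry step at line 745 subtracts (n + 2) / 3, i.e. the ceiling of n/3, so each failed zone reservation retries with roughly two thirds of the previous request: for example 32 -> 21 -> 14 -> 9, shrinking geometrically, which guarantees the retry loop terminates.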
776 object->un_pager.swp.writemappings = 0; in swap_pager_init_object()
777 object->handle = handle; in swap_pager_init_object()
779 object->cred = cred; in swap_pager_init_object()
780 object->charge = size; in swap_pager_init_object()
807 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
836 TAILQ_INSERT_TAIL(NOBJLIST(object->handle), in swap_pager_alloc()
849 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
863 KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj")); in swap_pager_dealloc()
869 if ((object->flags & OBJ_ANON) == 0 && object->handle != NULL) { in swap_pager_dealloc()
872 TAILQ_REMOVE(NOBJLIST(object->handle), object, in swap_pager_dealloc()
887 object->handle = NULL; in swap_pager_dealloc()
888 object->type = OBJT_DEAD; in swap_pager_dealloc()
891 * Release the allocation charge. in swap_pager_dealloc()
893 if (object->cred != NULL) { in swap_pager_dealloc()
894 swap_release_by_cred(object->charge, object->cred); in swap_pager_dealloc()
895 object->charge = 0; in swap_pager_dealloc()
896 crfree(object->cred); in swap_pager_dealloc()
897 object->cred = NULL; in swap_pager_dealloc()
911 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
922 * We allocate in round-robin fashion from the configured devices.
941 if ((sp->sw_flags & SW_CLOSING) == 0) in swp_pager_getswapspace()
942 blk = blist_alloc(sp->sw_blist, &npages, mpages); in swp_pager_getswapspace()
949 mpages = npages - 1; in swp_pager_getswapspace()
955 blk += sp->sw_first; in swp_pager_getswapspace()
956 sp->sw_used += npages; in swp_pager_getswapspace()
957 swap_pager_avail -= npages; in swp_pager_getswapspace()
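The matched lines outline the policy stated at line 922: walk the configured devices, skip any that are closing, and take the first blist that can satisfy the request, converting the device-relative block to a global one by adding sw_first. A condensed sketch, with locking, the rotor (swdevhd) update, and the size-halving backoff at line 949 elided:

	static daddr_t
	getswapspace_sketch(int *io_npages)
	{
		struct swdevt *sp;
		daddr_t blk;
		int mpages, npages;

		npages = mpages = *io_npages;
		TAILQ_FOREACH(sp, &swtailq, sw_list) {
			if ((sp->sw_flags & SW_CLOSING) != 0)
				continue;	/* device is being removed */
			blk = blist_alloc(sp->sw_blist, &npages, mpages);
			if (blk != SWAPBLK_NONE) {
				*io_npages = npages;	/* may be fewer */
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				return (blk + sp->sw_first);
			}
		}
		return (SWAPBLK_NONE);
	}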
977 return (blk >= sp->sw_first && blk < sp->sw_end); in swp_pager_isondev()
987 if (swp_pager_isondev(bp->b_blkno, sp)) { in swp_pager_strategy()
989 if ((sp->sw_flags & SW_UNMAPPED) != 0 && in swp_pager_strategy()
991 bp->b_data = unmapped_buf; in swp_pager_strategy()
992 bp->b_offset = 0; in swp_pager_strategy()
994 pmap_qenter((vm_offset_t)bp->b_data, in swp_pager_strategy()
995 &bp->b_pages[0], bp->b_bcount / PAGE_SIZE); in swp_pager_strategy()
997 sp->sw_strategy(bp, sp); in swp_pager_strategy()
1005 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
1017 blk = range->start; in swp_pager_freeswapspace()
1018 npages = range->num; in swp_pager_freeswapspace()
1024 sp->sw_used -= npages; in swp_pager_freeswapspace()
1030 if ((sp->sw_flags & SW_CLOSING) == 0) { in swp_pager_freeswapspace()
1031 blist_free(sp->sw_blist, blk - sp->sw_first, in swp_pager_freeswapspace()
1044 * SYSCTL_SWAP_FRAGMENTATION() - produce raw swap space stats
1060 if (vn_isdisk(sp->sw_vp)) in sysctl_swap_fragmentation()
1061 devname = devtoname(sp->sw_vp->v_rdev); in sysctl_swap_fragmentation()
1065 blist_stats(sp->sw_blist, &sbuf); in sysctl_swap_fragmentation()
1074 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
1089 MPASS((object->flags & OBJ_SWAP) != 0); in swap_pager_freespace()
1097 MPASS((object->flags & OBJ_SWAP) != 0); in swap_pager_freespace_pgo()
1103 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
1108 * Returns 0 on success, -1 on failure.
1123 n = MIN(size - i, INT_MAX); in swap_pager_reserve()
1128 return (-1); in swap_pager_reserve()
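Given the 0/-1 convention at line 1108 and the INT_MAX chunking at line 1123, a hypothetical caller pre-reserving backing store for an entire object would look like:

	/* Hypothetical caller of swap_pager_reserve(). */
	if (swap_pager_reserve(object, 0, object->size) < 0)
		return (ENOMEM);	/* swap space exhausted */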
1143 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
1169 if (destroysource && (srcobject->flags & OBJ_ANON) == 0 && in swap_pager_copy()
1170 srcobject->handle != NULL) { in swap_pager_copy()
1174 TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject, in swap_pager_copy()
1184 swp_pager_meta_transfer(srcobject, dstobject, offset, dstobject->size); in swap_pager_copy()
1194 * SWP_PAGER_HASPAGE_ITER() - determine if we have good backing store for
1224 * find backwards-looking contiguous good backing store in swp_pager_haspage_iter()
1230 blk = swp_pager_meta_lookup(blks, pindex - i); in swp_pager_haspage_iter()
1231 if (blk != blk0 - i) in swp_pager_haspage_iter()
1234 *before = i - 1; in swp_pager_haspage_iter()
1238 * find forward-looking contiguous good backing store in swp_pager_haspage_iter()
1246 *after = i - 1; in swp_pager_haspage_iter()
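Both scans use the same probe: if pindex is backed by block blk0, then pindex - i must be backed by blk0 - i (and pindex + i by blk0 + i) for the run to count as contiguous on disk. The backward half, condensed into a sketch with a hypothetical maxbehind in place of the real loop bound:

	/* Count pages whose backing blocks precede blk0 contiguously. */
	for (i = 1; i < maxbehind; i++) {
		blk = swp_pager_meta_lookup(blks, pindex - i);
		if (blk != blk0 - i)
			break;
	}
	*before = i - 1;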
1252 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
1274 KASSERT((m->object->flags & OBJ_SWAP) != 0, in swap_pager_unswapped_acct()
1276 if ((m->a.flags & PGA_SWAP_FREE) != 0) in swap_pager_unswapped_acct()
1287 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
1294 * calls us in a special-case situation
1298 * does NOT change the m->dirty status of the page. Also: MADV_FREE
1316 obj = m->object; in swap_pager_unswapped()
1325 if ((m->a.flags & (PGA_SWAP_SPACE | PGA_SWAP_FREE)) == in swap_pager_unswapped()
1334 sb = swblk_lookup(m->object, m->pindex); in swap_pager_unswapped()
1337 range.start = sb->d[m->pindex % SWAP_META_PAGES]; in swap_pager_unswapped()
1342 sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE; in swap_pager_unswapped()
1343 swp_pager_free_empty_swblk(m->object, sb); in swap_pager_unswapped()
1347 * swap_pager_getpages_locked() - bring pages in from swap
1366 KASSERT((object->flags & OBJ_SWAP) != 0, in swap_pager_getpages_locked()
1368 pindex = ma[0]->pindex; in swap_pager_getpages_locked()
1375 KASSERT(count - 1 <= rahead, in swap_pager_getpages_locked()
1381 * prevents clustering from re-creating pages which are being in swap_pager_getpages_locked()
1384 if ((object->flags & (OBJ_SPLIT | OBJ_DEAD)) != 0) { in swap_pager_getpages_locked()
1385 rahead = count - 1; in swap_pager_getpages_locked()
1390 rahead = a_rahead != NULL ? imin(*a_rahead, rahead - count + 1) : 0; in swap_pager_getpages_locked()
1392 vm_object_prepare_buf_pages(object, bp->b_pages, count, &rbehind, in swap_pager_getpages_locked()
1394 bp->b_npages = rbehind + count + rahead; in swap_pager_getpages_locked()
1395 for (int i = 0; i < bp->b_npages; i++) in swap_pager_getpages_locked()
1396 bp->b_pages[i]->oflags |= VPO_SWAPINPROG; in swap_pager_getpages_locked()
1397 bp->b_blkno = swp_pager_meta_lookup(blks, pindex - rbehind); in swap_pager_getpages_locked()
1398 KASSERT(bp->b_blkno != SWAPBLK_NONE, in swap_pager_getpages_locked()
1401 vm_object_pip_add(object, bp->b_npages); in swap_pager_getpages_locked()
1403 MPASS((bp->b_flags & B_MAXPHYS) != 0); in swap_pager_getpages_locked()
1411 bp->b_flags |= B_PAGING; in swap_pager_getpages_locked()
1412 bp->b_iocmd = BIO_READ; in swap_pager_getpages_locked()
1413 bp->b_iodone = swp_pager_async_iodone; in swap_pager_getpages_locked()
1414 bp->b_rcred = crhold(thread0.td_ucred); in swap_pager_getpages_locked()
1415 bp->b_wcred = crhold(thread0.td_ucred); in swap_pager_getpages_locked()
1416 bp->b_bufsize = bp->b_bcount = ptoa(bp->b_npages); in swap_pager_getpages_locked()
1417 bp->b_pgbefore = rbehind; in swap_pager_getpages_locked()
1418 bp->b_pgafter = rahead; in swap_pager_getpages_locked()
1421 VM_CNT_ADD(v_swappgsin, bp->b_npages); in swap_pager_getpages_locked()
1444 while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) { in swap_pager_getpages_locked()
1445 ma[0]->oflags |= VPO_SWAPSLEEP; in swap_pager_getpages_locked()
1447 if (VM_OBJECT_SLEEP(object, &object->handle, PSWP, in swap_pager_getpages_locked()
1451 bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount); in swap_pager_getpages_locked()
1460 if (ma[i]->valid != VM_PAGE_BITS_ALL) in swap_pager_getpages_locked()
1469 * to all-zeros later. in swap_pager_getpages_locked()
1525 * should ensure that no low-memory deadlock occurs. This is an area
1533 * The parent has soft-busy'd the pages it passes us and will unbusy
1549 KASSERT(count == 0 || ma[0]->object == object, in swap_pager_putpages()
1551 __func__, object, ma[0]->object)); in swap_pager_putpages()
1564 n = min(count - i, nsw_cluster_max); in swap_pager_putpages()
1571 nsw_wcount_async--; in swap_pager_putpages()
1591 KASSERT(mreq->object == object, in swap_pager_putpages()
1593 __func__, mreq->object, object)); in swap_pager_putpages()
1595 mreq->pindex, blk + j, false); in swap_pager_putpages()
1598 MPASS(mreq->dirty == VM_PAGE_BITS_ALL); in swap_pager_putpages()
1599 mreq->oflags |= VPO_SWAPINPROG; in swap_pager_putpages()
1604 MPASS((bp->b_flags & B_MAXPHYS) != 0); in swap_pager_putpages()
1606 bp->b_flags |= B_ASYNC; in swap_pager_putpages()
1607 bp->b_flags |= B_PAGING; in swap_pager_putpages()
1608 bp->b_iocmd = BIO_WRITE; in swap_pager_putpages()
1610 bp->b_rcred = crhold(thread0.td_ucred); in swap_pager_putpages()
1611 bp->b_wcred = crhold(thread0.td_ucred); in swap_pager_putpages()
1612 bp->b_bcount = PAGE_SIZE * n; in swap_pager_putpages()
1613 bp->b_bufsize = PAGE_SIZE * n; in swap_pager_putpages()
1614 bp->b_blkno = blk; in swap_pager_putpages()
1616 bp->b_pages[j] = ma[i + j]; in swap_pager_putpages()
1617 bp->b_npages = n; in swap_pager_putpages()
1622 bp->b_dirtyoff = 0; in swap_pager_putpages()
1623 bp->b_dirtyend = bp->b_bcount; in swap_pager_putpages()
1626 VM_CNT_ADD(v_swappgsout, bp->b_npages); in swap_pager_putpages()
1644 bp->b_iodone = swp_pager_async_iodone; in swap_pager_putpages()
1655 bp->b_iodone = bdone; in swap_pager_putpages()
1688 * Report error - unless we ran out of memory, in which case in swp_pager_async_iodone()
1691 if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) { in swp_pager_async_iodone()
1693 "swap_pager: I/O error - %s failed; blkno %ld," in swp_pager_async_iodone()
1695 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"), in swp_pager_async_iodone()
1696 (long)bp->b_blkno, in swp_pager_async_iodone()
1697 (long)bp->b_bcount, in swp_pager_async_iodone()
1698 bp->b_error in swp_pager_async_iodone()
1703 * remove the mapping for kernel virtual in swp_pager_async_iodone()
1706 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); in swp_pager_async_iodone()
1708 bp->b_data = bp->b_kvabase; in swp_pager_async_iodone()
1710 if (bp->b_npages) { in swp_pager_async_iodone()
1711 object = bp->b_pages[0]->object; in swp_pager_async_iodone()
1719 * in this case we remove the m->swapblk assignment for the page in swp_pager_async_iodone()
1723 for (i = 0; i < bp->b_npages; ++i) { in swp_pager_async_iodone()
1724 vm_page_t m = bp->b_pages[i]; in swp_pager_async_iodone()
1726 m->oflags &= ~VPO_SWAPINPROG; in swp_pager_async_iodone()
1727 if (m->oflags & VPO_SWAPSLEEP) { in swp_pager_async_iodone()
1728 m->oflags &= ~VPO_SWAPSLEEP; in swp_pager_async_iodone()
1729 wakeup(&object->handle); in swp_pager_async_iodone()
1735 if (bp->b_ioflags & BIO_ERROR) { in swp_pager_async_iodone()
1742 if (bp->b_iocmd == BIO_READ) { in swp_pager_async_iodone()
1744 * NOTE: for reads, m->dirty will probably in swp_pager_async_iodone()
1749 if (i < bp->b_pgbefore || in swp_pager_async_iodone()
1750 i >= bp->b_npages - bp->b_pgafter) in swp_pager_async_iodone()
1758 MPASS(m->dirty == VM_PAGE_BITS_ALL); in swp_pager_async_iodone()
1764 } else if (bp->b_iocmd == BIO_READ) { in swp_pager_async_iodone()
1766 * NOTE: for reads, m->dirty will probably be in swp_pager_async_iodone()
1769 * swap in a low-swap situation. I don't think we'd in swp_pager_async_iodone()
1776 KASSERT(m->dirty == 0, in swp_pager_async_iodone()
1780 if (i < bp->b_pgbefore || in swp_pager_async_iodone()
1781 i >= bp->b_npages - bp->b_pgafter) in swp_pager_async_iodone()
1806 vm_object_pip_wakeupn(object, bp->b_npages); in swp_pager_async_iodone()
1815 if (bp->b_vp) { in swp_pager_async_iodone()
1816 bp->b_vp = NULL; in swp_pager_async_iodone()
1817 bp->b_bufobj = NULL; in swp_pager_async_iodone()
1822 if (bp->b_flags & B_ASYNC) { in swp_pager_async_iodone()
1828 uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp); in swp_pager_async_iodone()
1865 if (sb->d[i] != SWAPBLK_NONE) in swap_pager_swapped_pages()
1890 KASSERT((object->flags & OBJ_SWAP) != 0, in swap_pager_swapoff_object()
1892 KASSERT((object->flags & OBJ_DEAD) == 0, in swap_pager_swapoff_object()
1894 KASSERT((sp->sw_flags & SW_CLOSING) != 0, in swap_pager_swapoff_object()
1904 if (sb->d[i] == SWAPBLK_NONE) in swap_pager_swapoff_object()
1907 if (!swp_pager_isondev(sb->d[i], sp)) { in swap_pager_swapoff_object()
1918 if (m != NULL && (m->oflags & VPO_SWAPINPROG) != 0) { in swap_pager_swapoff_object()
1919 m->oflags |= VPO_SWAPSLEEP; in swap_pager_swapoff_object()
1920 VM_OBJECT_SLEEP(object, &object->handle, PSWP, in swap_pager_swapoff_object()
1930 swp_pager_force_dirty(&range, m, &sb->d[i]); in swap_pager_swapoff_object()
1967 if ((object->flags & OBJ_DEAD) != 0) { in swap_pager_swapoff_object()
2025 if ((object->flags & OBJ_SWAP) == 0) in swap_pager_swapoff()
2028 /* Depends on type-stability. */ in swap_pager_swapoff()
2034 if ((object->flags & OBJ_DEAD) != 0) in swap_pager_swapoff()
2044 if ((object->flags & OBJ_SWAP) == 0) in swap_pager_swapoff()
2055 if (sp->sw_used) { in swap_pager_swapoff()
2065 sp->sw_used); in swap_pager_swapoff()
2086 * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
2089 swp_pager_swblk_empty(struct swblk *sb, int start, int limit) in swp_pager_swblk_empty() argument
2093 MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES); in swp_pager_swblk_empty()
2094 for (i = start; i < limit; i++) { in swp_pager_swblk_empty()
2095 if (sb->d[i] != SWAPBLK_NONE) in swp_pager_swblk_empty()
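Lines 2089-2095 match nearly the whole predicate; filling in the elided lines in the obvious way yields the complete function:

	static bool
	swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
	{
		int i;

		MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
		for (i = start; i < limit; i++) {
			if (sb->d[i] != SWAPBLK_NONE)
				return (false);
		}
		return (true);
	}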
2102 * SWP_PAGER_FREE_EMPTY_SWBLK() - frees if a block is free
2117 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
2148 sb->p = rounddown(pindex, SWAP_META_PAGES); in swp_pager_meta_build()
2150 sb->d[i] = SWAPBLK_NONE; in swp_pager_meta_build()
2210 MPASS(sb->p == rounddown(pindex, SWAP_META_PAGES)); in swp_pager_meta_build()
2214 prev_swapblk = sb->d[modpi]; in swp_pager_meta_build()
2217 sb->d[modpi] = swapblk; in swp_pager_meta_build()
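Lines 2148 and 2210-2217 show how a page index splits across the trie: the node key is rounddown(pindex, SWAP_META_PAGES) and the slot within the node is pindex % SWAP_META_PAGES. Taking SWAP_META_PAGES = 32 for illustration, pindex 1000 lands in the node keyed 992, at slot 8.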
2232 * SWP_PAGER_META_TRANSFER() - transfer a range of blocks in the srcobject's
2247 int d_mask, i, limit, start; in swp_pager_meta_transfer() local
2264 limit = MIN(last - srcblks.index, SWAP_META_PAGES); in swp_pager_meta_transfer()
2265 for (i = start; i < limit; i++) { in swp_pager_meta_transfer()
2266 if (sb->d[i] == SWAPBLK_NONE) in swp_pager_meta_transfer()
2269 srcblks.index + i - pindex, sb->d[i], true); in swp_pager_meta_transfer()
2270 if (blk == sb->d[i]) { in swp_pager_meta_transfer()
2280 swp_pager_update_freerange(&range, sb->d[i]); in swp_pager_meta_transfer()
2282 sb->d[i] = SWAPBLK_NONE; in swp_pager_meta_transfer()
2285 swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) { in swp_pager_meta_transfer()
2293 i = ffs(d_mask) - 1; in swp_pager_meta_transfer()
2295 srcblks.index + i - pindex, d[i], false); in swp_pager_meta_transfer()
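Line 2293 is the standard mask-walk idiom for revisiting the slots recorded in d_mask: ffs() returns the 1-based position of the lowest set bit, and clearing that bit advances the walk. In isolation:

	/* Visit each slot index recorded in d_mask, lowest first. */
	while (d_mask != 0) {
		i = ffs(d_mask) - 1;	/* ffs() is 1-based */
		d_mask &= ~(1 << i);	/* retire the bit just handled */
		/* ... re-insert block d[i] into the destination object ... */
	}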
2311 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2327 int i, limit, start; in swp_pager_meta_free() local
2341 limit = MIN(last - blks.index, SWAP_META_PAGES); in swp_pager_meta_free()
2342 for (i = start; i < limit; i++) { in swp_pager_meta_free()
2343 if (sb->d[i] == SWAPBLK_NONE) in swp_pager_meta_free()
2345 swp_pager_update_freerange(&range, sb->d[i]); in swp_pager_meta_free()
2351 sb->d[i] = SWAPBLK_NONE; in swp_pager_meta_free()
2354 swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) { in swp_pager_meta_free()
2371 if (sb->d[i] != SWAPBLK_NONE) in swp_pager_meta_free_block()
2372 swp_pager_update_freerange(range, sb->d[i]); in swp_pager_meta_free_block()
2378 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2391 SWAP_PCTRIE_RECLAIM_CALLBACK(&object->un_pager.swp.swp_blks, in swp_pager_meta_free_all()
2397 * SWP_PAGER_METACTL() - misc control of swap meta data.
2401 * looked-up, popped, or SWAPBLK_NONE if the block was invalid.
2415 return (sb->d[pindex % SWAP_META_PAGES]); in swp_pager_meta_lookup()
2431 if (blks->index < pindex) { in swap_pager_iter_find_least()
2433 if (sb->d[i] != SWAPBLK_NONE) in swap_pager_iter_find_least()
2434 return (blks->index + i); in swap_pager_iter_find_least()
2440 if (sb->d[i] != SWAPBLK_NONE) in swap_pager_iter_find_least()
2441 return (blks->index + i); in swap_pager_iter_find_least()
2482 swap_index = object->size; in swap_pager_seek_data()
2503 sb->d[pindex % SWAP_META_PAGES] != SWAPBLK_NONE)) in swap_pager_seek_hole()
2521 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in swap_pager_scan_all_shadowed()
2523 backing_object = object->backing_object; in swap_pager_scan_all_shadowed()
2525 if ((backing_object->flags & OBJ_ANON) == 0) in swap_pager_scan_all_shadowed()
2528 KASSERT((object->flags & OBJ_ANON) != 0, in swap_pager_scan_all_shadowed()
2530 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in swap_pager_scan_all_shadowed()
2531 pi_ubound = MIN(backing_object->size, in swap_pager_scan_all_shadowed()
2532 backing_offset_index + object->size); in swap_pager_scan_all_shadowed()
2540 * parent object's mapping of the backing object. in swap_pager_scan_all_shadowed()
2542 pv = ps = pi = backing_offset_index - 1; in swap_pager_scan_all_shadowed()
2546 pv = p != NULL ? p->pindex : backing_object->size; in swap_pager_scan_all_shadowed()
2586 new_pindex = pi - backing_offset_index; in swap_pager_scan_all_shadowed()
2642 UIO_USERSPACE, uap->name); in sys_swapon()
2652 } else if (vp->v_type == VREG && in sys_swapon()
2653 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 && in sys_swapon()
2654 (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) { in sys_swapon()
2699 * First chop nblks off to page-align it, then convert. in swaponsomething()
2701 * sw->sw_nblks is in page-sized chunks now too. in swaponsomething()
2703 nblks &= ~(ctodb(1) - 1); in swaponsomething()
2707 sp->sw_blist = blist_create(nblks, M_WAITOK); in swaponsomething()
2708 sp->sw_vp = vp; in swaponsomething()
2709 sp->sw_id = id; in swaponsomething()
2710 sp->sw_dev = dev; in swaponsomething()
2711 sp->sw_nblks = nblks; in swaponsomething()
2712 sp->sw_used = 0; in swaponsomething()
2713 sp->sw_strategy = strategy; in swaponsomething()
2714 sp->sw_close = close; in swaponsomething()
2715 sp->sw_flags = flags; in swaponsomething()
2721 blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE), in swaponsomething()
2722 nblks - howmany(BBSIZE, PAGE_SIZE)); in swaponsomething()
2727 if (tsp->sw_end >= dvbase) { in swaponsomething()
2730 * in order to definitively prevent any cross-device in swaponsomething()
2733 dvbase = tsp->sw_end + 1; in swaponsomething()
2736 sp->sw_first = dvbase; in swaponsomething()
2737 sp->sw_end = dvbase + nblks; in swaponsomething()
2740 swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE); in swaponsomething()
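Two arithmetic details in swaponsomething() are worth unpacking. The mask at line 2703 rounds the device size down to whole pages: ctodb(1) is the number of DEV_BSIZE blocks per page, so assuming a 4096-byte page and 512-byte sectors it is 8 and the mask is ~7. And lines 2721-2740 withhold the first howmany(BBSIZE, PAGE_SIZE) pages (two pages under those same assumptions) from both the blist and swap_pager_avail, keeping the boot-block area out of use.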
2783 if (sp->sw_vp == vp) in kern_swapoff()
2791 error = swapoff_one(sp, td->td_ucred, flags); in kern_swapoff()
2802 return (kern_swapoff(td, uap->name, UIO_USERSPACE, 0)); in freebsd13_swapoff()
2809 return (kern_swapoff(td, uap->name, UIO_USERSPACE, uap->flags)); in sys_swapoff()
2822 (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY); in swapoff_one()
2823 error = mac_system_check_swapoff(cred, sp->sw_vp); in swapoff_one()
2824 (void) VOP_UNLOCK(sp->sw_vp); in swapoff_one()
2828 nblks = sp->sw_nblks; in swapoff_one()
2851 sp->sw_flags |= SW_CLOSING; in swapoff_one()
2852 swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks); in swapoff_one()
2853 swap_total -= nblks; in swapoff_one()
2861 sp->sw_close(curthread, sp); in swapoff_one()
2863 sp->sw_id = NULL; in swapoff_one()
2865 nswapdev--; in swapoff_one()
2873 blist_destroy(sp->sw_blist); in swapoff_one()
2890 if (vn_isdisk(sp->sw_vp)) in swapoff_all()
2891 devname = devtoname(sp->sw_vp->v_rdev); in swapoff_all()
2913 *used = swap_total - swap_pager_avail - in swap_pager_status()
2932 xs->xsw_version = XSWDEV_VERSION; in swap_dev_info()
2933 xs->xsw_dev = sp->sw_dev; in swap_dev_info()
2934 xs->xsw_flags = sp->sw_flags; in swap_dev_info()
2935 xs->xsw_nblks = sp->sw_nblks; in swap_dev_info()
2936 xs->xsw_used = sp->sw_used; in swap_dev_info()
2938 if (vn_isdisk(sp->sw_vp)) in swap_dev_info()
2939 tmp_devname = devtoname(sp->sw_vp->v_rdev); in swap_dev_info()
2992 if (req->oldlen == sizeof(xs32)) { in sysctl_vm_swap_info()
3005 if (req->oldlen == sizeof(xs11)) { in sysctl_vm_swap_info()
3041 int i, limit, start; in vmspace_swap_count() local
3043 map = &vmspace->vm_map; in vmspace_swap_count()
3047 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vmspace_swap_count()
3049 object = cur->object.vm_object; in vmspace_swap_count()
3050 if (object == NULL || (object->flags & OBJ_SWAP) == 0) in vmspace_swap_count()
3053 if ((object->flags & OBJ_SWAP) == 0) in vmspace_swap_count()
3055 pi = OFF_TO_IDX(cur->offset); in vmspace_swap_count()
3056 e = pi + OFF_TO_IDX(cur->end - cur->start); in vmspace_swap_count()
3060 limit = MIN(e - blks.index, SWAP_META_PAGES); in vmspace_swap_count()
3061 for (i = start; i < limit; i++) { in vmspace_swap_count()
3062 if (sb->d[i] != SWAPBLK_NONE) in vmspace_swap_count()
3095 g_access(cp, -1, -1, 0); in swapgeom_close_ev()
3108 cp->index++; in swapgeom_acquire()
3121 cp->index--; in swapgeom_release()
3122 if (cp->index == 0) { in swapgeom_release()
3124 sp->sw_id = NULL; in swapgeom_release()
3135 bp = bp2->bio_caller2; in swapgeom_done()
3136 cp = bp2->bio_from; in swapgeom_done()
3137 bp->b_ioflags = bp2->bio_flags; in swapgeom_done()
3138 if (bp2->bio_error) in swapgeom_done()
3139 bp->b_ioflags |= BIO_ERROR; in swapgeom_done()
3140 bp->b_resid = bp->b_bcount - bp2->bio_completed; in swapgeom_done()
3141 bp->b_error = bp2->bio_error; in swapgeom_done()
3142 bp->b_caller1 = NULL; in swapgeom_done()
3144 sp = bp2->bio_caller1; in swapgeom_done()
3158 cp = sp->sw_id; in swapgeom_strategy()
3161 bp->b_error = ENXIO; in swapgeom_strategy()
3162 bp->b_ioflags |= BIO_ERROR; in swapgeom_strategy()
3168 if (bp->b_iocmd == BIO_WRITE) in swapgeom_strategy()
3176 bp->b_error = ENOMEM; in swapgeom_strategy()
3177 bp->b_ioflags |= BIO_ERROR; in swapgeom_strategy()
3183 bp->b_caller1 = bio; in swapgeom_strategy()
3184 bio->bio_caller1 = sp; in swapgeom_strategy()
3185 bio->bio_caller2 = bp; in swapgeom_strategy()
3186 bio->bio_cmd = bp->b_iocmd; in swapgeom_strategy()
3187 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE; in swapgeom_strategy()
3188 bio->bio_length = bp->b_bcount; in swapgeom_strategy()
3189 bio->bio_done = swapgeom_done; in swapgeom_strategy()
3190 bio->bio_flags |= BIO_SWAP; in swapgeom_strategy()
3192 bio->bio_ma = bp->b_pages; in swapgeom_strategy()
3193 bio->bio_data = unmapped_buf; in swapgeom_strategy()
3194 bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK; in swapgeom_strategy()
3195 bio->bio_ma_n = bp->b_npages; in swapgeom_strategy()
3196 bio->bio_flags |= BIO_UNMAPPED; in swapgeom_strategy()
3198 bio->bio_data = bp->b_data; in swapgeom_strategy()
3199 bio->bio_ma = NULL; in swapgeom_strategy()
3213 if (sp->sw_id == cp) { in swapgeom_orphan()
3214 sp->sw_flags |= SW_CLOSING; in swapgeom_orphan()
3223 cp->index--; in swapgeom_orphan()
3224 destroy = ((sp != NULL) && (cp->index == 0)); in swapgeom_orphan()
3226 sp->sw_id = NULL; in swapgeom_orphan()
3238 cp = sw->sw_id; in swapgeom_close()
3239 sw->sw_id = NULL; in swapgeom_close()
3266 cp = sp->sw_id; in swapongeom_locked()
3267 if (cp != NULL && cp->provider == pp) { in swapongeom_locked()
3276 cp->index = 1; /* Number of active I/Os, plus one for being active. */ in swapongeom_locked()
3277 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; in swapongeom_locked()
3283 * set an exclusive count :-( in swapongeom_locked()
3291 nblks = pp->mediasize / DEV_BSIZE; in swapongeom_locked()
3294 (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0); in swapongeom_locked()
3304 if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) { in swapongeom()
3308 error = swapongeom_locked(vp->v_rdev, vp); in swapongeom()
3327 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first); in swapdev_strategy()
3329 vp2 = sp->sw_id; in swapdev_strategy()
3331 if (bp->b_iocmd == BIO_WRITE) { in swapdev_strategy()
3333 if (bp->b_bufobj) in swapdev_strategy()
3334 bufobj_wdrop(bp->b_bufobj); in swapdev_strategy()
3335 bufobj_wref(&vp2->v_bufobj); in swapdev_strategy()
3339 if (bp->b_bufobj != &vp2->v_bufobj) in swapdev_strategy()
3340 bp->b_bufobj = &vp2->v_bufobj; in swapdev_strategy()
3341 bp->b_vp = vp2; in swapdev_strategy()
3342 bp->b_iooffset = dbtob(bp->b_blkno); in swapdev_strategy()
3352 vp = sp->sw_vp; in swapdev_close()
3354 VOP_CLOSE(vp, FREAD | FWRITE, td->td_ucred, td); in swapdev_close()
3369 if (sp->sw_id == vp) { in swaponvp()
3377 error = mac_system_check_swapon(td->td_ucred, vp); in swaponvp()
3380 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL); in swaponvp()
3396 if (error != 0 || req->newptr == NULL) in sysctl_swap_async_max()
3405 * Adjust difference. If the current async count is too low, in sysctl_swap_async_max()
3409 n = new - nsw_wcount_async_max; in sysctl_swap_async_max()
3415 nsw_wcount_async_max -= nsw_wcount_async; in sysctl_swap_async_max()
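Lines 3409-3415 split the requested delta n = new - nsw_wcount_async_max between the free buffer count and the recorded maximum. A simplified sketch of the intent (the real code runs under a mutex and sleeps until in-flight writes return enough buffers to absorb a reduction):

	n = new - nsw_wcount_async_max;
	if (nsw_wcount_async + n >= 0) {
		/* The free count can absorb the whole change now. */
		nsw_wcount_async += n;
		nsw_wcount_async_max += n;
		wakeup(&nsw_wcount_async);
	} else {
		/* Take what is free; the rest waits for completions. */
		nsw_wcount_async_max -= nsw_wcount_async;
		nsw_wcount_async = 0;
	}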
3432 KASSERT((object->flags & OBJ_ANON) == 0, in swap_pager_update_writecount()
3434 object->un_pager.swp.writemappings += (vm_ooffset_t)end - start; in swap_pager_update_writecount()
3444 KASSERT((object->flags & OBJ_ANON) == 0, in swap_pager_release_writecount()
3446 KASSERT(object->un_pager.swp.writemappings >= (vm_ooffset_t)end - start, in swap_pager_release_writecount()
3448 (uintmax_t)object->un_pager.swp.writemappings, in swap_pager_release_writecount()
3449 (uintmax_t)((vm_ooffset_t)end - start))); in swap_pager_release_writecount()
3450 object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start; in swap_pager_release_writecount()