
1 /*-
2 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
14 * The Mach Operating System project at Carnegie-Mellon University.
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
65 * Pittsburgh PA 15213-3890
72 * The proverbial page-out daemon.
153 "Panic on the given number of out-of-memory errors instead of "
183 "back-to-back calls to oom detector to start OOM");
203 "system-wide limit to user-wired page count");
224 KASSERT((marker->a.flags & PGA_ENQUEUED) == 0, in vm_pageout_init_scan()
228 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q); in vm_pageout_init_scan()
230 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q); in vm_pageout_init_scan()
233 vm_batchqueue_init(&ss->bq); in vm_pageout_init_scan()
234 ss->pq = pq; in vm_pageout_init_scan()
235 ss->marker = marker; in vm_pageout_init_scan()
236 ss->maxscan = maxscan; in vm_pageout_init_scan()
237 ss->scanned = 0; in vm_pageout_init_scan()
246 pq = ss->pq; in vm_pageout_end_scan()
248 KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0, in vm_pageout_end_scan()
249 ("marker %p not enqueued", ss->marker)); in vm_pageout_end_scan()
251 TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q); in vm_pageout_end_scan()
252 vm_page_aflag_clear(ss->marker, PGA_ENQUEUED); in vm_pageout_end_scan()
253 pq->pq_pdpages += ss->scanned; in vm_pageout_end_scan()
273 marker = ss->marker; in vm_pageout_collect_batch()
274 pq = ss->pq; in vm_pageout_collect_batch()
276 KASSERT((marker->a.flags & PGA_ENQUEUED) != 0, in vm_pageout_collect_batch()
277 ("marker %p not enqueued", ss->marker)); in vm_pageout_collect_batch()
281 ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE; in vm_pageout_collect_batch()
282 m = n, ss->scanned++) { in vm_pageout_collect_batch()
284 if ((m->flags & PG_MARKER) == 0) { in vm_pageout_collect_batch()
285 KASSERT((m->a.flags & PGA_ENQUEUED) != 0, in vm_pageout_collect_batch()
287 KASSERT((m->flags & PG_FICTITIOUS) == 0, in vm_pageout_collect_batch()
289 KASSERT((m->oflags & VPO_UNMANAGED) == 0, in vm_pageout_collect_batch()
294 (void)vm_batchqueue_insert(&ss->bq, m); in vm_pageout_collect_batch()
296 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); in vm_pageout_collect_batch()
300 TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q); in vm_pageout_collect_batch()
304 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q); in vm_pageout_collect_batch()
306 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt); in vm_pageout_collect_batch()
317 if (ss->bq.bq_cnt == 0) in vm_pageout_next()
319 return (vm_batchqueue_pop(&ss->bq)); in vm_pageout_next()
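The fragments above are the batched queue-scan machinery: a marker page pinned in the page queue records the scan position, vm_pageout_collect_batch() dequeues up to VM_BATCHQUEUE_SIZE pages behind it while the queue lock is held, and vm_pageout_next() hands them out one at a time. Below is a simplified, self-contained sketch of that pattern; every name in it (struct page, scan_state, collect_batch, scan_next, BATCH_SIZE) is a hypothetical stand-in, not the kernel's API.

#include <sys/queue.h>
#include <stdbool.h>
#include <stddef.h>

#define BATCH_SIZE	7			/* stand-in for VM_BATCHQUEUE_SIZE */

struct page {
	TAILQ_ENTRY(page) link;
	bool marker;				/* true for scan markers */
};
TAILQ_HEAD(pagelist, page);

struct scan_state {
	struct pagelist *queue;			/* queue being scanned */
	struct page *marker;			/* records the scan position */
	struct page *batch[BATCH_SIZE];
	int cnt, cur;
};

/*
 * With the queue lock held, dequeue up to BATCH_SIZE real pages that follow
 * the marker, then reinsert the marker at the new scan position.
 */
static void
collect_batch(struct scan_state *ss)
{
	struct page *p, *next;

	ss->cnt = ss->cur = 0;
	for (p = TAILQ_NEXT(ss->marker, link);
	    p != NULL && ss->cnt < BATCH_SIZE; p = next) {
		next = TAILQ_NEXT(p, link);
		if (p->marker)
			continue;		/* skip other scanners' markers */
		TAILQ_REMOVE(ss->queue, p, link);
		ss->batch[ss->cnt++] = p;
	}
	TAILQ_REMOVE(ss->queue, ss->marker, link);
	if (p != NULL)
		TAILQ_INSERT_BEFORE(p, ss->marker, link);
	else
		TAILQ_INSERT_TAIL(ss->queue, ss->marker, link);
}

/*
 * Return the next page to examine, refilling the batch (and taking the queue
 * lock) only when the previous batch has been consumed.
 */
static struct page *
scan_next(struct scan_state *ss)
{
	if (ss->cur == ss->cnt) {
		/* lock the queue here */
		collect_batch(ss);
		/* unlock the queue here */
		if (ss->cnt == 0)
			return (NULL);
	}
	return (ss->batch[ss->cur++]);
}

Collecting a batch this way bounds how long the queue lock is held, while the per-page processing happens after the lock is dropped.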
343 * We can cluster only if the page is not clean, busy, or held, and the page is
353 if (m->dirty != 0 && vm_page_in_laundry(m) && in vm_pageout_flushable()
363 * eligible for laundering, form a cluster of these pages and the given page,
364 * and launder that cluster.
369 vm_page_t mc[2 * vm_pageout_page_count - 1]; in vm_pageout_cluster()
372 VM_OBJECT_ASSERT_WLOCKED(m->object); in vm_pageout_cluster()
376 alignment = m->pindex % vm_pageout_page_count; in vm_pageout_cluster()
386 * align the clusters (which leaves sporadic out-of-order in vm_pageout_cluster()
388 * first and attempt to align our cluster, then do a in vm_pageout_cluster()
399 if (alignment == pageout_count - 1 && num_ends == 0) in vm_pageout_cluster()
407 mc[--page_base] = m; in vm_pageout_cluster()
410 m = mc[page_base + pageout_count - 1]; in vm_pageout_cluster()
428 * vm_pageout_flush() - launder the given pages
431 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
445 vm_object_t object = mc[0]->object; in vm_pageout_flush()
454 * valid and read-only. in vm_pageout_flush()
459 * NOTE! mc[i]->dirty may be partial or fragmented due to an in vm_pageout_flush()
466 KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0, in vm_pageout_flush()
474 runlen = count - mreq; in vm_pageout_flush()
520 if ((object->flags & OBJ_SWAP) != 0 && in vm_pageout_flush()
526 if (eio != NULL && i >= mreq && i - mreq < runlen) in vm_pageout_flush()
530 if (i >= mreq && i - mreq < runlen) in vm_pageout_flush()
531 runlen = i - mreq; in vm_pageout_flush()
583 object = m->object; in vm_pageout_clean()
592 * pageout daemon, but the new low-memory handling in vm_pageout_clean()
601 if (object->type == OBJT_VNODE) { in vm_pageout_clean()
603 vp = object->handle; in vm_pageout_clean()
604 if (vp->v_type == VREG && in vm_pageout_clean()
613 pindex = m->pindex; in vm_pageout_clean()
626 if (vp->v_object != object) { in vm_pageout_clean()
638 if (!vm_page_in_laundry(m) || m->object != object || in vm_pageout_clean()
639 m->pindex != pindex || m->dirty == 0) { in vm_pageout_clean()
722 marker = &vmd->vmd_markers[queue]; in vm_pageout_launder()
723 pq = &vmd->vmd_pagequeues[queue]; in vm_pageout_launder()
725 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); in vm_pageout_launder()
727 if (__predict_false((m->flags & PG_MARKER) != 0)) in vm_pageout_launder()
742 if (object == NULL || object != m->object) { in vm_pageout_launder()
745 object = atomic_load_ptr(&m->object); in vm_pageout_launder()
750 /* Depends on type-stability. */ in vm_pageout_launder()
752 if (__predict_false(m->object != object)) { in vm_pageout_launder()
780 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; in vm_pageout_launder()
800 } else if (object->ref_count != 0) { in vm_pageout_launder()
827 launder--; in vm_pageout_launder()
830 } else if ((object->flags & OBJ_DEAD) == 0) { in vm_pageout_launder()
840 * If the page appears to be clean at the machine-independent in vm_pageout_launder()
846 if (object->ref_count != 0) { in vm_pageout_launder()
848 if (m->dirty == 0 && !vm_page_try_remove_all(m)) in vm_pageout_launder()
858 if (m->dirty == 0) { in vm_pageout_launder()
862 * manipulating the page, check for a last-second in vm_pageout_launder()
869 } else if ((object->flags & OBJ_DEAD) == 0) { in vm_pageout_launder()
870 if ((object->flags & OBJ_SWAP) != 0) in vm_pageout_launder()
880 * Form a cluster with adjacent, dirty pages from the in vm_pageout_launder()
881 * same object, and page out that entire cluster. in vm_pageout_launder()
894 launder -= numpagedout; in vm_pageout_launder()
926 return (starting_target - launder); in vm_pageout_launder()
937 bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0; in isqrt()
943 num -= tmp; in isqrt()
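The two isqrt() fragments above belong to a bit-by-bit binary integer square root. A minimal standalone version of that classic algorithm (written from scratch here rather than copied from the file; fls() is the BSD find-last-set routine from <strings.h>) is:

#include <strings.h>		/* fls() */

/* Bit-by-bit integer square root: returns floor(sqrt(num)). */
static unsigned int
isqrt_sketch(unsigned int num)
{
	unsigned int bit, root, tmp;

	/* Start at the largest power of four that is <= num. */
	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
	root = 0;
	while (bit != 0) {
		tmp = root + bit;
		root >>= 1;
		if (num >= tmp) {
			num -= tmp;	/* accept this bit of the root */
			root += bit;
		}
		bit >>= 2;
	}
	return (root);
}

For example, isqrt_sketch(10) starts with bit = 4 and returns 3.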
967 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; in vm_pageout_laundry_worker()
968 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); in vm_pageout_laundry_worker()
1015 launder = target / shortfall_cycle--; in vm_pageout_laundry_worker()
1033 * that the threshold is non-zero after an inactive queue in vm_pageout_laundry_worker()
1037 nclean = vmd->vmd_free_count + in vm_pageout_laundry_worker()
1038 vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt; in vm_pageout_laundry_worker()
1039 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt; in vm_pageout_laundry_worker()
1041 vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) { in vm_pageout_laundry_worker()
1042 target = vmd->vmd_background_launder_target; in vm_pageout_laundry_worker()
1046 * We have a non-zero background laundering target. If we've in vm_pageout_laundry_worker()
1057 } else if (last_target - target >= in vm_pageout_laundry_worker()
1072 * a cluster minus one. in vm_pageout_laundry_worker()
1074 target -= min(vm_pageout_launder(vmd, launder, in vm_pageout_laundry_worker()
1085 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE) in vm_pageout_laundry_worker()
1086 (void)mtx_sleep(&vmd->vmd_laundry_request, in vm_pageout_laundry_worker()
1094 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL && in vm_pageout_laundry_worker()
1097 vmd->vmd_pageout_deficit; in vm_pageout_laundry_worker()
1103 vmd->vmd_laundry_request = VM_LAUNDRY_IDLE; in vm_pageout_laundry_worker()
1104 nfreed += vmd->vmd_clean_pages_freed; in vm_pageout_laundry_worker()
1105 vmd->vmd_clean_pages_freed = 0; in vm_pageout_laundry_worker()
1127 shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) - in vm_pageout_active_target()
1128 (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt + in vm_pageout_active_target()
1129 vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight); in vm_pageout_active_target()
1136 * small portion of the queue in order to maintain quasi-LRU.
1150 marker = &vmd->vmd_markers[PQ_ACTIVE]; in vm_pageout_scan_active()
1151 pq = &vmd->vmd_pagequeues[PQ_ACTIVE]; in vm_pageout_scan_active()
1160 min_scan = pq->pq_cnt; in vm_pageout_scan_active()
1161 min_scan *= scan_tick - vmd->vmd_last_active_scan; in vm_pageout_scan_active()
1165 if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0)) in vm_pageout_scan_active()
1166 vmd->vmd_last_active_scan = scan_tick; in vm_pageout_scan_active()
1170 * the per-page activity counter and use it to identify deactivation in vm_pageout_scan_active()
1181 max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan; in vm_pageout_scan_active()
1183 vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan); in vm_pageout_scan_active()
1185 if (__predict_false(m == &vmd->vmd_clock[1])) { in vm_pageout_scan_active()
1187 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); in vm_pageout_scan_active()
1188 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q); in vm_pageout_scan_active()
1189 TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0], in vm_pageout_scan_active()
1191 TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1], in vm_pageout_scan_active()
1193 max_scan -= ss.scanned; in vm_pageout_scan_active()
1197 if (__predict_false((m->flags & PG_MARKER) != 0)) in vm_pageout_scan_active()
1213 object = atomic_load_ptr(&m->object); in vm_pageout_scan_active()
1221 if ((m->a.flags & PGA_SWAP_FREE) != 0 && in vm_pageout_scan_active()
1223 if (m->object == object) in vm_pageout_scan_active()
1240 * 1) The count was transitioning to zero, but we saw a non- in vm_pageout_scan_active()
1247 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; in vm_pageout_scan_active()
1276 new.act_count -= min(new.act_count, in vm_pageout_scan_active()
1321 } else if (m->dirty == 0) { in vm_pageout_scan_active()
1335 page_shortage -= ps_delta; in vm_pageout_scan_active()
1338 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q); in vm_pageout_scan_active()
1339 TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q); in vm_pageout_scan_active()
1361 * Re-add stuck pages to the inactive queue. We will examine them again
1375 marker = ss->marker; in vm_pageout_reinsert_inactive()
1376 pq = ss->pq; in vm_pageout_reinsert_inactive()
1420 * entire queue. (Note that m->a.act_count is not used to make in vm_pageout_scan_inactive()
1426 pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; in vm_pageout_scan_inactive()
1428 vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt); in vm_pageout_scan_inactive()
1444 KASSERT((m->flags & PG_MARKER) == 0, in vm_pageout_scan_inactive()
1459 if (object == NULL || object != m->object) { in vm_pageout_scan_inactive()
1462 object = atomic_load_ptr(&m->object); in vm_pageout_scan_inactive()
1467 /* Depends on type-stability. */ in vm_pageout_scan_inactive()
1469 if (__predict_false(m->object != object)) { in vm_pageout_scan_inactive()
1490 if ((m->a.flags & PGA_SWAP_FREE) != 0) in vm_pageout_scan_inactive()
1511 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0; in vm_pageout_scan_inactive()
1531 } else if (object->ref_count != 0) { in vm_pageout_scan_inactive()
1552 } else if ((object->flags & OBJ_DEAD) == 0) { in vm_pageout_scan_inactive()
1563 * If the page appears to be clean at the machine-independent in vm_pageout_scan_inactive()
1569 if (object->ref_count != 0) { in vm_pageout_scan_inactive()
1571 if (m->dirty == 0 && !vm_page_try_remove_all(m)) in vm_pageout_scan_inactive()
1582 if (m->dirty == 0) { in vm_pageout_scan_inactive()
1586 * manipulating the page, check for a last-second in vm_pageout_scan_inactive()
1598 m->a.queue = PQ_NONE; in vm_pageout_scan_inactive()
1600 page_shortage--; in vm_pageout_scan_inactive()
1603 if ((object->flags & OBJ_DEAD) == 0) in vm_pageout_scan_inactive()
1622 atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage); in vm_pageout_scan_inactive()
1625 atomic_add_int(&vmd->vmd_inactive_us, in vm_pageout_scan_inactive()
1627 atomic_add_int(&vmd->vmd_inactive_freed, in vm_pageout_scan_inactive()
1628 starting_page_shortage - page_shortage); in vm_pageout_scan_inactive()
1640 vmd->vmd_inactive_shortage = shortage; in vm_pageout_inactive_dispatch()
1647 if ((threads = vmd->vmd_inactive_threads) > 1 && in vm_pageout_inactive_dispatch()
1648 vmd->vmd_helper_threads_enabled && in vm_pageout_inactive_dispatch()
1649 vmd->vmd_inactive_pps != 0 && in vm_pageout_inactive_dispatch()
1650 shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) { in vm_pageout_inactive_dispatch()
1651 vmd->vmd_inactive_shortage /= threads; in vm_pageout_inactive_dispatch()
1654 blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1); in vm_pageout_inactive_dispatch()
1655 blockcount_acquire(&vmd->vmd_inactive_running, threads - 1); in vm_pageout_inactive_dispatch()
1656 wakeup(&vmd->vmd_inactive_shortage); in vm_pageout_inactive_dispatch()
1661 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop); in vm_pageout_inactive_dispatch()
1667 blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM); in vm_pageout_inactive_dispatch()
1668 freed = atomic_readandclear_int(&vmd->vmd_inactive_freed); in vm_pageout_inactive_dispatch()
1672 * Calculate the per-thread paging rate with an exponential decay of in vm_pageout_inactive_dispatch()
1676 us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1); in vm_pageout_inactive_dispatch()
1682 vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2); in vm_pageout_inactive_dispatch()
1684 return (shortage - freed); in vm_pageout_inactive_dispatch()
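The dispatch code above maintains a per-thread paging-rate estimate, vmd_inactive_pps, by halving the previous value and adding half of the new sample, i.e. an exponential moving average with weight 1/2. A tiny illustration of that update follows; the conversion of freed pages over microseconds into pages per second is an assumed form, not the kernel's exact expression, and 'us' is taken to be nonzero since the caller above clamps it to at least 1.

#include <stdint.h>

static unsigned int
decay_pps(unsigned int old_pps, unsigned int freed, unsigned int us)
{
	unsigned int pps;

	/* New sample: pages freed per second during this scan (assumed form). */
	pps = (unsigned int)((uint64_t)freed * 1000000 / us);
	/* Exponential decay: keep half the old estimate, add half the sample. */
	return (old_pps / 2 + pps / 2);
}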
1704 deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit); in vm_pageout_inactive()
1711 addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage); in vm_pageout_inactive()
1725 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY]; in vm_pageout_inactive()
1727 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE && in vm_pageout_inactive()
1728 (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) { in vm_pageout_inactive()
1730 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL; in vm_pageout_inactive()
1732 } else if (vmd->vmd_laundry_request != in vm_pageout_inactive()
1734 vmd->vmd_laundry_request = in vm_pageout_inactive()
1736 wakeup(&vmd->vmd_laundry_request); in vm_pageout_inactive()
1738 vmd->vmd_clean_pages_freed += in vm_pageout_inactive()
1739 starting_page_shortage - page_shortage; in vm_pageout_inactive()
1772 vmd->vmd_oom_seq = 0; in vm_pageout_mightbe_oom()
1774 vmd->vmd_oom_seq++; in vm_pageout_mightbe_oom()
1775 if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { in vm_pageout_mightbe_oom()
1776 if (vmd->vmd_oom) { in vm_pageout_mightbe_oom()
1777 vmd->vmd_oom = false; in vm_pageout_mightbe_oom()
1787 vmd->vmd_oom_seq = 0; in vm_pageout_mightbe_oom()
1789 if (vmd->vmd_oom) in vm_pageout_mightbe_oom()
1792 vmd->vmd_oom = true; in vm_pageout_mightbe_oom()
1794 if (old_vote != vm_ndomains - 1) in vm_pageout_mightbe_oom()
1810 vmd->vmd_oom = false; in vm_pageout_mightbe_oom()
1828 * efforts to write-back vnode-backed pages may have stalled. This
1831 * belonging to vnode-backed objects are counted, because they might
1847 map = &vmspace->vm_map; in vm_pageout_oom_pagecount()
1849 sx_assert(&map->lock, SA_LOCKED); in vm_pageout_oom_pagecount()
1852 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_pageout_oom_pagecount()
1854 obj = entry->object.vm_object; in vm_pageout_oom_pagecount()
1857 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 && in vm_pageout_oom_pagecount()
1858 obj->ref_count != 1) in vm_pageout_oom_pagecount()
1860 if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE || in vm_pageout_oom_pagecount()
1861 (obj->flags & OBJ_SWAP) != 0) in vm_pageout_oom_pagecount()
1862 res += obj->resident_page_count; in vm_pageout_oom_pagecount()
1888 * processes of middle-size, like buildworld, all of them in vm_pageout_oom()
1891 * To avoid killing too many processes, rate-limit OOMs in vm_pageout_oom()
1892 * initiated by vm_fault() time-outs on the waits for free in vm_pageout_oom()
1898 (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) { in vm_pageout_oom()
1922 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | in vm_pageout_oom()
1924 p->p_pid == 1 || P_KILLED(p) || in vm_pageout_oom()
1925 (p->p_pid < 48 && swap_pager_avail != 0)) { in vm_pageout_oom()
1930 * If the process is in a non-running type state, in vm_pageout_oom()
1961 if (!vm_map_trylock_read(&vm->vm_map)) { in vm_pageout_oom()
1970 vm_map_unlock_read(&vm->vm_map); in vm_pageout_oom()
2003 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0) in vm_pageout_oom()
2016 * Return true if the free page count should be re-evaluated.
2028 while ((u_int)(ticks - last) / hz >= lowmem_period) { in vm_pageout_lowmem()
2055 * UMA reclaim worker has its own rate-limiting mechanism, so don't in vm_pageout_lowmem()
2083 KASSERT(vmd->vmd_segs != 0, ("domain without segments")); in vm_pageout_worker()
2084 vmd->vmd_last_active_scan = ticks; in vm_pageout_worker()
2097 atomic_store_int(&vmd->vmd_pageout_wanted, 0); in vm_pageout_worker()
2102 if (vm_paging_needed(vmd, vmd->vmd_free_count)) { in vm_pageout_worker()
2116 if (mtx_sleep(&vmd->vmd_pageout_wanted, in vm_pageout_worker()
2123 atomic_store_int(&vmd->vmd_pageout_wanted, 1); in vm_pageout_worker()
2131 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count); in vm_pageout_worker()
2133 ofree = vmd->vmd_free_count; in vm_pageout_worker()
2134 if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree) in vm_pageout_worker()
2135 shortage -= min(vmd->vmd_free_count - ofree, in vm_pageout_worker()
2167 msleep(&vmd->vmd_inactive_shortage, in vm_pageout_helper()
2169 blockcount_release(&vmd->vmd_inactive_starting, 1); in vm_pageout_helper()
2172 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage); in vm_pageout_helper()
2179 blockcount_release(&vmd->vmd_inactive_running, 1); in vm_pageout_helper()
2188 if (VM_DOMAIN_EMPTY(vmd->vmd_domain)) in get_pageout_threads_per_domain()
2192 * Semi-arbitrarily constrain pagedaemon threads to less than half the in get_pageout_threads_per_domain()
2201 domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]); in get_pageout_threads_per_domain()
2207 eligible_cpus -= CPU_COUNT(&cpuset_domain[i]); in get_pageout_threads_per_domain()
2211 * corresponding to the fraction of pagedaemon-eligible CPUs in the in get_pageout_threads_per_domain()
2230 vmd->vmd_interrupt_free_min = 2; in vm_pageout_init_domain()
2237 vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE + in vm_pageout_init_domain()
2238 vmd->vmd_interrupt_free_min; in vm_pageout_init_domain()
2239 vmd->vmd_free_reserved = vm_pageout_page_count + in vm_pageout_init_domain()
2240 vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768; in vm_pageout_init_domain()
2241 vmd->vmd_free_min = vmd->vmd_page_count / 200; in vm_pageout_init_domain()
2242 vmd->vmd_free_severe = vmd->vmd_free_min / 2; in vm_pageout_init_domain()
2243 vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved; in vm_pageout_init_domain()
2244 vmd->vmd_free_min += vmd->vmd_free_reserved; in vm_pageout_init_domain()
2245 vmd->vmd_free_severe += vmd->vmd_free_reserved; in vm_pageout_init_domain()
2246 vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2; in vm_pageout_init_domain()
2247 if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3) in vm_pageout_init_domain()
2248 vmd->vmd_inactive_target = vmd->vmd_free_count / 3; in vm_pageout_init_domain()
2254 vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9; in vm_pageout_init_domain()
2261 vmd->vmd_background_launder_target = (vmd->vmd_free_target - in vm_pageout_init_domain()
2262 vmd->vmd_free_min) / 10; in vm_pageout_init_domain()
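The statements above derive a domain's free-page thresholds from its page count. As a worked example under assumed values (a hypothetical domain of 1,000,000 pages, PAGE_SIZE 4096, MAXBSIZE 65536, and vm_pageout_page_count at its usual default of 32), the same formulas give:

#include <stdio.h>

int
main(void)
{
	unsigned page_count = 1000000;		/* hypothetical domain size */
	unsigned pageout_page_count = 32;	/* assumed default */
	unsigned interrupt_free_min = 2;
	unsigned pageout_free_min = 2 * 65536 / 4096 + interrupt_free_min; /* 34 */
	unsigned free_reserved = pageout_page_count + pageout_free_min +
	    page_count / 768;			/* 1368 */
	unsigned free_min = page_count / 200;	/* 5000 */
	unsigned free_severe = free_min / 2;	/* 2500 */
	unsigned free_target = 4 * free_min + free_reserved;	/* 21368 */

	free_min += free_reserved;		/* 6368 */
	free_severe += free_reserved;		/* 3868 */
	printf("reserved %u min %u severe %u target %u\n",
	    free_reserved, free_min, free_severe, free_target);
	/* Wakeup threshold is 90% of the target, computed with integer math. */
	printf("wakeup %u inactive %u background %u\n",
	    (free_target / 10) * 9,		/* 19224 */
	    3 * free_target / 2,		/* 32052, before the clamp above */
	    (free_target - free_min) / 10);	/* 1500 */
	return (0);
}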
2265 pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE, in vm_pageout_init_domain()
2266 vmd->vmd_free_target, PIDCTRL_BOUND, in vm_pageout_init_domain()
2268 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, in vm_pageout_init_domain()
2270 pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid)); in vm_pageout_init_domain()
2272 vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd); in vm_pageout_init_domain()
2273 SYSCTL_ADD_BOOL(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO, in vm_pageout_init_domain()
2275 &vmd->vmd_helper_threads_enabled, 0, in vm_pageout_init_domain()
2276 "Enable multi-threaded inactive queue scanning"); in vm_pageout_init_domain()
2294 vm_cnt.v_free_reserved += vmd->vmd_free_reserved; in vm_pageout_init()
2295 vm_cnt.v_free_target += vmd->vmd_free_target; in vm_pageout_init()
2296 vm_cnt.v_free_min += vmd->vmd_free_min; in vm_pageout_init()
2297 vm_cnt.v_inactive_target += vmd->vmd_inactive_target; in vm_pageout_init()
2298 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min; in vm_pageout_init()
2299 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min; in vm_pageout_init()
2300 vm_cnt.v_free_severe += vmd->vmd_free_severe; in vm_pageout_init()
2301 freecount += vmd->vmd_free_count; in vm_pageout_init()
2313 * Set the maximum number of user-wired virtual pages. Historically the in vm_pageout_init()
2315 * may also request user-wired memory. in vm_pageout_init()
2336 for (first = -1, i = 0; i < vm_ndomains; i++) { in vm_pageout()
2343 if (first == -1) in vm_pageout()
2352 pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads; in vm_pageout()
2353 for (j = 0; j < pageout_threads - 1; j++) { in vm_pageout()
2370 snprintf(td->td_name, sizeof(td->td_name), "dom%d", first); in vm_pageout()
2387 if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) { in pagedaemon_wakeup()
2389 atomic_store_int(&vmd->vmd_pageout_wanted, 1); in pagedaemon_wakeup()
2390 wakeup(&vmd->vmd_pageout_wanted); in pagedaemon_wakeup()