/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <sys/sysctl.h>

struct vmmeter vm_cnt;

SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
	CTLFLAG_RW, &vm_cnt.v_free_min, 0, "Minimum low-free-pages threshold");
SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
	CTLFLAG_RW, &vm_cnt.v_free_target, 0, "Desired free pages");
SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
	CTLFLAG_RW, &vm_cnt.v_free_reserved, 0, "Pages reserved for deadlock");
SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
	CTLFLAG_RW, &vm_cnt.v_inactive_target, 0, "Pages desired inactive");
SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
	CTLFLAG_RW, &vm_cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
	CTLFLAG_RW, &vm_cnt.v_free_severe, 0, "Severe page depletion point");

static int
sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
{

#ifdef SCTL_MASK32
	u_int32_t la[4];

	if (req->flags & SCTL_MASK32) {
		la[0] = averunnable.ldavg[0];
		la[1] = averunnable.ldavg[1];
		la[2] = averunnable.ldavg[2];
		la[3] = averunnable.fscale;
		return SYSCTL_OUT(req, la, sizeof(la));
	} else
#endif
		return SYSCTL_OUT(req, &averunnable, sizeof(averunnable));
}
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, CTLTYPE_STRUCT | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_loadavg, "S,loadavg",
    "Machine loadaverage history");
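/*
 * Illustrative userland sketch (not part of the kernel build): the
 * "S,loadavg" node registered above exports struct loadavg, whose
 * ldavg[] entries are fixed-point values scaled by fscale.  A minimal
 * consumer, assuming the standard <sys/resource.h> definition of
 * struct loadavg, might look like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct loadavg la;
 *		size_t len = sizeof(la);
 *
 *		// Fetch vm.loadavg, then scale fixed point to floating point.
 *		if (sysctlbyname("vm.loadavg", &la, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("%.2f %.2f %.2f\n",
 *		    (double)la.ldavg[0] / la.fscale,
 *		    (double)la.ldavg[1] / la.fscale,
 *		    (double)la.ldavg[2] / la.fscale);
 *		return (0);
 *	}
 */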
/*
 * This function aims to determine if the object is mapped,
 * specifically, if it is referenced by a vm_map_entry.  Because
 * objects occasionally acquire transient references that do not
 * represent a mapping, the method used here is inexact.  However, it
 * has very low overhead and is good enough for the advisory
 * vm.vmtotal sysctl.
 */
static bool
is_object_active(vm_object_t obj)
{

	return (obj->ref_count > obj->shadow_count);
}

/*
 * Collect the vm.vmtotal statistics: per-thread scheduling state counts
 * and virtual/resident memory totals summed over the VM object list.
 */
static int
vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	vm_object_t object;
	struct proc *p;
	struct thread *td;

	bzero(&total, sizeof(total));

	/*
	 * Calculate process statistics.
	 */
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_flag & P_SYSTEM)
			continue;
		PROC_LOCK(p);
		switch (p->p_state) {
		case PRS_NEW:
			PROC_UNLOCK(p);
			continue;
			break;
		default:
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				switch (td->td_state) {
				case TDS_INHIBITED:
					if (TD_IS_SWAPPED(td))
						total.t_sw++;
					else if (TD_IS_SLEEPING(td)) {
						if (td->td_priority <= PZERO)
							total.t_dw++;
						else
							total.t_sl++;
						if (td->td_wchan ==
						    &vm_cnt.v_free_count)
							total.t_pw++;
					}
					break;

				case TDS_CAN_RUN:
					total.t_sw++;
					break;
				case TDS_RUNQ:
				case TDS_RUNNING:
					total.t_rq++;
					thread_unlock(td);
					continue;
				default:
					break;
				}
				thread_unlock(td);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	/*
	 * Calculate object memory usage statistics.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		/*
		 * Perform unsynchronized reads on the object.  In
		 * this case, the lack of synchronization should not
		 * impair the accuracy of the reported statistics.
		 */
		if ((object->flags & OBJ_FICTITIOUS) != 0) {
			/*
			 * Devices, like /dev/mem, will badly skew our totals.
			 */
			continue;
		}
		if (object->ref_count == 0) {
			/*
			 * Also skip unreferenced objects, including
			 * vnodes representing mounted file systems.
			 */
			continue;
		}
		if (object->ref_count == 1 &&
		    (object->flags & OBJ_NOSPLIT) != 0) {
			/*
			 * Also skip otherwise unreferenced swap
			 * objects backing tmpfs vnodes, and POSIX or
			 * SysV shared memory.
			 */
			continue;
		}
		total.t_vm += object->size;
		total.t_rm += object->resident_page_count;
		if (is_object_active(object)) {
			total.t_avm += object->size;
			total.t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			total.t_vmshr += object->size;
			total.t_rmshr += object->resident_page_count;
			if (is_object_active(object)) {
				total.t_avmshr += object->size;
				total.t_armshr += object->resident_page_count;
			}
		}
	}
	mtx_unlock(&vm_object_list_mtx);
	total.t_free = vm_cnt.v_free_count + vm_cnt.v_cache_count;
	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
}
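/*
 * Illustrative userland sketch (not part of the kernel build): the
 * totals gathered above are exported further down as the opaque
 * "S,vmtotal" node vm.vmtotal, so a monitoring tool can read them with
 * sysctl(3).  This assumes the struct vmtotal layout from
 * <sys/vmmeter.h>; field widths vary between releases, so the values
 * are widened to intmax_t before printing.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/vmmeter.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct vmtotal t;
 *		size_t len = sizeof(t);
 *
 *		// Fetch the whole structure in one opaque read.
 *		if (sysctlbyname("vm.vmtotal", &t, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("runnable threads %jd, free pages %jd, "
 *		    "active resident pages %jd\n",
 *		    (intmax_t)t.t_rq, (intmax_t)t.t_free, (intmax_t)t.t_arm);
 *		return (0);
 *	}
 */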

/*
 * vcnt() -	accumulate statistics from all cpus and the global cnt
 *		structure.
 *
 *	The vmmeter structure is now per-cpu as well as global.  Those
 *	statistics which can be kept on a per-cpu basis (to avoid cache
 *	stalls between cpus) can be moved to the per-cpu vmmeter.  Remaining
 *	statistics, such as v_free_reserved, are left in the global
 *	structure.
 *
 * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
 */
static int
vcnt(SYSCTL_HANDLER_ARGS)
{
	int count = *(int *)arg1;
	int offset = (char *)arg1 - (char *)&vm_cnt;
	int i;

	/*
	 * arg1 points at a field inside the global vm_cnt; apply the same
	 * byte offset within each cpu's vmmeter to add its contribution.
	 */
	CPU_FOREACH(i) {
		struct pcpu *pcpu = pcpu_find(i);
		count += *(int *)((char *)&pcpu->pc_cnt + offset);
	}
	return (SYSCTL_OUT(req, &count, sizeof(int)));
}

SYSCTL_PROC(_vm, VM_TOTAL, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, sizeof(struct vmtotal), vmtotal, "S,vmtotal",
    "System virtual memory statistics");
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
static SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0,
    "VM meter sys stats");
static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
    "VM meter vm stats");
SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");

#define	VM_STATS(parent, var, descr) \
    SYSCTL_PROC(parent, OID_AUTO, var, \
    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &vm_cnt.var, 0, vcnt, \
    "IU", descr)
#define	VM_STATS_VM(var, descr)		VM_STATS(_vm_stats_vm, var, descr)
#define	VM_STATS_SYS(var, descr)	VM_STATS(_vm_stats_sys, var, descr)

VM_STATS_SYS(v_swtch, "Context switches");
VM_STATS_SYS(v_trap, "Traps");
VM_STATS_SYS(v_syscall, "System calls");
VM_STATS_SYS(v_intr, "Device interrupts");
VM_STATS_SYS(v_soft, "Software interrupts");
VM_STATS_VM(v_vm_faults, "Address memory faults");
VM_STATS_VM(v_io_faults, "Page faults requiring I/O");
VM_STATS_VM(v_cow_faults, "Copy-on-write faults");
VM_STATS_VM(v_cow_optim, "Optimized COW faults");
VM_STATS_VM(v_zfod, "Pages zero-filled on demand");
VM_STATS_VM(v_ozfod, "Optimized zero fill pages");
VM_STATS_VM(v_swapin, "Swap pager pageins");
VM_STATS_VM(v_swapout, "Swap pager pageouts");
VM_STATS_VM(v_swappgsin, "Swap pages swapped in");
VM_STATS_VM(v_swappgsout, "Swap pages swapped out");
VM_STATS_VM(v_vnodein, "Vnode pager pageins");
VM_STATS_VM(v_vnodeout, "Vnode pager pageouts");
VM_STATS_VM(v_vnodepgsin, "Vnode pages paged in");
VM_STATS_VM(v_vnodepgsout, "Vnode pages paged out");
VM_STATS_VM(v_intrans, "In transit page faults");
VM_STATS_VM(v_reactivated, "Pages reactivated from free list");
VM_STATS_VM(v_pdwakeups, "Pagedaemon wakeups");
VM_STATS_VM(v_pdpages, "Pages analyzed by pagedaemon");
VM_STATS_VM(v_tcached, "Total pages cached");
VM_STATS_VM(v_dfree, "Pages freed by pagedaemon");
VM_STATS_VM(v_pfree, "Pages freed by exiting processes");
VM_STATS_VM(v_tfree, "Total pages freed");
VM_STATS_VM(v_page_size, "Page size in bytes");
VM_STATS_VM(v_page_count, "Total number of pages in system");
VM_STATS_VM(v_free_reserved, "Pages reserved for deadlock");
VM_STATS_VM(v_free_target, "Pages desired free");
VM_STATS_VM(v_free_min, "Minimum low-free-pages threshold");
VM_STATS_VM(v_free_count, "Free pages");
VM_STATS_VM(v_wire_count, "Wired pages");
VM_STATS_VM(v_active_count, "Active pages");
VM_STATS_VM(v_inactive_target, "Desired inactive pages");
VM_STATS_VM(v_inactive_count, "Inactive pages");
VM_STATS_VM(v_cache_count, "Pages on cache queue");
VM_STATS_VM(v_pageout_free_min, "Min pages reserved for kernel");
VM_STATS_VM(v_interrupt_free_min, "Reserved pages for interrupt code");
VM_STATS_VM(v_forks, "Number of fork() calls");
VM_STATS_VM(v_vforks, "Number of vfork() calls");
VM_STATS_VM(v_rforks, "Number of rfork() calls");
VM_STATS_VM(v_kthreads, "Number of fork() calls by kernel");
VM_STATS_VM(v_forkpages, "VM pages affected by fork()");
VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()");
VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");

SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
    &vm_page_zero_count, 0, "Number of zero-ed free pages");
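/*
 * Illustrative userland sketch (not part of the kernel build): each of
 * the VM_STATS_* counters above is served by vcnt(), which returns an
 * unsigned int that already includes the per-cpu contributions, so a
 * caller simply reads the scalar by name, for example:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		u_int swtch;
 *		size_t len = sizeof(swtch);
 *
 *		// Context switches, summed over the global and per-cpu counts.
 *		if (sysctlbyname("vm.stats.sys.v_swtch", &swtch, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		printf("context switches: %u\n", swtch);
 *		return (0);
 *	}
 */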