vm_page.c: diff of 04a18977c815b83bf38911871f3c4ca7f881f275 (old) against 222d01951f8677015e3e96c6950e809c0d983c09 (new)
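The change is mechanical: every direct access to the global struct vmmeter `cnt` is routed through a VMCNT_*() accessor. The macro definitions are not part of this listing; the sketch below is an inferred shape only, reconstructed from how the macros are used in the hunks that follow (every expansion here is an assumption, not the committed code). Two constraints are visible from the call sites: VMCNT_PTR() must return the address of the underlying counter, because it replaces `&cnt.v_free_count` as an msleep()/wakeup() channel, and VMCNT_ADD()/VMCNT_DEC() must remain atomic where they replace atomic_add_int()/atomic_subtract_int().

/*
 * Inferred sketch of the VMCNT accessor family -- NOT the committed
 * definitions.  Assumes the old global "struct vmmeter cnt" is still
 * the backing store; the real macros presumably live in <sys/vmmeter.h>
 * and may differ.
 */
#define VMCNT_GET(member)       (cnt.v_ ## member)
#define VMCNT_SET(member, val)  (cnt.v_ ## member = (val))
#define VMCNT_PTR(member)       (&cnt.v_ ## member)
#define VMCNT_ADD(member, val)  atomic_add_int(&cnt.v_ ## member, (val))
#define VMCNT_DEC(member, val)  atomic_subtract_int(&cnt.v_ ## member, (val))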
 /*-
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
  * Redistribution and use in source and binary forms, with or without

--- 137 unchanged lines hidden ---

  *
  *	Sets the page size, perhaps based upon the memory
  *	size.  Must be called before any use of page-size
  *	dependent functions.
  */
 void
 vm_set_page_size(void)
 {
-	if (cnt.v_page_size == 0)
-		cnt.v_page_size = PAGE_SIZE;
-	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
+	if (VMCNT_GET(page_size) == 0)
+		VMCNT_SET(page_size, PAGE_SIZE);
+	if (((VMCNT_GET(page_size) - 1) & VMCNT_GET(page_size)) != 0)
 		panic("vm_set_page_size: page size not a power of two");
 }
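The rewritten test keeps the usual power-of-two idiom: subtracting 1 from a power of two clears its single set bit, so ANDing the two values is zero exactly for powers of two (a zero page size was already replaced with PAGE_SIZE by the first test). A worked check, added here for illustration only:

/* Illustration only: the idiom behind the panic check above.            */
/*  4096 = 0x1000: (0x1000 - 1) & 0x1000 == 0x0fff & 0x1000 == 0      -> ok    */
/* 12288 = 0x3000: (0x3000 - 1) & 0x3000 == 0x2fff & 0x3000 == 0x2000 -> panic */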

 /*
  *	vm_page_blacklist_lookup:
  *
  *	See if a physical address in this page has been listed
  *	in the blacklist tunable.  Entries in the tunable are

--- 187 unchanged lines hidden ---

 	    ("vm_page_startup: inconsistent page counts"));

 	/*
 	 * Construct the free queue(s) in descending order (by physical
 	 * address) so that the first 16MB of physical memory is allocated
 	 * last rather than first.  On large-memory machines, this avoids
 	 * the exhaustion of low physical memory before isa_dma_init has run.
 	 */
-	cnt.v_page_count = 0;
-	cnt.v_free_count = 0;
+	VMCNT_SET(page_count, 0);
+	VMCNT_SET(free_count, 0);
 	list = getenv("vm.blacklist");
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		pa = phys_avail[i];
 		last_pa = phys_avail[i + 1];
 		while (pa < last_pa) {
 			if (list != NULL &&
 			    vm_page_blacklist_lookup(list, pa))
 				printf("Skipping page with pa 0x%jx\n",

--- 499 unchanged lines hidden ---
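The blacklist arrives via getenv(), i.e. through the kernel environment, so it can be seeded from the boot loader. The precise entry format is described in the comment elided above; purely as a hypothetical illustration (the address and syntax below are invented, not taken from this source):

# /boot/loader.conf -- hypothetical illustration
vm.blacklist="0x7dead000"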
 	 * The pager is allowed to eat deeper into the free page list.
 	 */
 	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
 		page_req = VM_ALLOC_SYSTEM;
 	};

 loop:
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (cnt.v_free_count > cnt.v_free_reserved ||
+	if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved) ||
 	    (page_req == VM_ALLOC_SYSTEM &&
-	    cnt.v_cache_count == 0 &&
-	    cnt.v_free_count > cnt.v_interrupt_free_min) ||
-	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
+	    VMCNT_GET(cache_count) == 0 &&
+	    VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min)) ||
+	    (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
 		 */
 		m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
 	} else if (page_req != VM_ALLOC_INTERRUPT) {
 		mtx_unlock(&vm_page_queue_free_mtx);
 		/*
 		 * Allocatable from cache (non-interrupt only).  On success,
 		 * we must free the page and try again, thus ensuring that
 		 * cnt.v_*_free_min counters are replenished.
 		 */
 		vm_page_lock_queues();
 		if ((m = vm_page_select_cache(color)) == NULL) {
-			KASSERT(cnt.v_cache_count == 0,
+			KASSERT(VMCNT_GET(cache_count) == 0,
 			    ("vm_page_alloc: cache queue is missing %d pages",
-			    cnt.v_cache_count));
+			    VMCNT_GET(cache_count)));
 			vm_page_unlock_queues();
 			atomic_add_int(&vm_pageout_deficit, 1);
 			pagedaemon_wakeup();

 			if (page_req != VM_ALLOC_SYSTEM)
 				return (NULL);

 			mtx_lock(&vm_page_queue_free_mtx);
-			if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
+			if (VMCNT_GET(free_count) <=
+			    VMCNT_GET(interrupt_free_min)) {
 				mtx_unlock(&vm_page_queue_free_mtx);
 				return (NULL);
 			}
 			m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
 		} else {
 			vm_page_unlock_queues();
 			goto loop;
 		}

--- 33 unchanged lines hidden ---
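For reference, the admission test under the `loop:` label distinguishes three allocation classes. The following standalone restatement is an editorial sketch only (same logic as the diffed condition, hypothetical helper name):

/*
 * Sketch: when may vm_page_alloc() take a page straight from PQ_FREE?
 * Restates the condition under the "loop:" label above; not from the file.
 */
static int
vm_page_alloc_admit(int page_req)
{
    if (VMCNT_GET(free_count) > VMCNT_GET(free_reserved))
        return (1);     /* any request class */
    if (page_req == VM_ALLOC_SYSTEM && VMCNT_GET(cache_count) == 0 &&
        VMCNT_GET(free_count) > VMCNT_GET(interrupt_free_min))
        return (1);     /* system requests may dig into the reserve */
    if (page_req == VM_ALLOC_INTERRUPT && VMCNT_GET(free_count) > 0)
        return (1);     /* interrupts may take the last free page */
    return (0);
}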
 	if (object != NULL && object->type == OBJT_PHYS)
 		flags |= PG_UNMANAGED;
 	m->flags = flags;
 	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
 		m->oflags = 0;
 	else
 		m->oflags = VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
-		atomic_add_int(&cnt.v_wire_count, 1);
+		VMCNT_ADD(wire_count, 1);
 		m->wire_count = 1;
 	} else
 		m->wire_count = 0;
 	m->hold_count = 0;
 	m->act_count = 0;
 	m->busy = 0;
 	m->valid = 0;
 	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));

--- 29 unchanged lines hidden ---

 		vm_pageout_pages_needed = 1;
 		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
 		    PDROP | PSWP, "VMWait", 0);
 	} else {
 		if (!vm_pages_needed) {
 			vm_pages_needed = 1;
 			wakeup(&vm_pages_needed);
 		}
-		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
-		    "vmwait", 0);
+		msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP |
+		    PVM, "vmwait", 0);
 	}
 }

 /*
  *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
  *
  *	Block until free pages are available for allocation
  *	- Called only in vm_fault so that processes page faulting

--- 6 unchanged lines hidden ---

 vm_waitpfault(void)
 {

 	mtx_lock(&vm_page_queue_free_mtx);
 	if (!vm_pages_needed) {
 		vm_pages_needed = 1;
 		wakeup(&vm_pages_needed);
 	}
-	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
+	msleep(VMCNT_PTR(free_count), &vm_page_queue_free_mtx, PDROP | PUSER,
 	    "pfault", 0);
 }
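Both wait routines sleep on the free-page counter itself, and vm_page_free_wakeup() further down posts the wakeup on the same object. The conversion is therefore only behaviour-preserving if VMCNT_PTR(free_count) resolves to one stable address; the old code used `&cnt.v_free_count` on both sides.

/*
 * Channel pairing as it appears in this diff:
 *   sleeper: msleep(VMCNT_PTR(free_count), ...)  in vm_wait()/vm_waitpfault()
 *   waker:   wakeup(VMCNT_PTR(free_count))       in vm_page_free_wakeup()
 * Both must see the same address, e.g. &cnt.v_free_count under the
 * assumed expansion sketched near the top.
 */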

 /*
  *	vm_page_activate:
  *
  *	Put the specified page on the active list (if appropriate).
  *	Ensure that act_count is at least ACT_INIT but do not otherwise

--- 4 unchanged lines hidden ---

  */
 void
 vm_page_activate(vm_page_t m)
 {

 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			cnt.v_reactivated++;
+			VMCNT_ADD(reactivated, 1);
 		vm_pageq_remove(m);
 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
 				m->act_count = ACT_INIT;
 			vm_pageq_enqueue(PQ_ACTIVE, m);
 		}
 	} else {
 		if (m->act_count < ACT_INIT)

--- 16 unchanged lines hidden ---

 {

 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	/*
 	 * if pageout daemon needs pages, then tell it that there are
 	 * some free.
 	 */
 	if (vm_pageout_pages_needed &&
-	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
+	    VMCNT_GET(cache_count) + VMCNT_GET(free_count) >=
+	    VMCNT_GET(pageout_free_min)) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
 	/*
 	 * wakeup processes that are waiting on memory if we hit a
 	 * high water mark. And wakeup scheduler process if we have
 	 * lots of memory. this process will swapin processes.
 	 */
 	if (vm_pages_needed && !vm_page_count_min()) {
 		vm_pages_needed = 0;
-		wakeup(&cnt.v_free_count);
+		wakeup(VMCNT_PTR(free_count));
 	}
 }

 /*
  *	vm_page_free_toq:
  *
  *	Returns the given page to the PQ_FREE list,
  *	disassociating it with any VM object.

--- 6 unchanged lines hidden ---

 vm_page_free_toq(vm_page_t m)
 {
 	struct vpgqueues *pq;

 	if (VM_PAGE_GETQUEUE(m) != PQ_NONE)
 		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT(!pmap_page_is_mapped(m),
 	    ("vm_page_free_toq: freeing mapped page %p", m));
-	cnt.v_tfree++;
+	VMCNT_ADD(tfree, 1);

 	if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
 		printf(
 		"vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n",
 		    (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0,
 		    m->hold_count);
 		if (VM_PAGE_INQUEUE1(m, PQ_FREE))
 			panic("vm_page_free: freeing free page");

--- 74 unchanged lines hidden ---

 	 * it is already off the queues).
 	 */
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (m->flags & PG_FICTITIOUS)
 		return;
 	if (m->wire_count == 0) {
 		if ((m->flags & PG_UNMANAGED) == 0)
 			vm_pageq_remove(m);
-		atomic_add_int(&cnt.v_wire_count, 1);
+		VMCNT_ADD(wire_count, 1);
 	}
 	m->wire_count++;
 	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
 }
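vm_page_wire() above and vm_page_unwire() below form the usual pairing around temporary pins; both assert the page queue mutex. A hypothetical caller, for illustration only (the flow is invented, but the signatures match the code shown in this diff):

/* Hypothetical caller: pin a page, use it, then let it age normally. */
vm_page_lock_queues();
vm_page_wire(m);                /* removes m from the paging queues */
vm_page_unlock_queues();

/* ... page cannot be paged out while wired ... */

vm_page_lock_queues();
vm_page_unwire(m, 0);           /* activate == 0: requeue onto PQ_INACTIVE */
vm_page_unlock_queues();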

 /*
  *	vm_page_unwire:
  *

--- 27 unchanged lines hidden ---

 {

 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (m->flags & PG_FICTITIOUS)
 		return;
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
-			atomic_subtract_int(&cnt.v_wire_count, 1);
+			VMCNT_DEC(wire_count, 1);
 			if (m->flags & PG_UNMANAGED) {
 				;
 			} else if (activate)
 				vm_pageq_enqueue(PQ_ACTIVE, m);
 			else {
 				vm_page_flag_clear(m, PG_WINATCFLS);
 				vm_pageq_enqueue(PQ_INACTIVE, m);
 			}

--- 22 unchanged lines hidden ---

 	/*
 	 * Ignore if already inactive.
 	 */
 	if (VM_PAGE_INQUEUE2(m, PQ_INACTIVE))
 		return;
 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 		if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
-			cnt.v_reactivated++;
+			VMCNT_ADD(reactivated, 1);
 		vm_page_flag_clear(m, PG_WINATCFLS);
 		vm_pageq_remove(m);
 		if (athead)
 			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 		else
 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 		VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
 		vm_page_queues[PQ_INACTIVE].lcnt++;
-		cnt.v_inactive_count++;
+		VMCNT_ADD(inactive_count, 1);
 	}
 }

 void
 vm_page_deactivate(vm_page_t m)
 {
 	_vm_page_deactivate(m, 0);
 }

--- 468 unchanged lines hidden ---

 #include "opt_ddb.h"
 #ifdef DDB
 #include <sys/kernel.h>

 #include <ddb/ddb.h>

 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
-	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
-	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
-	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
-	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
-	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
-	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
-	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
-	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
-	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
-	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
+	db_printf("cnt.v_free_count: %d\n", VMCNT_GET(free_count));
+	db_printf("cnt.v_cache_count: %d\n", VMCNT_GET(cache_count));
+	db_printf("cnt.v_inactive_count: %d\n", VMCNT_GET(inactive_count));
+	db_printf("cnt.v_active_count: %d\n", VMCNT_GET(active_count));
+	db_printf("cnt.v_wire_count: %d\n", VMCNT_GET(wire_count));
+	db_printf("cnt.v_free_reserved: %d\n", VMCNT_GET(free_reserved));
+	db_printf("cnt.v_free_min: %d\n", VMCNT_GET(free_min));
+	db_printf("cnt.v_free_target: %d\n", VMCNT_GET(free_target));
+	db_printf("cnt.v_cache_min: %d\n", VMCNT_GET(cache_min));
+	db_printf("cnt.v_inactive_target: %d\n", VMCNT_GET(inactive_target));
 }

 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 {
 	int i;
 	db_printf("PQ_FREE:");
 	for (i = 0; i < PQ_NUMCOLORS; i++) {
 		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);

--- 14 unchanged lines hidden ---
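DB_SHOW_COMMAND(page, ...) and DB_SHOW_COMMAND(pageq, ...) register these printers as subcommands of the kernel debugger's `show` verb, so the converted counters can still be inspected live from the DDB prompt (invocation shown for illustration; output omitted):

db> show page
db> show pageq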