--- vm_pageout.c (c98bb15d01876b54224f52232cee5ba1b5b0ce6f)
+++ vm_pageout.c (44f1c916109d4d88941d257b7c4c96c26ab55477)
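This diff tracks FreeBSD's mechanical rename of the global `struct vmmeter` statistics instance from `cnt` to `vm_cnt` (evidently so the short name stops colliding with local `cnt` variables); every hunk below substitutes the new name at its uses in vm_pageout.c. For orientation, an abridged sketch of the declaration involved; the field selection and comments here are mine, not the header's:

```c
/* Abridged sketch of the renamed global (fields from sys/sys/vmmeter.h). */
struct vmmeter {
	u_int v_free_count;	/* pages free */
	u_int v_inactive_count;	/* pages on the inactive queue */
	u_int v_active_count;	/* pages on the active queue */
	/* ... dozens of further system-wide counters ... */
};

extern struct vmmeter vm_cnt;	/* was: extern struct vmmeter cnt; */
```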
 /*-
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  * Copyright (c) 2005 Yahoo! Technologies Norway AS

--- 664 unchanged lines hidden ---

 	}

 	/*
 	 * Make the next scan start on the next domain.
 	 */
 	initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;

 	inactl = 0;
-	inactmax = cnt.v_inactive_count;
+	inactmax = vm_cnt.v_inactive_count;
 	actl = 0;
-	actmax = tries < 2 ? 0 : cnt.v_active_count;
+	actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
 	dom = initial_dom;

 	/*
 	 * Scan domains in round-robin order, first inactive queues,
 	 * then active.  Since domain usually owns large physically
 	 * contiguous chunk of memory, it makes sense to completely
 	 * exhaust one domain before switching to next, while growing
 	 * the pool of contiguous physical pages.

--- 613 unchanged lines hidden ---
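The comment above describes the walk order but the loop body itself is hidden. A minimal standalone sketch of the round-robin walk, under the stated assumptions: the `atomic_fetchadd_int()` on `start_dom` spreads starting points across successive calls, and each domain is exhausted before moving on. All `_sketch` names are illustrative, not from vm_pageout.c:

```c
/* Hypothetical stand-in for one domain's inactive/active queue passes. */
static int
scan_one_domain_sketch(int dom)
{
	(void)dom;
	return (0);	/* pages reclaimed */
}

/*
 * Round-robin walk: start at the domain picked by the caller's
 * atomic_fetchadd_int() and wrap until we return to it.
 */
static int
scan_domains_sketch(int initial_dom, int ndomains)
{
	int dom, reclaimed;

	dom = initial_dom;
	reclaimed = 0;
	do {
		reclaimed += scan_one_domain_sketch(dom);
		dom = (dom + 1) % ndomains;	/* wrap past the last domain */
	} while (dom != initial_dom);
	return (reclaimed);
}
```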
 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
 	}
 	vm_pagequeue_unlock(pq);

 	/*
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
-	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count +
+	page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
 	    vm_paging_target() + deficit + addl_page_shortage;

 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
 	vm_pagequeue_lock(pq);
 	maxscan = pq->pq_cnt;

 	/*
 	 * If we're just idle polling attempt to visit every

--- 249 unchanged lines hidden ---
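The changed line above is the deactivation budget: how far the inactive queue is below its target, plus how many free pages are still needed, plus any carried-over shortfall. A worked example with entirely made-up counter values (all in pages); `paging_target` stands in for the `vm_paging_target()` result:

```c
#include <stdio.h>

/* Worked example of the page_shortage computation, with invented counts. */
int
main(void)
{
	int v_inactive_target = 9000;	/* desired inactive-queue depth */
	int v_inactive_count = 6000;	/* current inactive-queue depth */
	int paging_target = 500;	/* free pages still short of target */
	int deficit = 0;		/* recent allocation deficit */
	int addl_page_shortage = 100;	/* pages the inactive pass skipped */

	int page_shortage = v_inactive_target - v_inactive_count +
	    paging_target + deficit + addl_page_shortage;
	printf("try to deactivate %d pages\n", page_shortage);	/* 3600 */
	return (0);
}
```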
 		} else
 			PROC_UNLOCK(p);
 	}
 	sx_sunlock(&allproc_lock);
 	if (bigproc != NULL) {
 		killproc(bigproc, "out of swap space");
 		sched_nice(bigproc, PRIO_MIN);
 		PROC_UNLOCK(bigproc);
-		wakeup(&cnt.v_free_count);
+		wakeup(&vm_cnt.v_free_count);
 	}
 }

 static void
 vm_pageout_worker(void *arg)
 {
 	struct vm_domain *domain;
 	int domidx;

--- 19 unchanged lines hidden ---
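The worker prologue above declares `domain` and `domidx` but the assignment falls in the hidden lines. Each worker receives its domain index smuggled through the `void *` argument (the counterpart of the `(void *)(uintptr_t)i` passed to `kthread_add()` later in this diff), so the hidden code presumably recovers it along these lines; `vm_dom` as the global per-domain array is an assumption here:

```c
static void
vm_pageout_worker_sketch(void *arg)
{
	struct vm_domain *domain;
	int domidx;

	/* Recover the domain index carried in the void * argument. */
	domidx = (int)(uintptr_t)arg;
	domain = &vm_dom[domidx];	/* assumed global per-domain array */

	/* ... per-domain scan loop as shown in the surrounding hunks ... */
}
```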
 		 * not clear vm_pages_needed until we reach our target,
 		 * otherwise we may be woken up over and over again and
 		 * waste a lot of cpu.
 		 */
 		mtx_lock(&vm_page_queue_free_mtx);
 		if (vm_pages_needed && !vm_page_count_min()) {
 			if (!vm_paging_needed())
 				vm_pages_needed = 0;
-			wakeup(&cnt.v_free_count);
+			wakeup(&vm_cnt.v_free_count);
 		}
 		if (vm_pages_needed) {
 			/*
 			 * Still not done, take a second pass without waiting
 			 * (unlimited dirty cleaning), otherwise sleep a bit
 			 * and try again.
 			 */
 			if (domain->vmd_pass > 1)

--- 6 unchanged lines hidden ---

 			 * stats.
 			 */
 			domain->vmd_pass = 0;
 			msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
 			    PVM, "psleep", hz);

 		}
 		if (vm_pages_needed) {
-			cnt.v_pdwakeups++;
+			vm_cnt.v_pdwakeups++;
 			domain->vmd_pass++;
 		}
 		mtx_unlock(&vm_page_queue_free_mtx);
 		vm_pageout_scan(domain, domain->vmd_pass);
 	}
 }

 /*

--- 4 unchanged lines hidden ---

 {
 #if MAXMEMDOM > 1
 	int error, i;
 #endif

 	/*
 	 * Initialize some paging parameters.
 	 */
-	cnt.v_interrupt_free_min = 2;
-	if (cnt.v_page_count < 2000)
+	vm_cnt.v_interrupt_free_min = 2;
+	if (vm_cnt.v_page_count < 2000)
 		vm_pageout_page_count = 8;

 	/*
 	 * v_free_reserved needs to include enough for the largest
 	 * swap pager structures plus enough for any pv_entry structs
 	 * when paging.
 	 */
-	if (cnt.v_page_count > 1024)
-		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
+	if (vm_cnt.v_page_count > 1024)
+		vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
 	else
-		cnt.v_free_min = 4;
-	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
-	    cnt.v_interrupt_free_min;
-	cnt.v_free_reserved = vm_pageout_page_count +
-	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
-	cnt.v_free_severe = cnt.v_free_min / 2;
-	cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
-	cnt.v_free_min += cnt.v_free_reserved;
-	cnt.v_free_severe += cnt.v_free_reserved;
-	cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
-	if (cnt.v_inactive_target > cnt.v_free_count / 3)
-		cnt.v_inactive_target = cnt.v_free_count / 3;
+		vm_cnt.v_free_min = 4;
+	vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
+	    vm_cnt.v_interrupt_free_min;
+	vm_cnt.v_free_reserved = vm_pageout_page_count +
+	    vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
+	vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
+	vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
+	vm_cnt.v_free_min += vm_cnt.v_free_reserved;
+	vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
+	vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
+	if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
+		vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;

 	/*
 	 * Set the default wakeup threshold to be 10% above the minimum
 	 * page limit.  This keeps the steady state out of shortfall.
 	 */
-	vm_pageout_wakeup_thresh = (cnt.v_free_min / 10) * 11;
+	vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;

 	/*
 	 * Set interval in seconds for active scan.  We want to visit each
 	 * page at least once every ten minutes.  This is to prevent worst
 	 * case paging behaviors with stale active LRU.
 	 */
 	if (vm_pageout_update_period == 0)
 		vm_pageout_update_period = 600;

 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
-		vm_page_max_wired = cnt.v_free_count / 3;
+		vm_page_max_wired = vm_cnt.v_free_count / 3;

 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
 	for (i = 1; i < vm_ndomains; i++) {
 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
 		    curproc, NULL, 0, 0, "dom%d", i);
 		if (error != 0) {
 			panic("starting pageout for domain %d, error %d\n",
 			    i, error);
 		}
 	}
 #endif
 	vm_pageout_worker((void *)(uintptr_t)0);
 }

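The initialization above derives every free-page threshold from `v_page_count`. A worked example for a hypothetical machine with 250000 4 KB pages (about 1 GB); `MAXBSIZE = 65536` and `vm_pageout_page_count = 16` are assumed values for this vintage of the code, and the evaluation order follows the diff (targets computed before `v_free_min` absorbs the reserve):

```c
#include <stdio.h>

/* Worked threshold arithmetic for an assumed ~1 GB machine. */
int
main(void)
{
	int v_page_count = 250000, page_size = 4096, maxbsize = 65536;
	int vm_pageout_page_count = 16;	/* assumed default */
	int v_interrupt_free_min = 2;

	int v_free_min = 4 + (v_page_count - 1024) / 200;	/* 1248 */
	int v_pageout_free_min = (2 * maxbsize) / page_size +
	    v_interrupt_free_min;				/* 34 */
	int v_free_reserved = vm_pageout_page_count +
	    v_pageout_free_min + v_page_count / 768;		/* 375 */
	int v_free_severe = v_free_min / 2;			/* 624 */
	int v_free_target = 4 * v_free_min + v_free_reserved;	/* 5367 */

	v_free_min += v_free_reserved;				/* 1623 */
	v_free_severe += v_free_reserved;			/* 999 */

	int v_inactive_target = (3 * v_free_target) / 2;	/* 8050 */
	int wakeup_thresh = (v_free_min / 10) * 11;		/* 1782 */

	printf("min %d target %d severe %d inactive %d wakeup %d\n",
	    v_free_min, v_free_target, v_free_severe,
	    v_inactive_target, wakeup_thresh);
	return (0);
}
```

So on this hypothetical box the daemon wakes at 1782 free pages (about 7 MB) and works back up toward 5367 (about 21 MB), keeping the steady state comfortably above the 999-page severe line.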
 /*
  * Unless the free page queue lock is held by the caller, this function
  * should be regarded as advisory.  Specifically, the caller should
- * not msleep() on &cnt.v_free_count following this function unless
+ * not msleep() on &vm_cnt.v_free_count following this function unless
  * the free page queue lock is held until the msleep() is performed.
  */
 void
 pagedaemon_wakeup(void)
 {

 	if (!vm_pages_needed && curthread->td_proc != pageproc) {
 		vm_pages_needed = 1;

--- 149 unchanged lines hidden ---
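The advisory comment above is warning about a lost-wakeup race: if a caller checks the free count, drops the free queue lock, and only then sleeps on `&vm_cnt.v_free_count`, the daemon's `wakeup()` can land in the gap. The safe pattern is to keep the lock across the final check and the `msleep()`. A hedged sketch, modeled on the era's `VM_WAIT` path rather than quoted from it:

```c
/*
 * Sleep until the pagedaemon replenishes free pages.  The free queue
 * lock bridges the check and the msleep(), so a concurrent
 * wakeup(&vm_cnt.v_free_count) cannot be lost; details are assumed.
 */
static void
wait_for_free_pages_sketch(void)
{
	mtx_lock(&vm_page_queue_free_mtx);
	while (vm_page_count_min()) {	/* still critically short? */
		pagedaemon_wakeup();	/* flag the daemon for work */
		msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx,
		    PVM, "vmwait", 0);	/* lock is released while asleep */
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}
```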