--- vm_pageout.c	(63e9755548e4feebf798686ab8bce0cdaaaf7b46)
+++ vm_pageout.c	(0012f373e43db2341c20329163ed2d5ad3b0f341)
/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 *  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 *  All rights reserved.
 * Copyright (c) 1994 David Greenman
--- 455 unchanged lines hidden ---
         *
         * We do not have to fixup the clean/dirty bits here... we can
         * allow the pager to do it after the I/O completes.
         *
         * NOTE! mc[i]->dirty may be partial or fragmented due to an
         * edge case with file fragments.
         */
        for (i = 0; i < count; i++) {
-               KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
+               KASSERT(vm_page_all_valid(mc[i]),
473 ("vm_pageout_flush: partially invalid page %p index %d/%d", 474 mc[i], i, count)); 475 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0, 476 ("vm_pageout_flush: writeable page %p", mc[i])); 477 vm_page_busy_downgrade(mc[i]); 478 } 479 vm_object_pip_add(object, count); 480 --- 343 unchanged lines hidden (view full) --- 824 vm_page_dequeue_deferred(m); 825 continue; 826 } 827 828 /* 829 * Invalid pages can be easily freed. They cannot be 830 * mapped; vm_page_free() asserts this. 831 */ | 473 ("vm_pageout_flush: partially invalid page %p index %d/%d", 474 mc[i], i, count)); 475 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0, 476 ("vm_pageout_flush: writeable page %p", mc[i])); 477 vm_page_busy_downgrade(mc[i]); 478 } 479 vm_object_pip_add(object, count); 480 --- 343 unchanged lines hidden (view full) --- 824 vm_page_dequeue_deferred(m); 825 continue; 826 } 827 828 /* 829 * Invalid pages can be easily freed. They cannot be 830 * mapped; vm_page_free() asserts this. 831 */ |
-               if (m->valid == 0)
+               if (vm_page_none_valid(m))
                        goto free_page;

                /*
                 * If the page has been referenced and the object is not dead,
                 * reactivate or requeue the page depending on whether the
                 * object is mapped.
                 *
                 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
--- 714 unchanged lines hidden ---
                        vm_page_dequeue_deferred(m);
                        continue;
                }

                /*
                 * Invalid pages can be easily freed. They cannot be
                 * mapped, vm_page_free() asserts this.
                 */
-               if (m->valid == 0)
+               if (vm_page_none_valid(m))
                        goto free_page;

                /*
                 * If the page has been referenced and the object is not dead,
                 * reactivate or requeue the page depending on whether the
                 * object is mapped.
                 *
                 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
--- 674 unchanged lines hidden ---
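
All three hunks make the same kind of change: open-coded tests of the page's
valid bitmask (mc[i]->valid == VM_PAGE_BITS_ALL, m->valid == 0) are replaced
with the vm_page_all_valid() and vm_page_none_valid() accessors. The accessors
themselves are declared in vm/vm_page.h and are not part of this diff; the
following is only a minimal sketch of what such helpers could look like,
assuming they wrap the same comparisons the old call sites performed inline
(the real definitions may load m->valid differently, e.g. atomically):

/*
 * Sketch only -- not the definitions from vm/vm_page.h. Relies on vm_page_t
 * and VM_PAGE_BITS_ALL as provided by that header.
 */
static inline bool
vm_page_all_valid(vm_page_t m)
{

        /* Every bit in the valid bitmask is set: the page is fully valid. */
        return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

        /* No valid bits set: the page holds no data and can simply be freed. */
        return (m->valid == 0);
}

With helpers of this shape, the KASSERT() in vm_pageout_flush() and the
invalid-page checks in the two scan loops above keep their meaning while the
representation of the valid bits stays private to vm_page.h.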