--- vm_page.c (9328cbc047a607ed479c15720b3e881944af9e56)
+++ vm_page.c (a81c400e753c30a7e50e7b5b878d0248696b8b23)
 /*-
  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  *
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
  *
  * This code is derived from software contributed to Berkeley by

--- 99 unchanged lines hidden ---

 #include <vm/vm_radix.h>
 #include <vm/vm_reserv.h>
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>

 #include <machine/md_var.h>

-extern int uma_startup_count(int);
-extern void uma_startup(void *, int);
-extern int vmem_startup_count(void);
-
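The three prototypes deleted above existed only so that vm_page_startup() could pre-size a bootstrap arena for the kernel memory allocators; the matching consumer code disappears from vm_page_startup() further down in this diff. A condensed sketch of the retired sizing protocol, using only identifiers that appear in the deleted lines below:

        /* Each early consumer reported its boot-page demand up front... */
        boot_pages = uma_startup_count(8);      /* UMA zones created at VM startup */
        boot_pages += vmem_startup_count();     /* vmem(9) boundary tags */
        /* ...and the arena was later handed to UMA in one piece. */
        uma_startup((void *)mapped, boot_pages);
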
 struct vm_domain vm_dom[MAXMEMDOM];

 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);

 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];

 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
 /* The following fields are protected by the domainset lock. */

--- 36 unchanged lines hidden ---

  * or for paging into sparsely invalid regions.
  */
 vm_page_t bogus_page;

 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;

-static int boot_pages;
-SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
-    &boot_pages, 0,
-    "number of pages allocated for bootstrapping the VM system");
-
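With the bootstrap arena gone there is nothing for vm.boot_pages to tune, so the loader tunable and its read-only sysctl are deleted too. Note the CTLFLAG_NOFETCH in the old definition: tunables cannot be fetched automatically this early in boot, so the deleted startup code paired the flag with a manual fetch. The pairing, reconstructed from the deleted lines in this diff:

        /* NOFETCH: the sysctl layer must not fetch the tunable itself... */
        SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
            &boot_pages, 0,
            "number of pages allocated for bootstrapping the VM system");
        /* ...because vm_page_startup() runs first and fetched it by hand. */
        TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
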
 static TAILQ_HEAD(, vm_page) blacklist_head;
 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

 static uma_zone_t fakepg_zone;

 static void vm_page_alloc_check(vm_page_t m);

--- 378 unchanged lines hidden ---

  * page queues.
  */
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
         struct vm_phys_seg *seg;
         vm_page_t m;
         char *list, *listend;
-        vm_offset_t mapped;
         vm_paddr_t end, high_avail, low_avail, new_end, size;
         vm_paddr_t page_range __unused;
         vm_paddr_t last_pa, pa;
         u_long pagecount;
         int biggestone, i, segind;
 #ifdef WITNESS
+        vm_offset_t mapped;
         int witness_size;
 #endif
 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
         long ii;
 #endif

         vaddr = round_page(vaddr);

--- 5 unchanged lines hidden ---

          * Initialize the page and queue locks.
          */
         mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
         for (i = 0; i < PA_LOCK_COUNT; i++)
                 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
         for (i = 0; i < vm_ndomains; i++)
                 vm_page_domain_init(i);

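One small relocation above: the declaration of mapped moves from the top of vm_page_startup() into the #ifdef WITNESS block, since the WITNESS carve-out below is now its only remaining user; presumably this avoids a set-but-unused variable in kernels built without WITNESS. The resulting declaration block in the new revision reads:

#ifdef WITNESS
        vm_offset_t mapped;     /* used only by the WITNESS bootstrap */
        int witness_size;
#endif
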
-        /*
-         * Allocate memory for use when boot strapping the kernel memory
-         * allocator.  Tell UMA how many zones we are going to create
-         * before going fully functional.  UMA will add its zones.
-         *
-         * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
-         * KMAP ENTRY, MAP ENTRY, VMSPACE.
-         */
-        boot_pages = uma_startup_count(8);
-
-#ifndef UMA_MD_SMALL_ALLOC
-        /* vmem_startup() calls uma_prealloc(). */
-        boot_pages += vmem_startup_count();
-        /* vm_map_startup() calls uma_prealloc(). */
-        boot_pages += howmany(MAX_KMAP,
-            slab_ipers(sizeof(struct vm_map), UMA_ALIGN_PTR));
-
-        /*
-         * Before we are fully boot strapped we need to account for the
-         * following allocations:
-         *
-         * "KMAP ENTRY" from kmem_init()
-         * "vmem btag" from vmem_startup()
-         * "vmem" from vmem_create()
-         * "KMAP" from vm_map_startup()
-         *
-         * Each needs at least one page per-domain.
-         */
-        boot_pages += 4 * vm_ndomains;
-#endif
-        /*
-         * CTFLAG_RDTUN doesn't work during the early boot process, so we must
-         * manually fetch the value.
-         */
-        TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
-        new_end = end - (boot_pages * UMA_SLAB_SIZE);
-        new_end = trunc_page(new_end);
-        mapped = pmap_map(&vaddr, new_end, end,
-            VM_PROT_READ | VM_PROT_WRITE);
-        bzero((void *)mapped, end - new_end);
-        uma_startup((void *)mapped, boot_pages);
-
+        new_end = end;
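The large deleted block above is the heart of the change: vm_page_startup() no longer computes boot_pages and no longer steals boot_pages * UMA_SLAB_SIZE bytes from the top of the largest physical chunk for uma_startup(). The single added line is the whole replacement; only the optional WITNESS region still lowers new_end. Side by side, using only identifiers from the diff:

        /* Before: carve and map a UMA bootstrap arena below 'end'. */
        new_end = trunc_page(end - (boot_pages * UMA_SLAB_SIZE));
        /* After: no carve-out; WITNESS (below, if enabled) is the only taker. */
        new_end = end;
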
 #ifdef WITNESS
         witness_size = round_page(witness_startup_count());
         new_end -= witness_size;
         mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
             VM_PROT_READ | VM_PROT_WRITE);
         bzero((void *)mapped, witness_size);
         witness_startup((void *)mapped);
 #endif

--- 4596 unchanged lines hidden ---
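Both revisions keep the same early-boot carve-out idiom for WITNESS, worth spelling out since it runs before any working allocator exists: shrink the region from the top, map the stolen range into kernel virtual address space, zero it, and hand it to the consumer. Condensed from the WITNESS block above:

        witness_size = round_page(witness_startup_count());
        new_end -= witness_size;                /* steal from the top */
        mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
            VM_PROT_READ | VM_PROT_WRITE);      /* map into KVA */
        bzero((void *)mapped, witness_size);    /* zero before first use */
        witness_startup((void *)mapped);        /* hand off to WITNESS */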