--- pmap.c (efeaf95a41820e4eb661a90cfb59a26e36575784)
+++ pmap.c (87b911575102f86e2cbceac54e8b0518d27200cd)
 /*
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  *
--- 25 unchanged lines hidden ---
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.66 1995/12/03 18:35:28 bde Exp $
+ * $Id: pmap.c,v 1.67 1995/12/07 12:45:36 davidg Exp $
  */

 /*
  * Derived from hp300 version by Mike Hibler, this version by William
  * Jolitz uses a recursive map [a pde points to the page directory] to
  * map the page tables using the pagetables themselves. This is done to
  * reduce the impact on kernel virtual memory for lots of sparse address
  * space, and to reduce the cost of memory to each process.
--- 52 unchanged lines hidden ---
 #include <vm/vm_extern.h>

 #include <machine/pcb.h>
 #include <machine/cputypes.h>
 #include <machine/md_var.h>

 #include <i386/isa/isa.h>

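Review note: the "recursive map" in the comment above is worth a concrete picture. With one PDE pointing back at the page directory itself, every page table shows up in a fixed 4 MB virtual window, and the PTE for any virtual address falls out of plain arithmetic. A minimal runnable sketch, assuming a window base of 0xffc00000 and 4-byte PTEs; the real file derives these from UPT_MIN_ADDRESS and PD_SHIFT:

    #include <stdio.h>

    #define PAGE_SHIFT 12               /* assumed: 4 KB pages */
    #define PTE_WINDOW 0xffc00000UL     /* assumed self-map window base */

    /* VA at which the PTE mapping `va' appears inside the window */
    static unsigned long
    pte_addr(unsigned long va)
    {
        return PTE_WINDOW + (va >> PAGE_SHIFT) * 4;     /* 4-byte PTEs */
    }

    int
    main(void)
    {
        /* 0x00400000 is page-directory slot 1, so its PTE lands
         * exactly one page into the window: prints 0xffc01000 */
        printf("%#lx\n", pte_addr(0x00400000UL));
        return 0;
    }
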
-extern void init_pv_entries __P((int));
-extern void pmap_copy_on_write __P((vm_offset_t pa));
+static void init_pv_entries __P((int));
 extern void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
                vm_object_t object, vm_offset_t offset,
                vm_offset_t size));
-extern void pmap_remove_all __P((vm_offset_t pa));
-extern void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
+static void pmap_remove_all __P((vm_offset_t pa));
+static void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
                vm_offset_t va));

 /*
  * Get PDEs and PTEs for user/kernel address space
  */
 #define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
 #define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

--- 8 unchanged lines hidden ---
 #define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

 /*
  * Given a map and a machine independent protection code,
  * convert to a vax protection code.
  */
 #define pte_prot(m, p) (protection_codes[p])
-int protection_codes[8];
+static int protection_codes[8];
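Review note: pte_prot() above is just a table lookup, so the now-static protection_codes[] carries all the policy. A standalone sketch of how such a table is typically filled on the i386, which has no separate execute bit; the bit values and names here are assumptions, not copied from this file's i386_protection_init():

    #include <stdio.h>

    #define VM_PROT_READ    0x1     /* machine-independent protections */
    #define VM_PROT_WRITE   0x2
    #define VM_PROT_EXECUTE 0x4
    #define PGV  0x001              /* assumed PTE "valid" bit */
    #define PGRW 0x002              /* assumed PTE "writable" bit */

    static int codes[8];            /* index = 3-bit protection combo */

    static void
    codes_init(void)
    {
        int prot;

        for (prot = 0; prot < 8; prot++) {
            /* no execute bit on i386: readable or executable just
             * means valid; writable additionally sets the RW bit */
            if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
                codes[prot] = PGV;
            if (prot & VM_PROT_WRITE)
                codes[prot] = PGV | PGRW;
        }
    }

    int
    main(void)
    {
        codes_init();
        printf("%#x\n", codes[VM_PROT_READ | VM_PROT_WRITE]);
        return 0;
    }
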

-struct pmap kernel_pmap_store;
+static struct pmap kernel_pmap_store;
 pmap_t kernel_pmap;

 vm_offset_t avail_start;       /* PA of first available physical page */
 vm_offset_t avail_end;         /* PA of last available physical page */
-vm_size_t mem_size;            /* memory size in bytes */
 vm_offset_t virtual_avail;     /* VA of first avail page (after kernel bss) */
 vm_offset_t virtual_end;       /* VA of last avail page (end of kernel AS) */
-boolean_t pmap_initialized = FALSE;    /* Has pmap_init completed? */
-vm_offset_t vm_first_phys, vm_last_phys;
+static boolean_t pmap_initialized = FALSE;     /* Has pmap_init completed? */
+static vm_offset_t vm_first_phys;

-int nkpt;
+static int nkpt;

 extern vm_offset_t clean_sva, clean_eva;
 extern int cpu_class;

 /*
  * All those kernel PT submaps that BSD is so fond of
  */
-pt_entry_t *CMAP1, *CMAP2, *ptmmap;
-pv_entry_t pv_table;
-caddr_t CADDR1, CADDR2, ptvmmap;
-pt_entry_t *msgbufmap;
+pt_entry_t *CMAP1;
+static pt_entry_t *CMAP2, *ptmmap;
+static pv_entry_t pv_table;
+caddr_t CADDR1, ptvmmap;
+static caddr_t CADDR2;
+static pt_entry_t *msgbufmap;
 struct msgbuf *msgbufp;

 static void free_pv_entry __P((pv_entry_t pv));
 static pt_entry_t *
                get_pt_entry __P((pmap_t pmap));
 static pv_entry_t
                get_pv_entry __P((void));
 static void i386_protection_init __P((void));
--- 89 unchanged lines hidden ---
                return 1;
        }
        return 0;
 }

 /*
  * find the vm_page_t of a pte (only) given va of pte and pmap
  */
-__inline vm_page_t
+static __inline vm_page_t
 pmap_pte_vm_page(pmap, pt)
        pmap_t pmap;
        vm_offset_t pt;
 {
        vm_page_t m;

        pt = i386_trunc_page(pt);
        pt = (pt - UPT_MIN_ADDRESS) / NBPG;
--- 193 unchanged lines hidden ---
                pmap_enter(kernel_pmap, virt, start, prot, FALSE);
                virt += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        return (virt);
 }

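Review note: pmap_pte_vm_page() above (before the elided span) runs the recursive window backwards: truncating the PTE's VA to a page boundary and dividing its offset from UPT_MIN_ADDRESS by NBPG names the page table, equivalently the page-directory slot, that holds it. A tiny runnable sketch under the same assumed constants as before:

    #include <stdio.h>

    #define NBPG_SK 4096UL              /* assumed page size (NBPG) */
    #define UPT_MIN_SK 0xffc00000UL     /* assumed window base */

    /* page-directory index of the table holding the PTE at pte_va */
    static unsigned long
    pde_index(unsigned long pte_va)
    {
        pte_va &= ~(NBPG_SK - 1);       /* i386_trunc_page() */
        return (pte_va - UPT_MIN_SK) / NBPG_SK;
    }

    int
    main(void)
    {
        printf("%lu\n", pde_index(0xffc01000UL));   /* prints 1 */
        return 0;
    }
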
-/*
- * Create and return a physical map.
- *
- * If the size specified for the map
- * is zero, the map is an actual physical
- * map, and may be referenced by the
- * hardware.
- *
- * If the size specified is non-zero,
- * the map will be used in software only, and
- * is bounded by that size.
- *
- */
-
-pmap_t
-pmap_create(size)
-       vm_size_t size;
-{
-       register pmap_t pmap;
-
-       /*
-        * Software use map does not need a pmap
-        */
-       if (size)
-               return (NULL);
-
-       pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
-       bzero(pmap, sizeof(*pmap));
-       pmap_pinit(pmap);
-       return (pmap);
-}
-
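Review note: the deleted pmap_create() was a thin allocate-then-init wrapper; its comment records the old Mach convention that a nonzero size requested a software-only map, which this implementation refused by returning NULL. A hedged userland sketch of the same shape, with stand-in types (calloc() standing in for malloc + bzero):

    #include <stdlib.h>

    struct pmap_sk {            /* stand-in for struct pmap */
        int pm_count;
    };

    /* stand-in for pmap_pinit(): finish a zeroed pmap */
    static void
    pinit_sk(struct pmap_sk *pm)
    {
        pm->pm_count = 1;
    }

    /* shape of the removed function: a nonzero size asked for a
     * software-only map, which this pmap never supported */
    static struct pmap_sk *
    create_sk(unsigned long size)
    {
        struct pmap_sk *pm;

        if (size)
            return NULL;
        pm = calloc(1, sizeof(*pm));
        if (pm)
            pinit_sk(pm);
        return pm;
    }

    int
    main(void)
    {
        struct pmap_sk *pm = create_sk(0);

        free(pm);
        return 0;
    }
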
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
  */
 void
 pmap_pinit(pmap)
        register struct pmap *pmap;
 {
        /*
--- 11 unchanged lines hidden ---

        pmap->pm_count = 1;
 }

 /*
  * grow the number of kernel page table entries, if needed
  */

-vm_page_t nkpg;
+static vm_page_t nkpg;
 vm_offset_t kernel_vm_end;

 void
 pmap_growkernel(vm_offset_t addr)
 {
        struct proc *p;
        struct pmap *pmap;
        int s;
--- 82 unchanged lines hidden ---
        }
 }

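Review note: pmap_growkernel() above has one subtlety its declarations don't show: every process page directory carries its own copy of the kernel PDEs, so a newly allocated kernel page table must be installed in all of them, not just the current one. A runnable sketch of that propagation step; the pm_next list is hypothetical (the real code iterates over the process list instead):

    #include <stddef.h>

    typedef unsigned long pde_sk;

    struct pmap_sk {
        pde_sk pm_pdir[1024];       /* one page directory per pmap */
        struct pmap_sk *pm_next;    /* hypothetical list of all pmaps */
    };

    /* give every address space the new kernel page table's PDE */
    static void
    propagate_kernel_pde(struct pmap_sk *all, int pdi, pde_sk pde)
    {
        struct pmap_sk *pm;

        for (pm = all; pm != NULL; pm = pm->pm_next)
            pm->pm_pdir[pdi] = pde;
    }

    int
    main(void)
    {
        static struct pmap_sk a, b;

        a.pm_next = &b;
        propagate_kernel_pde(&a, 1023, 0x00345003UL);
        return (b.pm_pdir[1023] == a.pm_pdir[1023]) ? 0 : 1;
    }
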
 #define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)

 /*
  * Data for the pv entry allocation mechanism
  */
-int pv_freelistcnt;
-pv_entry_t pv_freelist;
-vm_offset_t pvva;
-int npvvapg;
+static int pv_freelistcnt;
+static pv_entry_t pv_freelist;
+static vm_offset_t pvva;
+static int npvvapg;

 /*
  * free the pv_entry back to the free list
  */
 inline static void
 free_pv_entry(pv)
        pv_entry_t pv;
 {
--- 132 unchanged lines hidden ---
 }

 /*
  * If it is the first entry on the list, it is actually
  * in the header and we must copy the following entry up
  * to the header. Otherwise we must search the list for
  * the entry. In either case we free the now unused entry.
  */
-void
+static void
 pmap_remove_entry(pmap, pv, va)
        struct pmap *pmap;
        pv_entry_t pv;
        vm_offset_t va;
 {
        pv_entry_t npv;
        int s;

--- 170 unchanged lines hidden ---
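Review note: the header-embedded first entry described in the comment before pmap_remove_entry() makes the removal dance non-obvious. A runnable sketch of the same pattern with stand-in types; free() stands in for free_pv_entry():

    #include <stdlib.h>

    struct pv_sk {                  /* stand-in for struct pv_entry */
        struct pv_sk *pv_next;
        void *pv_pmap;
        unsigned long pv_va;
    };

    /* remove the (pmap, va) entry; the first entry lives inside the
     * pv_table header itself, so removing it means copying the next
     * entry up into the header */
    static void
    pv_remove(struct pv_sk *head, void *pmap, unsigned long va)
    {
        struct pv_sk *npv;

        if (head->pv_pmap == pmap && head->pv_va == va) {
            npv = head->pv_next;
            if (npv != NULL) {
                *head = *npv;           /* copy the follower up... */
                free(npv);              /* ...and free its old shell */
            } else
                head->pv_pmap = NULL;   /* list is now empty */
            return;
        }
        for (npv = head; npv->pv_next != NULL; npv = npv->pv_next)
            if (npv->pv_next->pv_pmap == pmap && npv->pv_next->pv_va == va) {
                struct pv_sk *dead = npv->pv_next;

                npv->pv_next = dead->pv_next;
                free(dead);
                break;
            }
    }

    int
    main(void)
    {
        struct pv_sk head = { NULL, (void *)1, 0x1000 };

        pv_remove(&head, (void *)1, 0x1000);
        return head.pv_pmap == NULL ? 0 : 1;
    }
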
  * all physical maps in which it resides.
  * Reflects back modify bits to the pager.
  *
  * Notes:
  *     Original versions of this routine were very
  *     inefficient because they iteratively called
  *     pmap_remove (slow...)
  */
-void
+static void
 pmap_remove_all(pa)
        vm_offset_t pa;
 {
        register pv_entry_t pv, npv;
        register pt_entry_t *pte, *ptp;
        vm_offset_t va;
        struct pmap *pmap;
        vm_page_t m;
--- 866 unchanged lines hidden ---
  */
 void
 pmap_clear_reference(vm_offset_t pa)
 {
        pmap_changebit((pa), PG_U, FALSE);
 }

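Review note: the Notes block above pmap_remove_all() says earlier versions iterated pmap_remove() per map; the fast version walks the page's pv chain and clears each PTE directly. A runnable sketch of that walk, with hypothetical fields:

    #include <stddef.h>

    struct pv_it {                  /* stand-in pv_entry */
        struct pv_it *pv_next;
        unsigned int *pv_pte;       /* hypothetical: points at the PTE */
    };

    /* invalidate every mapping of one physical page by walking its pv
     * chain, instead of running pmap_remove() over whole maps */
    static void
    remove_all_sk(struct pv_it *pv)
    {
        for (; pv != NULL; pv = pv->pv_next)
            *pv->pv_pte = 0;        /* the real code also harvests the
                                     * modify bit and flushes the TLB */
    }

    int
    main(void)
    {
        unsigned int pa = 0x1007, pb = 0x2007;
        struct pv_it b = { NULL, &pb }, a = { &b, &pa };

        remove_all_sk(&a);
        return (pa | pb) == 0 ? 0 : 1;
    }
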
-/*
- * Routine:    pmap_copy_on_write
- * Function:
- *     Remove write privileges from all
- *     physical maps for this physical page.
- */
-void
-pmap_copy_on_write(vm_offset_t pa)
-{
-       pmap_changebit((pa), PG_RW, FALSE);
-}
-
 /*
  * Miscellaneous support routines follow
  */

 static void
 i386_protection_init()
 {
        register int *kp, prot;

--- 102 unchanged lines hidden ---