--- pmap.c (de54353fb88689ba1351ca5750a9e383c67e1721)
+++ pmap.c (7ab9b220d934cea20d147fbaabf4a241a0aa5b0a)

 /*
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman
  * All rights reserved.
  *
--- 183 unchanged lines hidden ---
 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
 static int pgeflag;		/* PG_G or-in */
 static int pseflag;		/* PG_PS or-in */

 static int nkpt;
 vm_offset_t kernel_vm_end;
 extern u_int32_t KERNend;

+#ifdef PAE
+static uma_zone_t pdptzone;
+#endif
+
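The hunk above declares the UMA zone that will back per-pmap page directory pointer tables (PDPTs). For orientation: PAE widens each paging entry to 64 bits and inserts a tiny fourth level, a 4-entry PDPT, above the page directory, so a 32-bit virtual address decodes as 2+9+9+12 bits instead of the classic 10+10+12. The macros below are hypothetical illustrations of that split, not identifiers from pmap.c.

#include <stdint.h>

/*
 * Hypothetical illustration of the PAE i386 address split -- these
 * macros are not from pmap.c.  With NPGPTD == 4 page directory pages
 * per pmap, the top two bits of a virtual address select a PDPT entry.
 */
#define EX_PDPSHIFT	30	/* bits 31-30: PDPT index (0..3) */
#define EX_PDRSHIFT	21	/* bits 29-21: page directory index (0..511) */
#define EX_PAGESHIFT	12	/* bits 20-12: page table index; 11-0: offset */

#define ex_pdpt_index(va)	(((uint32_t)(va) >> EX_PDPSHIFT) & 0x3)
#define ex_pde_index(va)	(((uint32_t)(va) >> EX_PDRSHIFT) & 0x1ff)
#define ex_pte_index(va)	(((uint32_t)(va) >> EX_PAGESHIFT) & 0x1ff)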
 /*
  * Data for the pv entry allocation mechanism
  */
 static uma_zone_t pvzone;
 static struct vm_object pvzone_obj;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
 int pmap_pagedaemon_waken;

--- 35 unchanged lines hidden ---
 	    vm_page_t mpte, vm_page_t m);

 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);

 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
 static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-static void *pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+static void *pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+#ifdef PAE
+static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
+#endif

 static pd_entry_t pdir4mb;

 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

 /*
  * Move the kernel virtual free pointer to the next
--- 58 unchanged lines hidden ---
 	 * Initialize protection array.
 	 */
 	i386_protection_init();

 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
+#ifdef PAE
+	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
+#endif
 	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvlist);
 	LIST_INIT(&allpmaps);
 	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
 	mtx_lock_spin(&allpmaps_lock);
 	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
 	mtx_unlock_spin(&allpmaps_lock);
 	nkpt = NKPT;
--- 165 unchanged lines hidden ---
 	 * We do not need to broadcast the invltlb here, because
 	 * each AP does it the moment it is released from the boot
 	 * lock.  See ap_init().
 	 */
 	}
 }

 static void *
-pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
 	*flags = UMA_SLAB_PRIV;
 	return (void *)kmem_alloc(kernel_map, bytes);
 }

+#ifdef PAE
+static void *
+pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+{
+	*flags = UMA_SLAB_PRIV;
+	return (contigmalloc(PAGE_SIZE, NULL, 0, 0x0ULL, 0xffffffffULL, 1, 0));
+}
+#endif
+
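pmap_pdpt_allocf() exists because a PDPT has hardware-imposed placement rules that the generic kmem_alloc() backend used for PV entries does not guarantee: %cr3 is still a 32-bit register under PAE, so the PDPT's physical address must lie below 4GB, and the table must be physically contiguous. contigmalloc() can promise both. Below is an annotated restatement of the call; the parameter meanings follow the contigmalloc(9) contract of this era, but treat the exact prototype as an assumption and defer to sys/systm.h.

/*
 * Annotated approximation of the contigmalloc() call above --
 * hypothetical wrapper for illustration, not code from the diff.
 */
static void *
example_pdpt_page(void)
{
	return (contigmalloc(
	    PAGE_SIZE,		/* size: one physically contiguous page */
	    NULL,		/* malloc type: none */
	    0,			/* flags: may sleep for memory */
	    0x0ULL,		/* low: no lower physical-address bound */
	    0xffffffffULL,	/* high: must sit below 4GB for %cr3 */
	    1,			/* alignment: none needed here; the PDPT
				   zone's align mask (see pmap_init())
				   supplies the 32-byte alignment */
	    0));		/* boundary: may cross any boundary */
}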
 /*
  *	Initialize the pmap module.
  *	Called by vm_init, to initialize any structures that the pmap
  *	system needs to map virtual memory.
  *	pmap_init has been enhanced to support in a fairly consistant
  *	way, discontiguous physical memory.
  */
 void
--- 19 unchanged lines hidden ---
 	/*
 	 * init the pv free list
 	 */
 	initial_pvs = vm_page_array_size;
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
-	uma_zone_set_allocf(pvzone, pmap_allocf);
+	uma_zone_set_allocf(pvzone, pmap_pv_allocf);
 	uma_prealloc(pvzone, initial_pvs);

+#ifdef PAE
+	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
+	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, 0);
+	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
+#endif
+
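Note the seventh argument to uma_zcreate(): UMA's align parameter is a bit mask, so passing size - 1 for a power-of-two object size requests natural alignment. With NPGPTD == 4 and 8-byte pdpt_entry_t entries, each object is 32 bytes and the mask is 31, which yields the 32-byte-aligned PDPTs the processor requires. A hypothetical zone set up with the same idiom:

/*
 * Hypothetical example of the idiom above: a 32-byte object with
 * natural (32-byte) alignment.  The align argument is a mask, not a
 * byte count, which is why the diff passes size - 1.
 */
static uma_zone_t example_zone;

static void
example_zone_setup(void)
{
	example_zone = uma_zcreate("EXAMPLE",
	    32,			/* object size in bytes */
	    NULL, NULL,		/* ctor, dtor */
	    NULL, NULL,		/* uminit, fini */
	    32 - 1,		/* align mask: natural alignment */
	    0);			/* flags */
}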
 	/*
 	 * Now it is safe to enable pv_table recording.
 	 */
 	pmap_initialized = TRUE;
 }

 /*
  * Initialize the address space (zone) for the pv_entries.  Set a
--- 677 unchanged lines hidden ---
 }

 void
 pmap_pinit0(pmap)
 	struct pmap *pmap;
 {

 	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
+#ifdef PAE
+	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
+#endif
 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvlist);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 	mtx_lock_spin(&allpmaps_lock);
 	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 	mtx_unlock_spin(&allpmaps_lock);
 }

--- 8 unchanged lines hidden ---
 	vm_page_t ptdpg[NPGPTD];
 	vm_paddr_t pa;
 	int i;

 	/*
 	 * No need to allocate page table space yet but we do need a valid
 	 * page directory table.
 	 */
-	if (pmap->pm_pdir == NULL)
+	if (pmap->pm_pdir == NULL) {
 		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
 		    NBPTD);
+#ifdef PAE
+		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
+		KASSERT(((vm_offset_t)pmap->pm_pdpt &
+		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
+		    ("pmap_pinit: pdpt misaligned"));
+		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
+		    ("pmap_pinit: pdpt above 4g"));
+#endif
+	}
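The two KASSERTs re-verify, at allocation time, exactly the invariants the zone machinery was configured to provide: the PDPT is aligned to its own 32-byte size, and its physical address (recovered via pmap_kextract()) is below 4GB, since 4ULL << 30 == 0x100000000. A standalone, userspace restatement of the same arithmetic (the sample address is hypothetical):

#include <assert.h>
#include <stdint.h>

/* Userspace illustration of the checks above -- not kernel code. */
int
main(void)
{
	uint64_t pdpt_pa = 0x7ffe0;			/* sample physical address */
	uint64_t mask = 4 * sizeof(uint64_t) - 1;	/* NPGPTD * 8 - 1 == 0x1f */

	assert((pdpt_pa & mask) == 0);		/* "pdpt misaligned" condition */
	assert(pdpt_pa < (4ULL << 30));		/* "pdpt above 4g" condition */
	return (0);
}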

 	/*
 	 * allocate object for the ptes
 	 */
 	if (pmap->pm_pteobj == NULL)
 		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI +
 		    NPGPTD);

--- 26 unchanged lines hidden ---
 #ifdef SMP
 	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
 #endif

 	/* install self-referential address mapping entry(s) */
 	for (i = 0; i < NPGPTD; i++) {
 		pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 		pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
+#ifdef PAE
+		pmap->pm_pdpt[i] = pa | PG_V;
+#endif
 	}

 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvlist);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 }

 /*
--- 159 unchanged lines hidden ---
 	pmap->pm_pdir[MPPTDI] = 0;
 #endif

 	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);

 	vm_page_lock_queues();
 	for (i = 0; i < NPGPTD; i++) {
 		m = TAILQ_FIRST(&object->memq);
+#ifdef PAE
+		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
+		    ("pmap_release: got wrong ptd page"));
+#endif
 		m->wire_count--;
 		atomic_subtract_int(&cnt.v_wire_count, 1);
 		vm_page_busy(m);
 		vm_page_free_zero(m);
 	}
 	KASSERT(TAILQ_EMPTY(&object->memq),
 	    ("pmap_release: leaking page table pages"));
 	vm_page_unlock_queues();
--- 179 unchanged lines hidden ---
  * pmap_remove_pte: do the things to unmap a page in a process
  */
 static int
 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
 {
 	pt_entry_t oldpte;
 	vm_page_t m;

-	oldpte = atomic_readandclear_int(ptq);
+	oldpte = pte_load_clear(ptq);
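This one-line change carries much of the weight of PAE: a pt_entry_t is now 64 bits wide, so atomic_readandclear_int() can no longer read and zero a PTE in a single atomic step, and clearing the two halves separately would race against the hardware setting PG_A/PG_M. pte_load_clear() hides the entry width behind one name. Below is a sketch of one plausible shape for that abstraction; it is an assumption for illustration, including the atomic_cmpset_64() primitive (a cmpxchg8b loop on i386), not the committed machine/pmap.h code.

/* Sketch only -- a plausible pte_load_clear(), not the real header. */
#ifdef PAE
static __inline pt_entry_t
pte_load_clear(pt_entry_t *ptep)
{
	pt_entry_t v;

	do {
		v = *ptep;			/* snapshot the 64-bit entry */
	} while (!atomic_cmpset_64(ptep, v, 0));	/* swap in 0 atomically */
	return (v);
}
#else
static __inline pt_entry_t
pte_load_clear(pt_entry_t *ptep)
{
	return (atomic_readandclear_int(ptep));	/* 32-bit PTE: one xchg */
}
#endif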
 	if (oldpte & PG_W)
 		pmap->pm_stats.wired_count -= 1;
 	/*
 	 * Machines that don't support invlpg, also don't support
 	 * PG_G.
 	 */
 	if (oldpte & PG_G)
 		pmap_invalidate_page(kernel_pmap, va);
--- 149 unchanged lines hidden ---
 		    VM_PAGE_TO_PHYS(m));
 	}
 #endif
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	s = splvm();
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pv->pv_pmap->pm_stats.resident_count--;
 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
-		tpte = atomic_readandclear_int(pte);
+		tpte = pte_load_clear(pte);
 		if (tpte & PG_W)
 			pv->pv_pmap->pm_stats.wired_count--;
 		if (tpte & PG_A)
 			vm_page_flag_set(m, PG_REFERENCED);

 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
--- 1420 unchanged lines hidden ---
 	u_int32_t cr3;

 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 #if defined(SMP)
 	pmap->pm_active |= PCPU_GET(cpumask);
 #else
 	pmap->pm_active |= 1;
 #endif
+#ifdef PAE
+	cr3 = vtophys(pmap->pm_pdpt);
+#else
 	cr3 = vtophys(pmap->pm_pdir);
+#endif
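Under PAE the hardware page walk is rooted at the PDPT rather than at a page directory page, so pmap_activate() must load %cr3 with the PDPT's physical address instead of the page directory's. The hypothetical helper below summarizes the #ifdef above (the diff open-codes this logic; the comment shows the resulting walk):

/*
 * Hypothetical helper, not part of the diff.
 *
 *   non-PAE: %cr3 -> page directory (1 page) -> page table -> 4K page
 *   PAE:     %cr3 -> PDPT (4 x 64-bit entries, 32 bytes)
 *                 -> page directory (4 pages) -> page table -> 4K page
 */
static __inline u_int32_t
example_pmap_cr3(pmap_t pmap)
{
#ifdef PAE
	return (vtophys(pmap->pm_pdpt));	/* root: the 32-byte PDPT */
#else
	return (vtophys(pmap->pm_pdir));	/* root: the page directory */
#endif
}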
 	/* XXXKSE this is wrong.
 	 * pmap_activate is for the current thread on the current cpu
 	 */
 	if (p->p_flag & P_THREADED) {
 		/* Make sure all other cr3 entries are updated. */
 		/* what if they are running? XXXKSE (maybe abort them) */
 		FOREACH_THREAD_IN_PROC(p, td) {
 			td->td_pcb->pcb_cr3 = cr3;
--- 131 unchanged lines hidden ---