pmap.c (f53687f7b5bc719c707bf0eebac09e5173e64b2a) → pmap.c (27e9b35e071747be72e61b7a1a05ca2ea27f2bdb)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 25 unchanged lines hidden (view full) ---

34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 25 unchanged lines hidden (view full) ---

34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
42 * $Id: pmap.c,v 1.120 1996/09/28 04:22:10 dyson Exp $
42 * $Id: pmap.c,v 1.121 1996/09/28 15:28:40 bde Exp $
43 */
44
45/*
46 * Manages physical address maps.
47 *
48 * In addition to hardware address maps, this
49 * module is called upon to provide software-use-only
50 * maps which may or may not be stored in the same

--- 232 unchanged lines hidden (view full) ---

283 /*
284 * ptemap is used for pmap_pte_quick
285 */
286 SYSMAP(unsigned *, PMAP1, PADDR1, 1);
287
288 virtual_avail = va;
289
290 *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
43 */
44
45/*
46 * Manages physical address maps.
47 *
48 * In addition to hardware address maps, this
49 * module is called upon to provide software-use-only
50 * maps which may or may not be stored in the same

--- 232 unchanged lines hidden (view full) ---

283 /*
284 * ptemap is used for pmap_pte_quick
285 */
286 SYSMAP(unsigned *, PMAP1, PADDR1, 1);
287
288 virtual_avail = va;
289
290 *(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
291 pmap_update();
291 invltlb();
292
293}
294
295/*
296 * Initialize the pmap module.
297 * Called by vm_init, to initialize any structures that the pmap
298 * system needs to map virtual memory.
299 * pmap_init has been enhanced to support in a fairly consistent

--- 97 unchanged lines hidden (view full) ---

397pmap_track_modified( vm_offset_t va) {
398 if ((va < clean_sva) || (va >= clean_eva))
399 return 1;
400 else
401 return 0;
402}
403
404static PMAP_INLINE void
292
293}
294
295/*
296 * Initialize the pmap module.
297 * Called by vm_init, to initialize any structures that the pmap
298 * system needs to map virtual memory.
299 * pmap_init has been enhanced to support in a fairly consistent

--- 97 unchanged lines hidden (view full) ---

397pmap_track_modified( vm_offset_t va) {
398 if ((va < clean_sva) || (va >= clean_eva))
399 return 1;
400 else
401 return 0;
402}
403
404static PMAP_INLINE void
405pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) {
405invltlb_1pg( vm_offset_t va) {
406#if defined(I386_CPU)
407 if (cpu_class == CPUCLASS_386) {
406#if defined(I386_CPU)
407 if (cpu_class == CPUCLASS_386) {
408 pmap_update();
408 invltlb();
409 } else
410#endif
411 {
409 } else
410#endif
411 {
412 pmap_update_1pg(va1);
413 pmap_update_1pg(va2);
412 invlpg(va);
414 }
415}
416
413 }
414}
415
416static PMAP_INLINE void
417invltlb_2pg( vm_offset_t va1, vm_offset_t va2) {
418#if defined(I386_CPU)
419 if (cpu_class == CPUCLASS_386) {
420 invltlb();
421 } else
422#endif
423 {
424 invlpg(va1);
425 invlpg(va2);
426 }
427}
417
428
429
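For context on the CPUCLASS_386 tests above: the 80386 has no invlpg instruction, so the only way to discard a stale translation is to flush the entire TLB by reloading %cr3, while the 486 and later can invalidate a single entry. The sketch below illustrates how such primitives are typically implemented as inline assembly; it is an illustration only, not necessarily the exact <machine/cpufunc.h> definitions used by this file.

static __inline void
invltlb(void)
{
	u_long	temp;

	/* reloading %cr3 flushes every TLB entry; works even on a 386 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3"
	    : "=r" (temp) : : "memory");
}

static __inline void
invlpg(u_int addr)
{
	/* 486 and later only: invalidate the single entry covering addr */
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}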
418static PMAP_INLINE void
419pmap_lock(pmap)
420pmap_t pmap;
421{
422 int s;
423 if (pmap == kernel_pmap)
424 return;
425 s = splhigh();

--- 48 unchanged lines hidden (view full) ---

474
475 /* are we current address space or kernel? */
476 if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
477 return (unsigned *) PTmap;
478 }
479 /* otherwise, we are alternate address space */
480 if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
481 APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
430static PMAP_INLINE void
431pmap_lock(pmap)
432pmap_t pmap;
433{
434 int s;
435 if (pmap == kernel_pmap)
436 return;
437 s = splhigh();

--- 48 unchanged lines hidden (view full) ---

486
487 /* are we current address space or kernel? */
488 if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
489 return (unsigned *) PTmap;
490 }
491 /* otherwise, we are alternate address space */
492 if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
493 APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
482 pmap_update();
494 invltlb();
483 }
484 return (unsigned *) APTmap;
485}
486
487/*
488 * Routine: pmap_pte
489 * Function:
490 * Extract the page table entry associated

--- 9 unchanged lines hidden (view full) ---

500 return get_ptbase(pmap) + i386_btop(va);
501 }
502 return (0);
503}
504
505/*
506 * Super fast pmap_pte routine best used when scanning
507 * the pv lists. This eliminates many coarse-grained
495 }
496 return (unsigned *) APTmap;
497}
498
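A note on the recursive mapping that get_ptbase() relies on (a sketch of the scheme, not the <machine/pmap.h> text): the page-directory slot at index PTDPTDI points back at the page directory itself, so the 4MB window starting at PTmap exposes every PTE of the current address space, and APTmap is a second such window, steered through APTDpde, for one other pmap at a time. For the current pmap the PTE of a virtual address is therefore simply

	pte = (unsigned *) PTmap + i386_btop(va);

and because there is only one alternate window, repointing APTDpde leaves TLB entries for APTmap still referring to the previous pmap's page tables, which is why the invltlb() above has to follow the switch.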
499/*
500 * Routine: pmap_pte
501 * Function:
502 * Extract the page table entry associated

--- 9 unchanged lines hidden (view full) ---

512 return get_ptbase(pmap) + i386_btop(va);
513 }
514 return (0);
515}
516
517/*
518 * Super fast pmap_pte routine best used when scanning
519 * the pv lists. This eliminates many coarse-grained
508 * pmap_update calls. Note that many of the pv list
520 * invltlb calls. Note that many of the pv list
509 * scans are across different pmaps. It is very wasteful
521 * scans are across different pmaps. It is very wasteful
510 * to do an entire pmap_update for checking a single mapping.
522 * to do an entire invltlb for checking a single mapping.
511 */
512
513unsigned *
514pmap_pte_quick(pmap, va)
515 register pmap_t pmap;
516 vm_offset_t va;
517{
518 unsigned pde, newpf;
519 if (pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) {
520 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
521 unsigned index = i386_btop(va);
522 /* are we current address space or kernel? */
523 if ((pmap == kernel_pmap) ||
524 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
525 return (unsigned *) PTmap + index;
526 }
527 newpf = pde & PG_FRAME;
528 if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
529 * (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
523 */
524
525unsigned *
526pmap_pte_quick(pmap, va)
527 register pmap_t pmap;
528 vm_offset_t va;
529{
530 unsigned pde, newpf;
531 if (pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) {
532 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
533 unsigned index = i386_btop(va);
534 /* are we current address space or kernel? */
535 if ((pmap == kernel_pmap) ||
536 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
537 return (unsigned *) PTmap + index;
538 }
539 newpf = pde & PG_FRAME;
540 if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
541 * (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
530 pmap_update_1pg((vm_offset_t) PADDR1);
542 invltlb_1pg((vm_offset_t) PADDR1);
531 }
532 return PADDR1 + ((unsigned) index & (NPTEPG - 1));
533 }
534 return (0);
535}
536
537/*
538 * Routine: pmap_extract

--- 64 unchanged lines hidden (view full) ---

603 for (i = 0; i < count; i++) {
604 vm_offset_t tva = va + i * PAGE_SIZE;
605 unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
606 unsigned opte;
607 pte = (unsigned *)vtopte(tva);
608 opte = *pte;
609 *pte = npte;
610 if (opte)
543 }
544 return PADDR1 + ((unsigned) index & (NPTEPG - 1));
545 }
546 return (0);
547}
548
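As the comment introducing pmap_pte_quick() explains, it exists for pv-list scans that visit many pmaps in a row. The following hypothetical walk is only an illustration of that use; pa_to_pvh() and the pv fields are assumed from the pv routines later in this file, and pa is some managed physical page.

	pv_entry_t pv;
	pv_table_t *ppv = pa_to_pvh(pa);	/* assumed helper */
	int modified = 0;

	for (pv = ppv->pv_list.tqh_first; pv != NULL; pv = pv->pv_list.tqe_next) {
		unsigned *pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		if (pte != NULL && (*pte & PG_M) != 0)
			modified = 1;
	}

Each mapping may live in a different pmap, so recycling the single PMAP1/PADDR1 window (at worst one invltlb_1pg when it is repointed) is much cheaper than switching APTDpde and flushing the whole TLB per entry.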
549/*
550 * Routine: pmap_extract

--- 64 unchanged lines hidden (view full) ---

615 for (i = 0; i < count; i++) {
616 vm_offset_t tva = va + i * PAGE_SIZE;
617 unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V;
618 unsigned opte;
619 pte = (unsigned *)vtopte(tva);
620 opte = *pte;
621 *pte = npte;
622 if (opte)
611 pmap_update_1pg(tva);
623 invltlb_1pg(tva);
612 }
613}
614
615/*
616 * this routine jerks page mappings from the
617 * kernel -- it is meant only for temporary mappings.
618 */
619void
620pmap_qremove(va, count)
621 vm_offset_t va;
622 int count;
623{
624 int i;
625 register unsigned *pte;
626
627 for (i = 0; i < count; i++) {
628 pte = (unsigned *)vtopte(va);
629 *pte = 0;
624 }
625}
626
627/*
628 * this routine jerks page mappings from the
629 * kernel -- it is meant only for temporary mappings.
630 */
631void
632pmap_qremove(va, count)
633 vm_offset_t va;
634 int count;
635{
636 int i;
637 register unsigned *pte;
638
639 for (i = 0; i < count; i++) {
640 pte = (unsigned *)vtopte(va);
641 *pte = 0;
630 pmap_update_1pg(va);
642 invltlb_1pg(va);
631 va += PAGE_SIZE;
632 }
633}
634
635/*
636 * add a wired page to the kva
637 * note that in order for the mapping to take effect -- you
643 va += PAGE_SIZE;
644 }
645}
646
647/*
648 * add a wired page to the kva
649 * note that in order for the mapping to take effect -- you
638 * should do a pmap_update after doing the pmap_kenter...
650 * should do an invltlb after doing the pmap_kenter...
639 */
640PMAP_INLINE void
641pmap_kenter(va, pa)
642 vm_offset_t va;
643 register vm_offset_t pa;
644{
645 register unsigned *pte;
646 unsigned npte, opte;
647
648 npte = pa | PG_RW | PG_V;
649 pte = (unsigned *)vtopte(va);
650 opte = *pte;
651 *pte = npte;
652 if (opte)
651 */
652PMAP_INLINE void
653pmap_kenter(va, pa)
654 vm_offset_t va;
655 register vm_offset_t pa;
656{
657 register unsigned *pte;
658 unsigned npte, opte;
659
660 npte = pa | PG_RW | PG_V;
661 pte = (unsigned *)vtopte(va);
662 opte = *pte;
663 *pte = npte;
664 if (opte)
653 pmap_update_1pg(va);
665 invltlb_1pg(va);
654}
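A minimal hypothetical caller, only to illustrate the comment above pmap_kenter(): after (re)entering a kernel mapping, flush any cached translation for that address before relying on it. va and m are caller-supplied here.

	pmap_kenter(va, VM_PAGE_TO_PHYS(m));	/* va: kernel va, m: a vm_page_t */
	invltlb_1pg(va);
	bzero((caddr_t) va, PAGE_SIZE);		/* the new mapping is now safe to use */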
655
656/*
657 * remove a page from the kernel pagetables
658 */
659PMAP_INLINE void
660pmap_kremove(va)
661 vm_offset_t va;
662{
663 register unsigned *pte;
664
665 pte = (unsigned *)vtopte(va);
666 *pte = 0;
666}
667
668/*
669 * remove a page from the kernel pagetables
670 */
671PMAP_INLINE void
672pmap_kremove(va)
673 vm_offset_t va;
674{
675 register unsigned *pte;
676
677 pte = (unsigned *)vtopte(va);
678 *pte = 0;
667 pmap_update_1pg(va);
679 invltlb_1pg(va);
668}
669
670static vm_page_t
671pmap_page_alloc(object, pindex)
672 vm_object_t object;
673 vm_pindex_t pindex;
674{
675 vm_page_t m;

--- 52 unchanged lines hidden (view full) ---

728 /*
729 * unmap the page table page
730 */
731 pmap->pm_pdir[m->pindex] = 0;
732 --pmap->pm_stats.resident_count;
733 if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
734 (((unsigned) PTDpde) & PG_FRAME)) {
735 /*
680}
681
682static vm_page_t
683pmap_page_alloc(object, pindex)
684 vm_object_t object;
685 vm_pindex_t pindex;
686{
687 vm_page_t m;

--- 52 unchanged lines hidden (view full) ---

740 /*
741 * unmap the page table page
742 */
743 pmap->pm_pdir[m->pindex] = 0;
744 --pmap->pm_stats.resident_count;
745 if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
746 (((unsigned) PTDpde) & PG_FRAME)) {
747 /*
736 * Do a pmap_update to make the invalidated mapping
748 * Do an invltlb to make the invalidated mapping
737 * take effect immediately.
738 */
739 pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
749 * take effect immediately.
750 */
751 pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
740 pmap_update_1pg(pteva);
752 invltlb_1pg(pteva);
741 }
742
743#if defined(PTPHINT)
744 if (pmap->pm_ptphint == m)
745 pmap->pm_ptphint = NULL;
746#endif
747
748 /*

--- 681 unchanged lines hidden (view full) ---

1430 }
1431
1432 /*
1433 * get a local va for mappings for this pmap.
1434 */
1435 ptq = get_ptbase(pmap) + i386_btop(va);
1436 if (*ptq) {
1437 (void) pmap_remove_pte(pmap, ptq, va);
753 }
754
755#if defined(PTPHINT)
756 if (pmap->pm_ptphint == m)
757 pmap->pm_ptphint = NULL;
758#endif
759
760 /*

--- 681 unchanged lines hidden (view full) ---

1442 }
1443
1444 /*
1445 * get a local va for mappings for this pmap.
1446 */
1447 ptq = get_ptbase(pmap) + i386_btop(va);
1448 if (*ptq) {
1449 (void) pmap_remove_pte(pmap, ptq, va);
1438 pmap_update_1pg(va);
1450 invltlb_1pg(va);
1439 }
1440 return;
1441}
1442
1443/*
1444 * Remove the given range of addresses from the specified map.
1445 *
1446 * It is assumed that the start and end are properly

--- 71 unchanged lines hidden (view full) ---

1518 anyvalid++;
1519 if (pmap_remove_pte(pmap,
1520 ptbase + sindex, va))
1521 break;
1522 }
1523 }
1524
1525 if (anyvalid) {
1451 }
1452 return;
1453}
1454
1455/*
1456 * Remove the given range of addresses from the specified map.
1457 *
1458 * It is assumed that the start and end are properly

--- 71 unchanged lines hidden (view full) ---

1530 anyvalid++;
1531 if (pmap_remove_pte(pmap,
1532 ptbase + sindex, va))
1533 break;
1534 }
1535 }
1536
1537 if (anyvalid) {
1526 pmap_update();
1538 invltlb();
1527 }
1528 pmap_unlock(pmap);
1529}
1530
1531/*
1532 * Routine: pmap_remove_all
1533 * Function:
1534 * Removes this physical page from

--- 63 unchanged lines hidden (view full) ---

1598 TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
1599 --ppv->pv_list_count;
1600 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1601 pmap_unlock(pv->pv_pmap);
1602 free_pv_entry(pv);
1603 }
1604
1605 if (update_needed)
1539 }
1540 pmap_unlock(pmap);
1541}
1542
1543/*
1544 * Routine: pmap_remove_all
1545 * Function:
1546 * Removes this physical page from

--- 63 unchanged lines hidden (view full) ---

1610 TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
1611 --ppv->pv_list_count;
1612 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1613 pmap_unlock(pv->pv_pmap);
1614 free_pv_entry(pv);
1615 }
1616
1617 if (update_needed)
1606 pmap_update();
1618 invltlb();
1607 splx(s);
1608 return;
1609}
1610
1611/*
1612 * Set the physical protection on the
1613 * specified range of this map as requested.
1614 */

--- 59 unchanged lines hidden (view full) ---

1674 }
1675 ptbase[sindex] = pbits & ~(PG_M|PG_RW);
1676 anychanged = 1;
1677 }
1678 }
1679 }
1680 pmap_unlock(pmap);
1681 if (anychanged)
1619 splx(s);
1620 return;
1621}
1622
1623/*
1624 * Set the physical protection on the
1625 * specified range of this map as requested.
1626 */

--- 59 unchanged lines hidden (view full) ---

1686 }
1687 ptbase[sindex] = pbits & ~(PG_M|PG_RW);
1688 anychanged = 1;
1689 }
1690 }
1691 }
1692 pmap_unlock(pmap);
1693 if (anychanged)
1682 pmap_update();
1694 invltlb();
1683}
1684
1685/*
1686 * Insert the given physical page (p) at
1687 * the specified virtual address (v) in the
1688 * target physical map with the protection requested.
1689 *
1690 * If specified, the page will be wired down, meaning

--- 131 unchanged lines hidden (view full) ---

1822
1823 /*
1824 * if the mapping or permission bits are different, we need
1825 * to update the pte.
1826 */
1827 if ((origpte & ~(PG_M|PG_A)) != newpte) {
1828 *pte = newpte;
1829 if (origpte)
1695}
1696
1697/*
1698 * Insert the given physical page (p) at
1699 * the specified virtual address (v) in the
1700 * target physical map with the protection requested.
1701 *
1702 * If specified, the page will be wired down, meaning

--- 131 unchanged lines hidden (view full) ---

1834
1835 /*
1836 * if the mapping or permission bits are different, we need
1837 * to update the pte.
1838 */
1839 if ((origpte & ~(PG_M|PG_A)) != newpte) {
1840 *pte = newpte;
1841 if (origpte)
1830 pmap_update_1pg(va);
1842 invltlb_1pg(va);
1831 }
1832 pmap_unlock(pmap);
1833}
1834
1835/*
1836 * this code makes some *MAJOR* assumptions:
1837 * 1. Current pmap & pmap exists.
1838 * 2. Not wired.

--- 341 unchanged lines hidden (view full) ---

2180 pmap_unlock(src_pmap);
2181 pmap_unlock(dst_pmap);
2182 return;
2183 }
2184
2185 dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2186 if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
2187 APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
1843 }
1844 pmap_unlock(pmap);
1845}
1846
1847/*
1848 * this code makes some *MAJOR* assumptions:
1849 * 1. Current pmap & pmap exists.
1850 * 2. Not wired.

--- 341 unchanged lines hidden (view full) ---

2192 pmap_unlock(src_pmap);
2193 pmap_unlock(dst_pmap);
2194 return;
2195 }
2196
2197 dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2198 if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
2199 APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
2188 pmap_update();
2200 invltlb();
2189 }
2190
2191 for(addr = src_addr; addr < end_addr; addr = pdnxt) {
2192 unsigned *src_pte, *dst_pte;
2193 vm_page_t dstmpte, srcmpte;
2194 vm_offset_t srcptepaddr;
2195 unsigned ptepindex;
2196

--- 77 unchanged lines hidden (view full) ---

2274 vm_offset_t phys;
2275{
2276 if (*(int *) CMAP2)
2277 panic("pmap_zero_page: CMAP busy");
2278
2279 *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
2280 bzero(CADDR2, PAGE_SIZE);
2281 *(int *) CMAP2 = 0;
2201 }
2202
2203 for(addr = src_addr; addr < end_addr; addr = pdnxt) {
2204 unsigned *src_pte, *dst_pte;
2205 vm_page_t dstmpte, srcmpte;
2206 vm_offset_t srcptepaddr;
2207 unsigned ptepindex;
2208

--- 77 unchanged lines hidden (view full) ---

2286 vm_offset_t phys;
2287{
2288 if (*(int *) CMAP2)
2289 panic("pmap_zero_page: CMAP busy");
2290
2291 *(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
2292 bzero(CADDR2, PAGE_SIZE);
2293 *(int *) CMAP2 = 0;
2282 pmap_update_1pg((vm_offset_t) CADDR2);
2294 invltlb_1pg((vm_offset_t) CADDR2);
2283}
2284
2285/*
2286 * pmap_copy_page copies the specified (machine independent)
2287 * page by mapping the page into virtual memory and using
2288 * bcopy to copy the page, one machine dependent page at a
2289 * time.
2290 */

--- 7 unchanged lines hidden (view full) ---

2298
2299 *(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
2300 *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
2301
2302 bcopy(CADDR1, CADDR2, PAGE_SIZE);
2303
2304 *(int *) CMAP1 = 0;
2305 *(int *) CMAP2 = 0;
2295}
2296
2297/*
2298 * pmap_copy_page copies the specified (machine independent)
2299 * page by mapping the page into virtual memory and using
2300 * bcopy to copy the page, one machine dependent page at a
2301 * time.
2302 */

--- 7 unchanged lines hidden (view full) ---

2310
2311 *(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
2312 *(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
2313
2314 bcopy(CADDR1, CADDR2, PAGE_SIZE);
2315
2316 *(int *) CMAP1 = 0;
2317 *(int *) CMAP2 = 0;
2306 pmap_update_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2);
2318 invltlb_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2);
2307}
2308
2309
2310/*
2311 * Routine: pmap_pageable
2312 * Function:
2313 * Make the specified pages (by pmap, offset)
2314 * pageable (or not) as requested.

--- 113 unchanged lines hidden (view full) ---

2428
2429 --ppv->pv_list_count;
2430 TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
2431
2432 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2433 free_pv_entry(pv);
2434 }
2435 splx(s);
2319}
2320
2321
2322/*
2323 * Routine: pmap_pageable
2324 * Function:
2325 * Make the specified pages (by pmap, offset)
2326 * pageable (or not) as requested.

--- 113 unchanged lines hidden (view full) ---

2440
2441 --ppv->pv_list_count;
2442 TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
2443
2444 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2445 free_pv_entry(pv);
2446 }
2447 splx(s);
2436 pmap_update();
2448 invltlb();
2437 pmap_unlock(pmap);
2438}
2439
2440/*
2441 * pmap_testbit tests bits in pte's
2442 * note that the testbit/changebit routines are inline,
2443 * and a lot of things compile-time evaluate.
2444 */

--- 60 unchanged lines hidden (view full) ---

2505pmap_changebit(pa, bit, setem)
2506 vm_offset_t pa;
2507 int bit;
2508 boolean_t setem;
2509{
2510 register pv_entry_t pv;
2511 pv_table_t *ppv;
2512 register unsigned *pte;
2449 pmap_unlock(pmap);
2450}
2451
2452/*
2453 * pmap_testbit tests bits in pte's
2454 * note that the testbit/changebit routines are inline,
2455 * and a lot of things compile-time evaluate.
2456 */

--- 60 unchanged lines hidden (view full) ---

2517pmap_changebit(pa, bit, setem)
2518 vm_offset_t pa;
2519 int bit;
2520 boolean_t setem;
2521{
2522 register pv_entry_t pv;
2523 pv_table_t *ppv;
2524 register unsigned *pte;
2513 vm_offset_t va;
2514 int changed;
2515 int s;
2516
2517 if (!pmap_is_managed(pa))
2518 return;
2519
2520 s = splvm();
2521 changed = 0;

--- 44 unchanged lines hidden (view full) ---

2566 *(int *)pte = pbits & ~bit;
2567 }
2568 }
2569 }
2570 pmap_unlock(pv->pv_pmap);
2571 }
2572 splx(s);
2573 if (changed)
2525 int changed;
2526 int s;
2527
2528 if (!pmap_is_managed(pa))
2529 return;
2530
2531 s = splvm();
2532 changed = 0;

--- 44 unchanged lines hidden (view full) ---

2577 *(int *)pte = pbits & ~bit;
2578 }
2579 }
2580 }
2581 pmap_unlock(pv->pv_pmap);
2582 }
2583 splx(s);
2584 if (changed)
2574 pmap_update();
2585 invltlb();
2575}
2576
2577/*
2578 * pmap_page_protect:
2579 *
2580 * Lower the permission for all mappings to a given page.
2581 */
2582void

--- 119 unchanged lines hidden (view full) ---

2702 if (*pte & PG_A) {
2703 rtval++;
2704 *pte &= ~PG_A;
2705 }
2706 pmap_unlock(pv->pv_pmap);
2707 }
2708 splx(s);
2709 if (rtval) {
2586}
2587
2588/*
2589 * pmap_page_protect:
2590 *
2591 * Lower the permission for all mappings to a given page.
2592 */
2593void

--- 119 unchanged lines hidden (view full) ---

2713 if (*pte & PG_A) {
2714 rtval++;
2715 *pte &= ~PG_A;
2716 }
2717 pmap_unlock(pv->pv_pmap);
2718 }
2719 splx(s);
2720 if (rtval) {
2710 pmap_update();
2721 invltlb();
2711 }
2712 return (rtval);
2713}
2714
2715/*
2716 * pmap_is_modified:
2717 *
2718 * Return whether or not the specified physical page was modified

--- 20 unchanged lines hidden (view full) ---

2739 * Clear the reference bit on the specified physical page.
2740 */
2741void
2742pmap_clear_reference(vm_offset_t pa)
2743{
2744 pmap_changebit((pa), PG_A, FALSE);
2745}
2746
2722 }
2723 return (rtval);
2724}
2725
2726/*
2727 * pmap_is_modified:
2728 *
2729 * Return whether or not the specified physical page was modified

--- 20 unchanged lines hidden (view full) ---

2750 * Clear the reference bit on the specified physical page.
2751 */
2752void
2753pmap_clear_reference(vm_offset_t pa)
2754{
2755 pmap_changebit((pa), PG_A, FALSE);
2756}
2757
2747#if 0
2748void
2749pmap_update_map(pmap_t pmap) {
2750 unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
2751 if ((pmap == kernel_pmap) ||
2752 (frame == (((unsigned) PTDpde) & PG_FRAME))) {
2753 pmap_update();
2754 }
2755}
2756#endif
2757
2758/*
2759 * Miscellaneous support routines follow
2760 */
2761
2762static void
2763i386_protection_init()
2764{
2765 register int *kp, prot;

--- 45 unchanged lines hidden (view full) ---

2811 pa = pa & PG_FRAME;
2812 for (tmpva = va; size > 0;) {
2813 pte = (unsigned *)vtopte(tmpva);
2814 *pte = pa | PG_RW | PG_V | PG_N;
2815 size -= PAGE_SIZE;
2816 tmpva += PAGE_SIZE;
2817 pa += PAGE_SIZE;
2818 }
2758/*
2759 * Miscellaneous support routines follow
2760 */
2761
2762static void
2763i386_protection_init()
2764{
2765 register int *kp, prot;

--- 45 unchanged lines hidden (view full) ---

2811 pa = pa & PG_FRAME;
2812 for (tmpva = va; size > 0;) {
2813 pte = (unsigned *)vtopte(tmpva);
2814 *pte = pa | PG_RW | PG_V | PG_N;
2815 size -= PAGE_SIZE;
2816 tmpva += PAGE_SIZE;
2817 pa += PAGE_SIZE;
2818 }
2819 pmap_update();
2819 invltlb();
2820
2821 return ((void *) va);
2822}
2823
2824/*
2825 * perform the pmap work for mincore
2826 */
2827int

--- 157 unchanged lines hidden ---
2820
2821 return ((void *) va);
2822}
2823
2824/*
2825 * perform the pmap work for mincore
2826 */
2827int

--- 157 unchanged lines hidden ---