pmap.c: diff between revision 6c85795a256fac2ca714542b7241c6b28f1023da (old) and revision e45b89d23d34f192217b1464b4392f1fb638136c (new)
1/*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman

--- 1045 unchanged lines hidden (view full) ---

1054static u_long pmap_pde_promotions;
1055SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
1056 &pmap_pde_promotions, 0, "2/4MB page promotions");
1057
1058/***************************************************
1059 * Low level helper routines.....
1060 ***************************************************/
1061
1/*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved.
6 * Copyright (c) 1994 John S. Dyson
7 * All rights reserved.
8 * Copyright (c) 1994 David Greenman

--- 1045 unchanged lines hidden (view full) ---

1054static u_long pmap_pde_promotions;
1055SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
1056 &pmap_pde_promotions, 0, "2/4MB page promotions");
1057
1058/***************************************************
1059 * Low level helper routines.....
1060 ***************************************************/
1061
/*
 * pmap_is_valid_memattr: report whether "mode" is a memory attribute
 * (caching mode) that can be expressed in a PTE/PDE on this machine.
 *
 * A mode is valid iff it indexes pat_index[] (0 <= mode < PAT_INDEX_SIZE)
 * and that slot is non-negative; presumably a negative pat_index[] entry
 * marks a mode the CPU's PAT setup does not support — set up elsewhere in
 * this file, TODO confirm.  The pmap argument is __unused: validity is
 * machine-wide, not per-pmap.
 */
1062boolean_t
1063pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
1064{
1065
1066	return (mode >= 0 && mode < PAT_INDEX_SIZE &&
1067	    pat_index[(int)mode] >= 0);
1068}
1069
1062/*
1063 * Determine the appropriate bits to set in a PTE or PDE for a specified
1064 * caching mode.
1065 */
1066int
1070/*
1071 * Determine the appropriate bits to set in a PTE or PDE for a specified
1072 * caching mode.
1073 */
1074int
1067pmap_cache_bits(int mode, boolean_t is_pde)
1075pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
1068{
1069 int cache_bits, pat_flag, pat_idx;
1070
1076{
1077 int cache_bits, pat_flag, pat_idx;
1078
1071 if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
1079 if (!pmap_is_valid_memattr(pmap, mode))
1072 panic("Unknown caching mode %d\n", mode);
1073
1074 /* The PAT bit is different for PTE's and PDE's. */
1075 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
1076
1077 /* Map the caching mode to a PAT index. */
1078 pat_idx = pat_index[mode];
1079

--- 659 unchanged lines hidden (view full) ---

1739}
1740
1741static __inline void
1742pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1743{
1744 pt_entry_t *pte;
1745
1746 pte = vtopte(va);
1080 panic("Unknown caching mode %d\n", mode);
1081
1082 /* The PAT bit is different for PTE's and PDE's. */
1083 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
1084
1085 /* Map the caching mode to a PAT index. */
1086 pat_idx = pat_index[mode];
1087

--- 659 unchanged lines hidden (view full) ---

1747}
1748
1749static __inline void
1750pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1751{
1752 pt_entry_t *pte;
1753
1754 pte = vtopte(va);
1747 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(mode, 0));
1755 pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap,
1756 mode, 0));
1748}
1749
1750/*
1751 * Remove a page from the kernel pagetables.
1752 * Note: not SMP coherent.
1753 *
1754 * This function may be used before pmap_bootstrap() is called.
1755 */

--- 78 unchanged lines hidden (view full) ---

1834 pt_entry_t *endpte, oldpte, pa, *pte;
1835 vm_page_t m;
1836
1837 oldpte = 0;
1838 pte = vtopte(sva);
1839 endpte = pte + count;
1840 while (pte < endpte) {
1841 m = *ma++;
1757}
1758
1759/*
1760 * Remove a page from the kernel pagetables.
1761 * Note: not SMP coherent.
1762 *
1763 * This function may be used before pmap_bootstrap() is called.
1764 */

--- 78 unchanged lines hidden (view full) ---

1843 pt_entry_t *endpte, oldpte, pa, *pte;
1844 vm_page_t m;
1845
1846 oldpte = 0;
1847 pte = vtopte(sva);
1848 endpte = pte + count;
1849 while (pte < endpte) {
1850 m = *ma++;
1842 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
1851 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap,
1852 m->md.pat_mode, 0);
1843 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
1844 oldpte |= *pte;
1845#if defined(PAE) || defined(PAE_TABLES)
1846 pte_store(pte, pa | pg_nx | PG_RW | PG_V);
1847#else
1848 pte_store(pte, pa | PG_RW | PG_V);
1849#endif
1850 }

--- 1798 unchanged lines hidden (view full) ---

3649#if defined(PAE) || defined(PAE_TABLES)
3650 if ((prot & VM_PROT_EXECUTE) == 0)
3651 newpte |= pg_nx;
3652#endif
3653 if ((flags & PMAP_ENTER_WIRED) != 0)
3654 newpte |= PG_W;
3655 if (pmap != kernel_pmap)
3656 newpte |= PG_U;
1853 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
1854 oldpte |= *pte;
1855#if defined(PAE) || defined(PAE_TABLES)
1856 pte_store(pte, pa | pg_nx | PG_RW | PG_V);
1857#else
1858 pte_store(pte, pa | PG_RW | PG_V);
1859#endif
1860 }

--- 1798 unchanged lines hidden (view full) ---

3659#if defined(PAE) || defined(PAE_TABLES)
3660 if ((prot & VM_PROT_EXECUTE) == 0)
3661 newpte |= pg_nx;
3662#endif
3663 if ((flags & PMAP_ENTER_WIRED) != 0)
3664 newpte |= PG_W;
3665 if (pmap != kernel_pmap)
3666 newpte |= PG_U;
3657 newpte |= pmap_cache_bits(m->md.pat_mode, psind > 0);
3667 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
3658 if ((m->oflags & VPO_UNMANAGED) == 0)
3659 newpte |= PG_MANAGED;
3660
3661 rw_wlock(&pvh_global_lock);
3662 PMAP_LOCK(pmap);
3663 sched_pin();
3664 if (psind == 1) {
3665 /* Assert the required virtual and physical alignment. */

--- 206 unchanged lines hidden (view full) ---

3872 * reclaiming another PV entry.
3873 */
3874static bool
3875pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3876{
3877 pd_entry_t newpde;
3878
3879 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3668 if ((m->oflags & VPO_UNMANAGED) == 0)
3669 newpte |= PG_MANAGED;
3670
3671 rw_wlock(&pvh_global_lock);
3672 PMAP_LOCK(pmap);
3673 sched_pin();
3674 if (psind == 1) {
3675 /* Assert the required virtual and physical alignment. */

--- 206 unchanged lines hidden (view full) ---

3882 * reclaiming another PV entry.
3883 */
3884static bool
3885pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3886{
3887 pd_entry_t newpde;
3888
3889 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3880 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
3890 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
3881 PG_PS | PG_V;
3882 if ((m->oflags & VPO_UNMANAGED) == 0)
3883 newpde |= PG_MANAGED;
3884#if defined(PAE) || defined(PAE_TABLES)
3885 if ((prot & VM_PROT_EXECUTE) == 0)
3886 newpde |= pg_nx;
3887#endif
3888 if (pmap != kernel_pmap)

--- 241 unchanged lines hidden (view full) ---

4130 return (mpte);
4131 }
4132
4133 /*
4134 * Increment counters
4135 */
4136 pmap->pm_stats.resident_count++;
4137
3891 PG_PS | PG_V;
3892 if ((m->oflags & VPO_UNMANAGED) == 0)
3893 newpde |= PG_MANAGED;
3894#if defined(PAE) || defined(PAE_TABLES)
3895 if ((prot & VM_PROT_EXECUTE) == 0)
3896 newpde |= pg_nx;
3897#endif
3898 if (pmap != kernel_pmap)

--- 241 unchanged lines hidden (view full) ---

4140 return (mpte);
4141 }
4142
4143 /*
4144 * Increment counters
4145 */
4146 pmap->pm_stats.resident_count++;
4147
4138 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
4148 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
4139#if defined(PAE) || defined(PAE_TABLES)
4140 if ((prot & VM_PROT_EXECUTE) == 0)
4141 pa |= pg_nx;
4142#endif
4143
4144 /*
4145 * Now validate mapping with RO protection
4146 */

--- 71 unchanged lines hidden (view full) ---

4218 }
4219
4220 /*
4221 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
4222 * "size" is a multiple of 2/4M, adding the PAT setting to
4223 * "pa" will not affect the termination of this loop.
4224 */
4225 PMAP_LOCK(pmap);
4149#if defined(PAE) || defined(PAE_TABLES)
4150 if ((prot & VM_PROT_EXECUTE) == 0)
4151 pa |= pg_nx;
4152#endif
4153
4154 /*
4155 * Now validate mapping with RO protection
4156 */

--- 71 unchanged lines hidden (view full) ---

4228 }
4229
4230 /*
4231 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
4232 * "size" is a multiple of 2/4M, adding the PAT setting to
4233 * "pa" will not affect the termination of this loop.
4234 */
4235 PMAP_LOCK(pmap);
4226 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
4227 size; pa += NBPDR) {
4236 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
4237 pa < ptepa + size; pa += NBPDR) {
4228 pde = pmap_pde(pmap, addr);
4229 if (*pde == 0) {
4230 pde_store(pde, pa | PG_PS | PG_M | PG_A |
4231 PG_U | PG_RW | PG_V);
4232 pmap->pm_stats.resident_count += NBPDR /
4233 PAGE_SIZE;
4234 pmap_pde_mappings++;
4235 }

--- 248 unchanged lines hidden (view full) ---

4484
4485 sched_pin();
4486 pc = get_pcpu();
4487 cmap_pte2 = pc->pc_cmap_pte2;
4488 mtx_lock(&pc->pc_cmap_lock);
4489 if (*cmap_pte2)
4490 panic("pmap_zero_page: CMAP2 busy");
4491 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4238 pde = pmap_pde(pmap, addr);
4239 if (*pde == 0) {
4240 pde_store(pde, pa | PG_PS | PG_M | PG_A |
4241 PG_U | PG_RW | PG_V);
4242 pmap->pm_stats.resident_count += NBPDR /
4243 PAGE_SIZE;
4244 pmap_pde_mappings++;
4245 }

--- 248 unchanged lines hidden (view full) ---

4494
4495 sched_pin();
4496 pc = get_pcpu();
4497 cmap_pte2 = pc->pc_cmap_pte2;
4498 mtx_lock(&pc->pc_cmap_lock);
4499 if (*cmap_pte2)
4500 panic("pmap_zero_page: CMAP2 busy");
4501 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4492 pmap_cache_bits(m->md.pat_mode, 0);
4502 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
4493 invlcaddr(pc->pc_cmap_addr2);
4494 pagezero(pc->pc_cmap_addr2);
4495 *cmap_pte2 = 0;
4496
4497 /*
4498 * Unpin the thread before releasing the lock. Otherwise the thread
4499 * could be rescheduled while still bound to the current CPU, only
4500 * to unpin itself immediately upon resuming execution.

--- 14 unchanged lines hidden (view full) ---

4515
4516 sched_pin();
4517 pc = get_pcpu();
4518 cmap_pte2 = pc->pc_cmap_pte2;
4519 mtx_lock(&pc->pc_cmap_lock);
4520 if (*cmap_pte2)
4521 panic("pmap_zero_page_area: CMAP2 busy");
4522 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4503 invlcaddr(pc->pc_cmap_addr2);
4504 pagezero(pc->pc_cmap_addr2);
4505 *cmap_pte2 = 0;
4506
4507 /*
4508 * Unpin the thread before releasing the lock. Otherwise the thread
4509 * could be rescheduled while still bound to the current CPU, only
4510 * to unpin itself immediately upon resuming execution.

--- 14 unchanged lines hidden (view full) ---

4525
4526 sched_pin();
4527 pc = get_pcpu();
4528 cmap_pte2 = pc->pc_cmap_pte2;
4529 mtx_lock(&pc->pc_cmap_lock);
4530 if (*cmap_pte2)
4531 panic("pmap_zero_page_area: CMAP2 busy");
4532 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
4523 pmap_cache_bits(m->md.pat_mode, 0);
4533 pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
4524 invlcaddr(pc->pc_cmap_addr2);
4525 if (off == 0 && size == PAGE_SIZE)
4526 pagezero(pc->pc_cmap_addr2);
4527 else
4528 bzero(pc->pc_cmap_addr2 + off, size);
4529 *cmap_pte2 = 0;
4530 sched_unpin();
4531 mtx_unlock(&pc->pc_cmap_lock);

--- 13 unchanged lines hidden (view full) ---

4545 cmap_pte1 = pc->pc_cmap_pte1;
4546 cmap_pte2 = pc->pc_cmap_pte2;
4547 mtx_lock(&pc->pc_cmap_lock);
4548 if (*cmap_pte1)
4549 panic("pmap_copy_page: CMAP1 busy");
4550 if (*cmap_pte2)
4551 panic("pmap_copy_page: CMAP2 busy");
4552 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
4534 invlcaddr(pc->pc_cmap_addr2);
4535 if (off == 0 && size == PAGE_SIZE)
4536 pagezero(pc->pc_cmap_addr2);
4537 else
4538 bzero(pc->pc_cmap_addr2 + off, size);
4539 *cmap_pte2 = 0;
4540 sched_unpin();
4541 mtx_unlock(&pc->pc_cmap_lock);

--- 13 unchanged lines hidden (view full) ---

4555 cmap_pte1 = pc->pc_cmap_pte1;
4556 cmap_pte2 = pc->pc_cmap_pte2;
4557 mtx_lock(&pc->pc_cmap_lock);
4558 if (*cmap_pte1)
4559 panic("pmap_copy_page: CMAP1 busy");
4560 if (*cmap_pte2)
4561 panic("pmap_copy_page: CMAP2 busy");
4562 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
4553 pmap_cache_bits(src->md.pat_mode, 0);
4563 pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0);
4554 invlcaddr(pc->pc_cmap_addr1);
4555 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
4564 invlcaddr(pc->pc_cmap_addr1);
4565 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
4556 pmap_cache_bits(dst->md.pat_mode, 0);
4566 pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0);
4557 invlcaddr(pc->pc_cmap_addr2);
4558 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
4559 *cmap_pte1 = 0;
4560 *cmap_pte2 = 0;
4561 sched_unpin();
4562 mtx_unlock(&pc->pc_cmap_lock);
4563}
4564

--- 22 unchanged lines hidden (view full) ---

4587 while (xfersize > 0) {
4588 a_pg = ma[a_offset >> PAGE_SHIFT];
4589 a_pg_offset = a_offset & PAGE_MASK;
4590 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4591 b_pg = mb[b_offset >> PAGE_SHIFT];
4592 b_pg_offset = b_offset & PAGE_MASK;
4593 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4594 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
4567 invlcaddr(pc->pc_cmap_addr2);
4568 bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
4569 *cmap_pte1 = 0;
4570 *cmap_pte2 = 0;
4571 sched_unpin();
4572 mtx_unlock(&pc->pc_cmap_lock);
4573}
4574

--- 22 unchanged lines hidden (view full) ---

4597 while (xfersize > 0) {
4598 a_pg = ma[a_offset >> PAGE_SHIFT];
4599 a_pg_offset = a_offset & PAGE_MASK;
4600 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4601 b_pg = mb[b_offset >> PAGE_SHIFT];
4602 b_pg_offset = b_offset & PAGE_MASK;
4603 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4604 *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
4595 pmap_cache_bits(a_pg->md.pat_mode, 0);
4605 pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0);
4596 invlcaddr(pc->pc_cmap_addr1);
4597 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
4606 invlcaddr(pc->pc_cmap_addr1);
4607 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
4598 PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
4608 PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0);
4599 invlcaddr(pc->pc_cmap_addr2);
4600 a_cp = pc->pc_cmap_addr1 + a_pg_offset;
4601 b_cp = pc->pc_cmap_addr2 + b_pg_offset;
4602 bcopy(a_cp, b_cp, cnt);
4603 a_offset += cnt;
4604 b_offset += cnt;
4605 xfersize -= cnt;
4606 }

--- 955 unchanged lines hidden (view full) ---

5562 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
5563 sched_pin();
5564 pc = get_pcpu();
5565 cmap_pte2 = pc->pc_cmap_pte2;
5566 mtx_lock(&pc->pc_cmap_lock);
5567 if (*cmap_pte2)
5568 panic("pmap_flush_page: CMAP2 busy");
5569 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
4609 invlcaddr(pc->pc_cmap_addr2);
4610 a_cp = pc->pc_cmap_addr1 + a_pg_offset;
4611 b_cp = pc->pc_cmap_addr2 + b_pg_offset;
4612 bcopy(a_cp, b_cp, cnt);
4613 a_offset += cnt;
4614 b_offset += cnt;
4615 xfersize -= cnt;
4616 }

--- 955 unchanged lines hidden (view full) ---

5572 if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) {
5573 sched_pin();
5574 pc = get_pcpu();
5575 cmap_pte2 = pc->pc_cmap_pte2;
5576 mtx_lock(&pc->pc_cmap_lock);
5577 if (*cmap_pte2)
5578 panic("pmap_flush_page: CMAP2 busy");
5579 *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
5570 PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
5580 PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode,
5581 0);
5571 invlcaddr(pc->pc_cmap_addr2);
5572 sva = (vm_offset_t)pc->pc_cmap_addr2;
5573 eva = sva + PAGE_SIZE;
5574
5575 /*
5576 * Use mfence or sfence despite the ordering implied by
5577 * mtx_{un,}lock() because clflush on non-Intel CPUs
5578 * and clflushopt are not guaranteed to be ordered by

--- 44 unchanged lines hidden (view full) ---

5623 size = round_page(offset + size);
5624
5625 /*
5626 * Only supported on kernel virtual addresses above the recursive map.
5627 */
5628 if (base < VM_MIN_KERNEL_ADDRESS)
5629 return (EINVAL);
5630
5582 invlcaddr(pc->pc_cmap_addr2);
5583 sva = (vm_offset_t)pc->pc_cmap_addr2;
5584 eva = sva + PAGE_SIZE;
5585
5586 /*
5587 * Use mfence or sfence despite the ordering implied by
5588 * mtx_{un,}lock() because clflush on non-Intel CPUs
5589 * and clflushopt are not guaranteed to be ordered by

--- 44 unchanged lines hidden (view full) ---

5634 size = round_page(offset + size);
5635
5636 /*
5637 * Only supported on kernel virtual addresses above the recursive map.
5638 */
5639 if (base < VM_MIN_KERNEL_ADDRESS)
5640 return (EINVAL);
5641
5631 cache_bits_pde = pmap_cache_bits(mode, 1);
5632 cache_bits_pte = pmap_cache_bits(mode, 0);
5642 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
5643 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
5633 changed = FALSE;
5634
5635 /*
5636 * Pages that aren't mapped aren't supported. Also break down
5637 * 2/4MB pages into 4KB pages if required.
5638 */
5639 PMAP_LOCK(kernel_pmap);
5640 for (tmpva = base; tmpva < base + size; ) {

--- 189 unchanged lines hidden (view full) ---

5830 pt_entry_t *pte;
5831
5832 critical_enter();
5833 qaddr = PCPU_GET(qmap_addr);
5834 pte = vtopte(qaddr);
5835
5836 KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
5837 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
5644 changed = FALSE;
5645
5646 /*
5647 * Pages that aren't mapped aren't supported. Also break down
5648 * 2/4MB pages into 4KB pages if required.
5649 */
5650 PMAP_LOCK(kernel_pmap);
5651 for (tmpva = base; tmpva < base + size; ) {

--- 189 unchanged lines hidden (view full) ---

5841 pt_entry_t *pte;
5842
5843 critical_enter();
5844 qaddr = PCPU_GET(qmap_addr);
5845 pte = vtopte(qaddr);
5846
5847 KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
5848 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
5838 pmap_cache_bits(pmap_page_get_memattr(m), 0);
5849 pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0);
5839 invlpg(qaddr);
5840
5841 return (qaddr);
5842}
5843
5844void
5845pmap_quick_remove_page(vm_offset_t addr)
5846{

--- 34 unchanged lines hidden (view full) ---

5881 }
5882 prev_addr += trm_guard;
5883 trm_pte = PTmap + atop(prev_addr);
5884 for (af = prev_addr; af < addr; af += PAGE_SIZE) {
5885 m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
5886 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
5887 pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
5888 PG_M | PG_A | PG_RW | PG_V | pgeflag |
5850 invlpg(qaddr);
5851
5852 return (qaddr);
5853}
5854
5855void
5856pmap_quick_remove_page(vm_offset_t addr)
5857{

--- 34 unchanged lines hidden (view full) ---

5892 }
5893 prev_addr += trm_guard;
5894 trm_pte = PTmap + atop(prev_addr);
5895 for (af = prev_addr; af < addr; af += PAGE_SIZE) {
5896 m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
5897 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
5898 pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
5899 PG_M | PG_A | PG_RW | PG_V | pgeflag |
5889 pmap_cache_bits(VM_MEMATTR_DEFAULT, FALSE));
5900 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
5890 }
5891 *addrp = prev_addr;
5892 return (0);
5893}
5894
5895static
5896void pmap_init_trm(void)
5897{

--- 4 unchanged lines hidden (view full) ---

5902 trm_guard = 0;
5903 pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
5904 vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
5905 pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
5906 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
5907 if ((pd_m->flags & PG_ZERO) == 0)
5908 pmap_zero_page(pd_m);
5909 PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
5901 }
5902 *addrp = prev_addr;
5903 return (0);
5904}
5905
5906static
5907void pmap_init_trm(void)
5908{

--- 4 unchanged lines hidden (view full) ---

5913 trm_guard = 0;
5914 pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK);
5915 vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE);
5916 pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
5917 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO);
5918 if ((pd_m->flags & PG_ZERO) == 0)
5919 pmap_zero_page(pd_m);
5920 PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
5910 pmap_cache_bits(VM_MEMATTR_DEFAULT, TRUE);
5921 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
5911}
5912
5913void *
5914pmap_trm_alloc(size_t size, int flags)
5915{
5916 vmem_addr_t res;
5917 int error;
5918

--- 77 unchanged lines hidden ---
5922}
5923
5924void *
5925pmap_trm_alloc(size_t size, int flags)
5926{
5927 vmem_addr_t res;
5928 int error;
5929

--- 77 unchanged lines hidden ---