pmap.c (f23b4c91c4fb94e1bb6aeb4e7747f4ccf7767b41) vs. pmap.c (2c7a40c7ca0475936c00ceb08a4935cc5c4ff7f0)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 25 unchanged lines hidden ---

34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
42 * $Id: pmap.c,v 1.32 1994/08/13 03:49:44 wollman Exp $
42 * $Id: pmap.c,v 1.33 1994/08/18 22:34:42 wollman Exp $
43 */
44
45/*
46 * Derived from hp300 version by Mike Hibler, this version by William
47 * Jolitz uses a recursive map [a pde points to the page directory] to
48 * map the page tables using the pagetables themselves. This is done to
49 * reduce the impact on kernel virtual memory for lots of sparse address
50 * space, and to reduce the cost of memory to each process.
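
The recursive slot described above gives every page-table entry a fixed virtual address: once the page directory is installed at index PTDPTDI of itself, the PTE that maps a given va becomes visible at PTmap + (va >> PAGE_SHIFT) * sizeof(pt_entry_t), which is the arithmetic behind the vtopte()/avtopte() lookups used throughout this file. A minimal user-space sketch of that calculation, using an illustrative slot index rather than the real constant from pmap.h:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PDR_SHIFT	22			/* one i386 PDE covers 4 MB of address space */
#define PTDPTDI_DEMO	0x3bfu			/* illustrative recursive slot, not the header value */

/* Base of the linear PTE window created by the recursive mapping. */
static uint32_t
ptmap_base(void)
{
	return PTDPTDI_DEMO << PDR_SHIFT;
}

/* Sketch of vtopte(): the virtual address at which the PTE for 'va' is visible. */
static uint32_t
demo_vtopte(uint32_t va)
{
	return ptmap_base() + (va >> PAGE_SHIFT) * sizeof(uint32_t);
}

int
main(void)
{
	uint32_t va = 0x08049000u;

	printf("PTE for 0x%08x is mapped at 0x%08x\n",
	    (unsigned)va, (unsigned)demo_vtopte(va));
	return 0;
}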

--- 138 unchanged lines hidden ---

189 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
190 /* are we current address space or kernel? */
191 if ( (pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
192 return ((pt_entry_t *) vtopte(va));
193 /* otherwise, we are alternate address space */
194 else {
195 if ( frame != ((int) APTDpde & PG_FRAME) ) {
196 APTDpde = pmap->pm_pdir[PTDPTDI];
197 tlbflush();
197 pmap_update();
198 }
199 return((pt_entry_t *) avtopte(va));
200 }
201 }
202 return(0);
203}
204
205/*

--- 17 unchanged lines hidden ---

223 /* are we current address space or kernel? */
224 if ( (pmap == kernel_pmap)
225 || (frame == ((int) PTDpde & PG_FRAME)) ) {
226 pa = *(int *) vtopte(va);
227 /* otherwise, we are alternate address space */
228 } else {
229 if ( frame != ((int) APTDpde & PG_FRAME)) {
230 APTDpde = pmap->pm_pdir[PTDPTDI];
231 tlbflush();
231 pmap_update();
232 }
233 pa = *(int *) avtopte(va);
234 }
235 pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
236 return pa;
237 }
238 return 0;
239
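
The extraction code above rebuilds the physical address by keeping the frame bits of the PTE and splicing in the page offset from the virtual address. The same masking worked through with illustrative values, taking PG_FRAME as the upper 20 bits of a 4 KB i386 entry:

#include <stdio.h>
#include <stdint.h>

#define PG_FRAME_DEMO	0xfffff000u	/* frame bits of a 4 KB i386 PTE */

int
main(void)
{
	uint32_t pte = 0x001a3067u;	/* frame 0x001a3000 plus status bits */
	uint32_t va  = 0x08049abcu;	/* virtual address being translated */
	uint32_t pa  = (pte & PG_FRAME_DEMO) | (va & ~PG_FRAME_DEMO);

	printf("pa = 0x%08x\n", (unsigned)pa);	/* prints pa = 0x001a3abc */
	return 0;
}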

--- 147 unchanged lines hidden ---

387 extern vm_offset_t isaphysmem;
388 isaphysmem = va;
389
390 virtual_avail = pmap_map(va, firstaddr,
391 firstaddr + DMAPAGES*NBPG, VM_PROT_ALL);
392 }
393
394 *(int *)CMAP1 = *(int *)CMAP2 = *(int *)PTD = 0;
395 tlbflush();
395 pmap_update();
396
397}
398
399/*
400 * Initialize the pmap module.
401 * Called by vm_init, to initialize any structures that the pmap
402 * system needs to map virtual memory.
403 * pmap_init has been enhanced to support in a fairly consistent

--- 341 unchanged lines hidden ---

745 vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
746 /* are we current address space or kernel? */
747 if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
748 ptp=PTmap;
749 /* otherwise, we are alternate address space */
750 } else {
751 if ( frame != ((int) APTDpde & PG_FRAME)) {
752 APTDpde = pmap->pm_pdir[PTDPTDI];
753 tlbflush();
753 pmap_update();
754 }
755 ptp=APTmap;
756 }
757 return ptp;
758}
759
760/*
761 * If it is the first entry on the list, it is actually

--- 98 unchanged lines hidden ---

860 }
861 }
862 }
863
864 pv = pa_to_pvh(pa);
865 pmap_remove_entry(pmap, pv, sva);
866 pmap_unuse_pt(pmap, sva);
867 }
868 tlbflush();
868 pmap_update();
869 return;
870 }
871
872 sva = i386_btop(sva);
873 eva = i386_btop(eva);
874
875 while (sva < eva) {
876 /*

--- 74 unchanged lines hidden ---

951 }
952 }
953
954 pv = pa_to_pvh(pa);
955 pmap_remove_entry(pmap, pv, va);
956 pmap_unuse_pt(pmap, va);
957 ++sva;
958 }
959 tlbflush();
959 pmap_update();
960}
961
962/*
963 * Routine: pmap_remove_all
964 * Function:
965 * Removes this physical page from
966 * all physical maps in which it resides.
967 * Reflects back modify bits to the pager.

--- 58 unchanged lines hidden ---

1026 *pv = *npv;
1027 free_pv_entry(npv);
1028 } else {
1029 pv->pv_pmap = NULL;
1030 }
1031 }
1032 splx(s);
1033 if (anyvalid)
1034 tlbflush();
1034 pmap_update();
1035}
1036
1037
1038/*
1039 * Set the physical protection on the
1040 * specified range of this map as requested.
1041 */
1042void

--- 75 unchanged lines hidden ---

1118 i386prot |= PG_u;
1119 if( va >= UPT_MIN_ADDRESS)
1120 i386prot |= PG_RW;
1121 }
1122 pmap_pte_set_prot(pte, i386prot);
1123 va += PAGE_SIZE;
1124 }
1125 if (anyvalid)
1126 tlbflush();
1126 pmap_update();
1127}
1128
1129/*
1130 * Insert the given physical page (p) at
1131 * the specified virtual address (v) in the
1132 * target physical map with the protection requested.
1133 *
1134 * If specified, the page will be wired down, meaning

--- 142 unchanged lines hidden ---

1277 (int) npte |= PG_u | PG_RW;
1278
1279 if(*pte != npte) {
1280 if (*pte)
1281 ptevalid++;
1282 *pte = npte;
1283 }
1284 if (ptevalid)
1285 tlbflush();
1285 pmap_update();
1286}
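
The entry constructed just above is nothing more than the page frame ORed with the i386 protection bits (PG_u | PG_RW for a writable user-mode mapping, plus the valid bit). A small sketch of that composition, spelling the architectural bit positions out as local constants; the kernel's own definitions live in pmap.h:

#include <stdio.h>
#include <stdint.h>

#define PG_V_DEMO	0x001u	/* present     (i386 PTE bit 0) */
#define PG_RW_DEMO	0x002u	/* writable    (i386 PTE bit 1) */
#define PG_u_DEMO	0x004u	/* user access (i386 PTE bit 2) */

int
main(void)
{
	uint32_t frame = 0x001a3000u;	/* physical page frame address */
	uint32_t kernel_pte = frame | PG_V_DEMO | PG_RW_DEMO;
	uint32_t user_pte = kernel_pte | PG_u_DEMO;

	printf("kernel r/w entry: 0x%08x\n", (unsigned)kernel_pte);
	printf("user   r/w entry: 0x%08x\n", (unsigned)user_pte);
	return 0;
}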
1287
1288/*
1289 * Add a list of wired pages to the kva
1290 * this routine is only used for temporary
1291 * kernel mappings that do not need to have
1292 * page modification or references recorded.
1293 * Note that old mappings are simply written

--- 11 unchanged lines hidden ---

1305
1306 for(i=0;i<count;i++) {
1307 pte = vtopte(va + i * NBPG);
1308 if (*pte)
1309 anyvalid++;
1310 *pte = (pt_entry_t) ( (int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | PG_W));
1311 }
1312 if (anyvalid)
1313 tlbflush();
1313 pmap_update();
1314}
1315/*
1316 * this routine jerks page mappings from the
1317 * kernel -- it is meant only for temporary mappings.
1318 */
1319void
1320pmap_qremove(va, count)
1321 vm_offset_t va;
1322 int count;
1323{
1324 int i;
1325 register pt_entry_t *pte;
1326 for(i=0;i<count;i++) {
1327 pte = vtopte(va + i * NBPG);
1328 *pte = 0;
1329 }
1330 tlbflush();
1330 pmap_update();
1331}
1332
1333/*
1334 * add a wired page to the kva
1335 * note that in order for the mapping to take effect -- you
1336 * should do a tlbflush after doing the pmap_kenter...
1336 * should do a pmap_update after doing the pmap_kenter...
1337 */
1338void
1339pmap_kenter(va, pa)
1340 vm_offset_t va;
1341 register vm_offset_t pa;
1342{
1343 register pt_entry_t *pte;
1344 int wasvalid = 0;
1345
1346 pte = vtopte(va);
1347
1348 if (*pte)
1349 wasvalid++;
1350
1351 *pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
1352
1353 if (wasvalid)
1354 tlbflush();
1354 pmap_update();
1355}
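
As the comment above pmap_kenter() says, writing the PTE by itself is not enough: any stale translation for that address stays in the TLB until pmap_update() is issued (pmap_kenter() only flushes on its own when it overwrites a previously valid entry). A usage sketch, in kernel context rather than a standalone program, using only routines defined in this file; va and pa stand for a caller-chosen kernel virtual address and a physical page address:

	/* Temporarily wire the page at 'pa' into the kernel map at 'va'. */
	pmap_kenter(va, pa);
	pmap_update();		/* per the comment above: flush after entering */

	/* ... use the mapping through 'va' ... */

	pmap_kremove(va);	/* tears the mapping down and flushes again */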
1356
1357/*
1358 * remove a page from the kernel pagetables
1359 */
1360void
1361pmap_kremove( va)
1362 vm_offset_t va;
1363{
1364 register pt_entry_t *pte;
1365 pte = vtopte(va);
1366
1367 *pte = (pt_entry_t) 0;
1368 tlbflush();
1368 pmap_update();
1369}
1370
1371/*
1372 * this code makes some *MAJOR* assumptions:
1373 * 1. Current pmap & pmap exists.
1374 * 2. Not wired.
1375 * 3. Read access.
1376 * 4. No page table pages.

--- 135 unchanged lines hidden ---

1512 vm_page_unhold(p);
1513 anyvalid += pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
1514 }
1515 }
1516 }
1517 }
1518
1519 if (anyvalid)
1520 tlbflush();
1520 pmap_update();
1521}
1522
1523/*
1524 * Routine: pmap_change_wiring
1525 * Function: Change the wiring attribute for a map/virtual-address
1526 * pair.
1527 * In/out conditions:
1528 * The mapping must already exist in the pmap.

--- 41 unchanged lines hidden ---

1570void
1571pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1572 pmap_t dst_pmap, src_pmap;
1573 vm_offset_t dst_addr;
1574 vm_size_t len;
1575 vm_offset_t src_addr;
1576{
1577}
1578/*
1579 * Require that all active physical maps contain no
1580 * incorrect entries NOW. [This update includes
1581 * forcing updates of any address map caching.]
1582 *
1583 * Generally used to insure that a thread about
1584 * to run will see a semantically correct world.
1585 */
1586void
1587pmap_update()
1588{
1589 tlbflush();
1590}
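
This wrapper, present only in the older revision, is what the change is about: every call site now goes through the machine-independent pmap_update() name instead of invoking the i386 tlbflush() primitive directly (the definition itself presumably moves out of this file, e.g. into a header). The call sites above all share one idiom: modify any number of PTEs first, then flush once at the end, and only when a previously valid entry was overwritten, since the i386 TLB does not cache not-present translations. A fragment in the style of pmap_qenter(), where new_pte[] is a hypothetical array of replacement entries:

	int anyvalid = 0;

	for (i = 0; i < count; i++) {
		pte = vtopte(va + i * NBPG);
		if (*pte)		/* old entry was valid, may be cached */
			anyvalid++;
		*pte = new_pte[i];	/* hypothetical replacement entry */
	}
	if (anyvalid)
		pmap_update();		/* one flush covers the whole batch */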
1591
1592/*
1593 * Routine: pmap_kernel
1594 * Function:
1595 * Returns the physical map handle for the kernel.
1596 */
1597pmap_t
1598pmap_kernel()

--- 13 unchanged lines hidden ---

1612{
1613 if (*(int *)CMAP2)
1614 panic("pmap_zero_page: CMAP busy");
1615
1616 *(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
1617 bzero(CADDR2,NBPG);
1618
1619 *(int *)CMAP2 = 0;
1620 tlbflush();
1607 pmap_update();
1621}
1622
1623/*
1624 * pmap_copy_page copies the specified (machine independent)
1625 * page by mapping the page into virtual memory and using
1626 * bcopy to copy the page, one machine dependent page at a
1627 * time.
1628 */

--- 10 unchanged lines hidden ---

1639
1640#if __GNUC__ > 1
1641 memcpy(CADDR2, CADDR1, NBPG);
1642#else
1643 bcopy(CADDR1, CADDR2, NBPG);
1644#endif
1645 *(int *)CMAP1 = 0;
1646 *(int *)CMAP2 = 0;
1647 tlbflush();
1634 pmap_update();
1648}
1649
1650
1651/*
1652 * Routine: pmap_pageable
1653 * Function:
1654 * Make the specified pages (by pmap, offset)
1655 * pageable (or not) as requested.

--- 158 unchanged lines hidden ---

1814 if (setem)
1815 (int) npte = (int) *pte | bit;
1816 else
1817 (int) npte = (int) *pte & ~bit;
1818 *pte = npte;
1819 }
1820 }
1821 splx(s);
1822 tlbflush();
1809 pmap_update();
1823}
1824
1825/*
1826 * pmap_page_protect:
1827 *
1828 * Lower the permission for all mappings to a given page.
1829 */
1830void

--- 160 unchanged lines hidden ---