--- pmap.c (da2d1e9d2582eece6aad948897489ca0b5d64d73)
+++ pmap.c (d12c44655065633dd8b8c249ec271a1d8ba63ba4)
 /*-
  * SPDX-License-Identifier: BSD-4-Clause
  *
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1994 John S. Dyson
  * All rights reserved.
  * Copyright (c) 1994 David Greenman

--- 134 unchanged lines hidden ---

 #include <vm/vm_reserv.h>
 #include <vm/uma.h>

 #ifdef DEV_APIC
 #include <sys/bus.h>
 #include <machine/intr_machdep.h>
 #include <x86/apicvar.h>
 #endif
+#include <x86/ifunc.h>
 #include <machine/bootinfo.h>
 #include <machine/cpu.h>
 #include <machine/cputypes.h>
 #include <machine/md_var.h>
 #include <machine/pcb.h>
 #include <machine/specialreg.h>
 #ifdef SMP
 #include <machine/smp.h>

--- 150 unchanged lines hidden ---

 static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot);
 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
     u_int flags, vm_page_t m);
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
 static void pmap_flush_page(vm_page_t m);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
+    vm_offset_t eva);
+static void pmap_invalidate_cache_range_all(vm_offset_t sva,
+    vm_offset_t eva);
 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
     pd_entry_t pde);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);

--- 1077 unchanged lines hidden ---

          * 2- or 4MB page mapping from the TLB.
          */
         if ((pde & PG_PROMOTED) != 0)
                 pmap_invalidate_range(pmap, va, va + NBPDR - 1);
         else
                 pmap_invalidate_page(pmap, va);
 }

+DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t),
+    static)
+{
+
+        if ((cpu_feature & CPUID_SS) != 0)
+                return (pmap_invalidate_cache_range_selfsnoop);
+        if ((cpu_feature & CPUID_CLFSH) != 0)
+                return (pmap_force_invalidate_cache_range);
+        return (pmap_invalidate_cache_range_all);
+}
+
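DEFINE_IFUNC declares pmap_invalidate_cache_range() as an ELF GNU indirect
function: the resolver body above runs once, when the kernel linker binds the
symbol, and the implementation it returns becomes the direct target of every
later call, so the CPUID feature tests are paid once rather than on each
invocation. As a minimal sketch of the underlying mechanism, here is a
userland equivalent using the GCC/clang ifunc attribute; the names are
illustrative, not taken from the kernel:

    #include <stdio.h>

    static void flush_fast(void) { puts("clflushopt path"); }
    static void flush_slow(void) { puts("clflush path"); }

    /* Runs at load time; the pointer it returns is patched into the
       symbol, so later calls to flush() jump straight to the winner. */
    static void (*resolve_flush(void))(void)
    {
            return (1 ? flush_fast : flush_slow); /* imagine a CPUID test */
    }

    void flush(void) __attribute__((ifunc("resolve_flush")));

    int
    main(void)
    {
            flush();        /* dispatches to flush_fast() */
            return (0);
    }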
 #define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)

+static void
+pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
+{
+
+        KASSERT((sva & PAGE_MASK) == 0,
+            ("pmap_invalidate_cache_range: sva not page-aligned"));
+        KASSERT((eva & PAGE_MASK) == 0,
+            ("pmap_invalidate_cache_range: eva not page-aligned"));
+}
+
+static void
+pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
+{
+
+        pmap_invalidate_cache_range_check_align(sva, eva);
+}
+
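The self-snoop variant is intentionally a no-op beyond the alignment
assertions: on CPUs that advertise CPUID_SS, the hardware keeps the caches
coherent when the memory type of a mapping changes, so no explicit flush is
needed and the resolver can bind pmap_invalidate_cache_range() to this empty
body.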
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
+pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
 {

-        if (force) {
-                sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
-        } else {
-                KASSERT((sva & PAGE_MASK) == 0,
-                    ("pmap_invalidate_cache_range: sva not page-aligned"));
-                KASSERT((eva & PAGE_MASK) == 0,
-                    ("pmap_invalidate_cache_range: eva not page-aligned"));
+        sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+        if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
+                /*
+                 * The supplied range is bigger than 2MB.
+                 * Globally invalidate cache.
+                 */
+                pmap_invalidate_cache();
+                return;
         }

-        if ((cpu_feature & CPUID_SS) != 0 && !force)
-                ; /* If "Self Snoop" is supported and allowed, do nothing. */
-        else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
-            eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-#ifdef DEV_APIC
+        /*
+         * XXX: Some CPUs fault, hang, or trash the local APIC
+         * registers if we use CLFLUSH on the local APIC
+         * range. The local APIC is always uncached, so we
+         * don't need to flush for that range anyway.
+         */
+        if (pmap_kextract(sva) == lapic_paddr)
+                return;
+
+        if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
                 /*
-                 * XXX: Some CPUs fault, hang, or trash the local APIC
-                 * registers if we use CLFLUSH on the local APIC
-                 * range. The local APIC is always uncached, so we
-                 * don't need to flush for that range anyway.
-                 */
-                if (pmap_kextract(sva) == lapic_paddr)
-                        return;
-#endif
-                /*
-                 * Otherwise, do per-cache line flush. Use the sfence
+                 * Do per-cache line flush. Use the sfence
                  * instruction to insure that previous stores are
                  * included in the write-back. The processor
                  * propagates flush to other processors in the cache
                  * coherence domain.
                  */
                 sfence();
                 for (; sva < eva; sva += cpu_clflush_line_size)
                         clflushopt(sva);
                 sfence();
-        } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
-            eva - sva < PMAP_CLFLUSH_THRESHOLD) {
-#ifdef DEV_APIC
-                if (pmap_kextract(sva) == lapic_paddr)
-                        return;
-#endif
+        } else {
                 /*
                  * Writes are ordered by CLFLUSH on Intel CPUs.
                  */
                 if (cpu_vendor_id != CPU_VENDOR_INTEL)
                         mfence();
                 for (; sva < eva; sva += cpu_clflush_line_size)
                         clflush(sva);
                 if (cpu_vendor_id != CPU_VENDOR_INTEL)
                         mfence();
-        } else {
-
-                /*
-                 * No targeted cache flush methods are supported by CPU,
-                 * or the supplied range is bigger than 2MB.
-                 * Globally invalidate cache.
-                 */
-                pmap_invalidate_cache();
         }
 }

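The rounding at the top of pmap_force_invalidate_cache_range() relies on
cpu_clflush_line_size being a power of two: clearing the low bits moves sva
down to the start of its cache line, so the first flush covers the partial
line at the head of the range. Above PMAP_CLFLUSH_THRESHOLD (2MB, i.e. 32768
lines of 64 bytes) the per-line loop would issue tens of thousands of flush
instructions, so a full cache invalidation is cheaper. A standalone
illustration of the arithmetic, assuming the typical 64-byte line size:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uintptr_t line = 64;            /* typical cpu_clflush_line_size */
            uintptr_t sva = 0xc0123456;     /* unaligned start address */

            sva &= ~(line - 1);             /* round down to line boundary */
            printf("%#lx\n", (unsigned long)sva);   /* prints 0xc0123440 */
            return (0);
    }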
+static void
+pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
+{
+
+        pmap_invalidate_cache_range_check_align(sva, eva);
+        pmap_invalidate_cache();
+}
+
 void
 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 {
         int i;

         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
             (cpu_feature & CPUID_CLFSH) == 0) {
                 pmap_invalidate_cache();

--- 3990 unchanged lines hidden ---

         }
         va = kva_alloc(size);
         if (va == 0)
                 panic("%s: Couldn't allocate KVA", __func__);
         }
         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-        pmap_invalidate_cache_range(va, va + size, FALSE);
+        pmap_invalidate_cache_range(va, va + size);
         return ((void *)(va + offset));
 }

 void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {

         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
--- 222 unchanged lines hidden ---

5713 }
5714
5715 /*
5716 * Flush CPU caches to make sure any data isn't cached that
5717 * shouldn't be, etc.
5718 */
5719 if (changed) {
5720 pmap_invalidate_range(kernel_pmap, base, tmpva);
-                pmap_invalidate_cache_range(base, tmpva, FALSE);
+                pmap_invalidate_cache_range(base, tmpva);
         }
         return (0);
 }

 /*
  * perform the pmap work for mincore
  */
 int

--- 290 unchanged lines hidden ---
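The flush above sits at the tail of pmap_change_attr(), which rewrites the
memory type of an already-established kernel mapping and must then shoot down
stale TLB entries and, when the range became uncacheable, stale cache lines.
A sketch of a representative caller; the framebuffer variables are
hypothetical:

    /* Hypothetical: make an existing framebuffer mapping write-combining. */
    int error;

    error = pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING);
    if (error != 0)
            printf("falling back to uncacheable framebuffer\n");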