pmap.c: diff from 81302f1d777f8f76c378a5afaab432f7c0bc2ae4 (old) to 3b23ffe27185fbc4c57c76db11039cd8d51eece6 (new); removed lines are prefixed with "-", added lines with "+".
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman

--- 1189 unchanged lines hidden ---

        /*
         * Promotion: flush every 4KB page mapping from the TLB
         * because there are too many to flush individually.
         */
        invltlb();
}

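The full flush above is cheap relative to issuing thousands of individual invlpg instructions. On i386, invltlb() conventionally amounts to reloading %cr3, which drops every non-global TLB entry in one operation; a minimal sketch of that equivalence, assuming the usual cpufunc.h helpers:

        /*
         * Sketch only: invltlb() is typically implemented as a %cr3
         * reload, which flushes all non-global mappings at once.
         */
        load_cr3(rcr3());
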
#ifdef SMP
+
+static void
+pmap_curcpu_cb_dummy(pmap_t pmap __unused, vm_offset_t addr1 __unused,
+    vm_offset_t addr2 __unused)
+{
+}
+
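This no-op exists because the smp_masked_* shootdown primitives now take a callback that the initiating CPU runs on itself while the IPIs are in flight; the plain TLB invalidation paths need no extra local work, hence the empty body. A sketch of the shape involved, assuming the smp_invl_cb_t typedef and an invocation pattern along these lines (the helper name below is hypothetical):

typedef void (*smp_invl_cb_t)(struct pmap *, vm_offset_t addr1,
    vm_offset_t addr2);

/* Hypothetical sketch of how the initiator might drive the callback. */
static void
shootdown_sketch(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb)
{
        /* ... post the shootdown request and IPI the CPUs in mask ... */
        curcpu_cb(pmap, addr1, addr2);  /* local work overlaps the IPIs */
        /* ... wait for every targeted CPU to acknowledge ... */
}
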
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes

--- 22 unchanged lines hidden ---

                mask = &all_cpus;
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                CPU_AND(&other_cpus, &pmap->pm_active);
                mask = &other_cpus;
        }
-        smp_masked_invlpg(*mask, va, pmap);
+        smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
        sched_unpin();
}

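The N.B. above is a contract on callers, not on this function: the page table store must be globally performed before the shootdown starts, or a remote CPU can re-cache the stale entry after having invalidated it. A minimal sketch of a conforming caller, assuming the pte_store()/pmap_invalidate_page() idiom used elsewhere in this file (pte, newpte, and va are hypothetical locals):

        /*
         * Required ordering sketch: publish the new PTE globally first,
         * then invalidate.  The shootdown must not begin before the
         * store is visible to the other processors.
         */
        pte_store(pte, newpte);
        pmap_invalidate_page(pmap, va);
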
/* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
#define PMAP_INVLPG_THRESHOLD   (4 * 1024 * PAGE_SIZE)

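With a 4KB PAGE_SIZE this works out to 4 * 1024 * 4096 bytes = 16 MiB, i.e. 4096 4KB PTEs, comfortably above the roughly 1.5K entries of Broadwell's second-level TLB. A sketch of how such a threshold is typically consumed (an assumption; the hidden body of pmap_invalidate_range_int is not shown in this diff):

        if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
                /* Too many pages: one full flush beats per-page invlpg. */
                pmap_invalidate_all_int(pmap);
                return;
        }
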
static void
pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)

--- 16 unchanged lines hidden ---

                mask = &all_cpus;
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                CPU_AND(&other_cpus, &pmap->pm_active);
                mask = &other_cpus;
        }
-        smp_masked_invlpg_range(*mask, sva, eva, pmap);
+        smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
        sched_unpin();
}

static void
pmap_invalidate_all_int(pmap_t pmap)
{
        cpuset_t *mask, other_cpus;
        u_int cpuid;

--- 6 unchanged lines hidden ---

                mask = &all_cpus;
        } else {
                cpuid = PCPU_GET(cpuid);
                other_cpus = all_cpus;
                CPU_CLR(cpuid, &other_cpus);
                CPU_AND(&other_cpus, &pmap->pm_active);
                mask = &other_cpus;
        }
-        smp_masked_invltlb(*mask, pmap);
+        smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
        sched_unpin();
}

static void
-__CONCAT(PMTYPE, invalidate_cache)(void)
+pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused,
+    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
-
-        sched_pin();
        wbinvd();
-        smp_cache_flush();
-        sched_unpin();
}

+static void
+__CONCAT(PMTYPE, invalidate_cache)(void)
+{
+        smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
+}
+
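Same pattern as the TLB paths, but here the local work is real: wbinvd() moves out of the caller and into the callback, so smp_cache_flush() now owns the pin/IPI/unpin sequence and runs the flush on the initiating CPU itself. A sketch of what that presumably looks like inside smp_cache_flush() (an assumption; the real body lives in the machine-dependent SMP code, not in this diff):

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
        sched_pin();
        /* ... IPI the other CPUs to flush their caches ... */
        curcpu_cb(NULL, 0, 0);  /* wbinvd() on the initiating CPU */
        /* ... wait for the remote flushes to complete ... */
        sched_unpin();
}
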
struct pde_action {
        cpuset_t invalidate;    /* processors that invalidate their TLB */
        vm_offset_t va;
        pd_entry_t *pde;
        pd_entry_t newpde;
        u_int store;            /* processor that updates the PDE */
};

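pde_action packages a coordinated PDE update: one designated CPU (store) writes the new entry while every CPU in the invalidate set flushes the old mapping, typically under an SMP rendezvous. A hedged sketch of a consumer (the handler name and the single-handler shape are assumptions; the struct fields and the pde_store()/pmap_update_pde_invalidate() helpers follow this file):

static void
pde_update_sketch(void *arg)
{
        struct pde_action *act = arg;
        u_int cpuid = PCPU_GET(cpuid);

        if (cpuid == act->store)
                pde_store(act->pde, act->newpde);       /* one CPU writes */
        if (CPU_ISSET(cpuid, &act->invalidate))
                pmap_update_pde_invalidate(act->va, act->newpde);
}
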

--- 5081 unchanged lines hidden ---