/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

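/*
 * Legacy warm-boot handoff, used when not booted via EFI: the BIOS
 * warm-reset vector at 0x40:0x67 (WARMBOOT_OFF/WARMBOOT_SEG below, via
 * the kernel mapping of low memory) and the CMOS shutdown-status byte
 * are pointed at the AP trampoline so a CPU that comes out of reset
 * through the BIOS jumps to our real-mode startup code.
 */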
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define	GiB(v)			(v ## ULL << 30)

#define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
static char *doublefault_stack;
static char *mce_stack;
static char *nmi_stack;
static char *dbg_stack;
void *bootpcpu;

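/*
 * Knobs consumed by the MP trampoline startup code: whether to enable
 * 5-level paging (LA57) and no-execute, set below in cpu_mp_start()
 * along with the physical address of the boot page table root.
 */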
extern u_int mptramp_la57;
extern u_int mptramp_nx;
smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown =
    &smp_targeted_tlb_shootdown_native;

/*
 * Local data and functions.
 */

static int start_ap(int apic_id, vm_paddr_t boot_address);

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;
	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
	pc->pc_kpmap_store.pm_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

static void
amd64_mp_alloc_pcpu(void)
{
	vm_page_t m;
	int cpu;

	/* Allocate pcpu areas to the correct domain. */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
#ifdef NUMA
		m = NULL;
		if (vm_ndomains > 1) {
			m = vm_page_alloc_noobj_domain(
			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]),
			    VM_ALLOC_ZERO);
		}
		if (m == NULL)
#endif
			m = vm_page_alloc_noobj(VM_ALLOC_ZERO);
		if (m == NULL)
			panic("cannot alloc pcpu page for cpu %d", cpu);
		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
	}
}

/*
 * start each AP in our list
 */
int
start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	amd64_mp_alloc_pcpu();
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);

	/* Create a transient 1:1 mapping of low 4G */
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
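	/*
	 * Each of the four page directories below identity-maps 1 GiB
	 * with 2 MB (PG_PS) pages; together with the single PDP they
	 * cover the low 4 GiB for the duration of AP startup.
	 */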
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPU's
 */

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate shootdown IPI independently from other CPUs.
 * Initiator enters critical section, then fills its local PCPU
 * shootdown info (pc_smp_tlb_ vars), then clears scoreboard generation
 * at location (cpu, my_cpuid) for each target cpu.  After that IPI is
 * sent to all targets which scan for zeroed scoreboard generation
 * words.  Upon finding such word the shootdown data is read from
 * corresponding cpu's pcpu, and generation is set.  Meantime initiator
 * loops waiting for all zeroed generations in scoreboard to update.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);

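/*
 * Scoreboard indexing helpers: invl_scoreboard_getcpu() returns the
 * row scanned by CPU 'cpu' when it is a shootdown target (one slot per
 * possible initiator), and invl_scoreboard_slot() returns the slot in
 * that row owned by the current CPU acting as initiator.
 */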
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * Function must be called with the thread pinned, and it unpins on
 * completion.
 */
void
smp_targeted_tlb_shootdown_native(pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    curcpu, cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
	} else {
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	if (invlpgb_works && pmap == kernel_pmap) {
		invlpgb(INVLPGB_GLOB, 0, 0);

		/*
		 * TLBSYNC syncs only against INVLPGB executed on the
		 * same CPU.  Since current thread is pinned by
		 * caller, we do not need to enter critical section to
		 * prevent migration.
		 */
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	if (invlpgb_works && pmap == kernel_pmap) {
		invlpgb(INVLPGB_GLOB | INVLPGB_VA | trunc_page(addr), 0, 0);
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	if (invlpgb_works && pmap == kernel_pmap) {
		vm_offset_t va;
		uint64_t cnt, total;

		addr1 = trunc_page(addr1);
		addr2 = round_page(addr2);
		total = atop(addr2 - addr1);
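		/*
		 * Feed the range to INVLPGB in chunks: 4 KB pages until
		 * the next 2 MB boundary (or when fewer than NPDEPG
		 * pages remain), otherwise whole 2 MB pages, with each
		 * chunk capped at the CPU's reported maximum count.
		 */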
		for (va = addr1; total > 0;) {
			if ((va & PDRMASK) != 0 || total < NPDEPG) {
				cnt = atop(NBPDR - (va & PDRMASK));
				if (cnt > total)
					cnt = total;
				if (cnt > invlpgb_maxcnt + 1)
					cnt = invlpgb_maxcnt + 1;
				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
				    cnt - 1);
				va += ptoa(cnt);
				total -= cnt;
			} else {
				cnt = total / NPTEPG;
				if (cnt > invlpgb_maxcnt + 1)
					cnt = invlpgb_maxcnt + 1;
				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
				    INVLPGB_2M_CNT | (cnt - 1));
				va += cnt << PDRSHIFT;
				total -= cnt * NPTEPG;
			}
		}
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			load_cr3(smp_tlb_pmap->pm_cr3 |
			    pmap_get_pcid(smp_tlb_pmap));
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
		struct invpcid_descr d = { 0 };

		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < smp_tlb_addr2);
	}
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < smp_tlb_addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
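	/*
	 * Service every initiator that has posted a request for this
	 * CPU, rescanning the row until no zeroed slot remains.
	 */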
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because target CPU cannot return to userspace
		 * before handler finishes.  Only NMI can preempt the
		 * handler, but NMI would see the kernel handler frame
		 * and not touch not-invalidated user page table.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}