xref: /freebsd/sys/amd64/amd64/mp_machdep.c (revision 55305b590797524dd1cecfc9406869700e925e51)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1996, by Steve Passe
5  * Copyright (c) 2003, by Peter Wemm
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. The name of the developer may NOT be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include "opt_acpi.h"
31 #include "opt_cpu.h"
32 #include "opt_ddb.h"
33 #include "opt_kstack_pages.h"
34 #include "opt_sched.h"
35 #include "opt_smp.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/cpuset.h>
41 #include <sys/domainset.h>
42 #include <sys/kdb.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/memrange.h>
48 #include <sys/mutex.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 #include <sys/sysctl.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_phys.h>
62 
63 #include <x86/apicreg.h>
64 #include <machine/clock.h>
65 #include <machine/cputypes.h>
66 #include <machine/cpufunc.h>
67 #include <x86/mca.h>
68 #include <machine/md_var.h>
69 #include <machine/pcb.h>
70 #include <machine/psl.h>
71 #include <machine/smp.h>
72 #include <machine/specialreg.h>
73 #include <machine/tss.h>
74 #include <x86/ucode.h>
75 #include <machine/cpu.h>
76 #include <x86/init.h>
77 
78 #ifdef DEV_ACPI
79 #include <contrib/dev/acpica/include/acpi.h>
80 #include <dev/acpica/acpivar.h>
81 #endif
82 
83 #define WARMBOOT_TARGET		0
84 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
85 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
86 
87 #define CMOS_REG		(0x70)
88 #define CMOS_DATA		(0x71)
89 #define BIOS_RESET		(0x0f)
90 #define BIOS_WARM		(0x0a)
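/*
 * On a legacy BIOS (non-EFI) boot, writing BIOS_WARM into the CMOS
 * shutdown status byte (offset BIOS_RESET) makes the BIOS resume through
 * the warm-reset vector at 0x40:0x67 instead of running POST; that
 * vector is pointed at the AP trampoline below.
 */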
91 
92 #define GiB(v)			(v ## ULL << 30)
93 
94 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)
95 
96 /* Temporary variables for init_secondary()  */
97 static char *doublefault_stack;
98 static char *mce_stack;
99 static char *nmi_stack;
100 static char *dbg_stack;
101 void *bootpcpu;
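/*
 * These are staged by start_all_aps() for the AP currently being started
 * and consumed by that AP in init_secondary(); APs are started one at a
 * time, so a single set of variables suffices.
 */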
102 
103 extern u_int mptramp_la57;
104 extern u_int mptramp_nx;
105 smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown =
106     &smp_targeted_tlb_shootdown_native;
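/*
 * The shootdown entry point is a function pointer so that platform code
 * (for example, hypervisor enlightenments) can substitute a non-IPI
 * implementation; it defaults to the native one below.
 */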
107 
108 /*
109  * Local data and functions.
110  */
111 
112 static int start_ap(int apic_id, vm_paddr_t boot_address);
113 
114 /*
115  * Initialize the IPI handlers and start up the APs.
116  */
117 void
118 cpu_mp_start(void)
119 {
120 	int i;
121 
122 	/* Initialize the logical ID to APIC ID table. */
123 	for (i = 0; i < MAXCPU; i++) {
124 		cpu_apic_ids[i] = -1;
125 	}
126 
127 	/* Install an inter-CPU IPI for cache and TLB invalidations. */
128 	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
129 	    SDT_SYSIGT, SEL_KPL, 0);
130 
131 	/* Install an inter-CPU IPI for all-CPU rendezvous */
132 	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
133 	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
134 
135 	/* Install generic inter-CPU IPI handler */
136 	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
137 	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
138 
139 	/* Install an inter-CPU IPI for CPU stop/restart */
140 	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
141 	    SDT_SYSIGT, SEL_KPL, 0);
142 
143 	/* Install an inter-CPU IPI for CPU offline */
144 	setidt(IPI_OFF, pti ? IDTVEC(cpuoff_pti) : IDTVEC(cpuoff),
145 	    SDT_SYSIGT, SEL_KPL, 0);
146 
147 	/* Install an inter-CPU IPI for CPU suspend/resume */
148 	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
149 	    SDT_SYSIGT, SEL_KPL, 0);
150 
151 	/* Install an IPI for calling delayed SWI */
152 	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
153 	    SDT_SYSIGT, SEL_KPL, 0);
154 
155 	/* Set boot_cpu_id if needed. */
156 	if (boot_cpu_id == -1) {
157 		boot_cpu_id = PCPU_GET(apic_id);
158 		cpu_info[boot_cpu_id].cpu_bsp = 1;
159 	} else
160 		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
161 		    ("BSP's APIC ID doesn't match boot_cpu_id"));
162 
163 	/* Probe logical/physical core configuration. */
164 	topo_probe();
165 
166 	assign_cpu_ids();
167 
168 	mptramp_la57 = la57;
169 	mptramp_nx = pg_nx != 0;
170 	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
171 	mptramp_pagetables = kernel_pmap->pm_cr3;
172 
173 	/* Start each Application Processor */
174 	start_all_aps();
175 
176 	set_interrupt_apic_ids();
177 
178 #if defined(DEV_ACPI) && MAXMEMDOM > 1
179 	acpi_pxm_set_cpu_locality();
180 #endif
181 }
182 
183 void
184 cpu_mp_stop(void)
185 {
186 	cpuset_t other_cpus = all_cpus;
187 
188 	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
189 	offline_cpus(other_cpus);
190 }
191 
192 /*
193  * AP CPUs call this to initialize themselves.
194  */
195 void
196 init_secondary(void)
197 {
198 	struct pcpu *pc;
199 	struct nmi_pcpu *np;
200 	struct user_segment_descriptor *gdt;
201 	struct region_descriptor ap_gdt;
202 	u_int64_t cr0;
203 	int cpu, gsel_tss, x;
204 
205 	/* Set by the startup code for us to use */
206 	cpu = bootAP;
207 
208 	/* Update microcode before doing anything else. */
209 	ucode_load_ap(cpu);
210 
211 	/* Initialize the PCPU area. */
212 	pc = bootpcpu;
213 	pcpu_init(pc, cpu, sizeof(struct pcpu));
214 	dpcpu_init(dpcpu, cpu);
215 	pc->pc_apic_id = cpu_apic_ids[cpu];
216 	pc->pc_prvspace = pc;
217 	pc->pc_curthread = 0;
218 	pc->pc_tssp = &pc->pc_common_tss;
219 	pc->pc_rsp0 = 0;
220 	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
221 	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
222 	gdt = pc->pc_gdt;
223 	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
224 	pc->pc_fs32p = &gdt[GUFS32_SEL];
225 	pc->pc_gs32p = &gdt[GUGS32_SEL];
226 	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
227 	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
228 	/* See comment in pmap_bootstrap(). */
229 	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
230 	pc->pc_pcid_gen = 1;
231 	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
232 	pc->pc_kpmap_store.pm_gen = 1;
233 
234 	pc->pc_smp_tlb_gen = 1;
235 
236 	/* Init tss */
237 	pc->pc_common_tss = __pcpu[0].pc_common_tss;
238 	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
239 	    IOPERM_BITMAP_SIZE;
240 	pc->pc_common_tss.tss_rsp0 = 0;
241 
242 	/* The doublefault stack runs on IST1. */
243 	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
244 	np->np_pcpu = (register_t)pc;
245 	pc->pc_common_tss.tss_ist1 = (long)np;
246 
247 	/* The NMI stack runs on IST2. */
248 	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
249 	np->np_pcpu = (register_t)pc;
250 	pc->pc_common_tss.tss_ist2 = (long)np;
251 
252 	/* The MC# stack runs on IST3. */
253 	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
254 	np->np_pcpu = (register_t)pc;
255 	pc->pc_common_tss.tss_ist3 = (long)np;
256 
257 	/* The DB# stack runs on IST4. */
258 	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
259 	np->np_pcpu = (register_t)pc;
260 	pc->pc_common_tss.tss_ist4 = (long)np;
261 
262 	/* Prepare private GDT */
263 	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
264 	for (x = 0; x < NGDT; x++) {
265 		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
266 		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
267 			ssdtosd(&gdt_segs[x], &gdt[x]);
268 	}
269 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
270 	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
271 	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
272 	ap_gdt.rd_base = (u_long)gdt;
273 	lgdt(&ap_gdt);			/* does magic intra-segment return */
274 
275 	wrmsr(MSR_FSBASE, 0);		/* User value */
276 	wrmsr(MSR_GSBASE, (uint64_t)pc);
277 	wrmsr(MSR_KGSBASE, 0);		/* User value */
278 	fix_cpuid();
279 
280 	lidt(&r_idt);
281 
282 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
283 	ltr(gsel_tss);
284 
285 	/*
286 	 * Set to a known state:
287 	 * Set by mpboot.s: CR0_PG, CR0_PE
288 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
289 	 */
290 	cr0 = rcr0();
291 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
292 	load_cr0(cr0);
293 
294 	amd64_conf_fast_syscall();
295 
296 	/* signal our startup to the BSP. */
297 	mp_naps++;
298 
299 	/* Spin until the BSP releases the AP's. */
300 	while (atomic_load_acq_int(&aps_ready) == 0)
301 		ia32_pause();
302 
303 	init_secondary_tail();
304 }
305 
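/*
 * Back each AP's pcpu page with memory from the CPU's own NUMA domain
 * when such a page can be allocated, fall back to any domain otherwise,
 * and panic if no page is available at all.
 */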
306 static void
307 amd64_mp_alloc_pcpu(void)
308 {
309 	vm_page_t m;
310 	int cpu;
311 
312 	/* Allocate pcpu areas to the correct domain. */
313 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
314 #ifdef NUMA
315 		m = NULL;
316 		if (vm_ndomains > 1) {
317 			m = vm_page_alloc_noobj_domain(
318 			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]),
319 			    VM_ALLOC_ZERO);
320 		}
321 		if (m == NULL)
322 #endif
323 			m = vm_page_alloc_noobj(VM_ALLOC_ZERO);
324 		if (m == NULL)
325 			panic("cannot alloc pcpu page for cpu %d", cpu);
326 		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
327 	}
328 }
329 
330 /*
331  * start each AP in our list
332  */
333 int
334 start_all_aps(void)
335 {
336 	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
337 	pml5_entry_t old_pml45;
338 	pml4_entry_t *v_pml4;
339 	pdp_entry_t *v_pdp;
340 	pd_entry_t *v_pd;
341 	vm_paddr_t boot_address;
342 	u_int32_t mpbioswarmvec;
343 	int apic_id, cpu, domain, i;
344 	u_char mpbiosreason;
345 
346 	amd64_mp_alloc_pcpu();
347 
348 	MPASS(bootMP_size <= PAGE_SIZE);
349 	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
350 	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
351 	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
352 	boot_address = VM_PAGE_TO_PHYS(m_boottramp);
353 
354 	/* Create a transient 1:1 mapping of low 4G */
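	/*
	 * The map consists of one PDP page whose first four entries point
	 * at four PD pages filled with 2MB (PG_PS) mappings, covering the
	 * low 4G.  With LA57 a throwaway PML4 page is hung off
	 * pm_pmltop[0]; otherwise slot 0 of the kernel PML4 is reused and
	 * restored once the APs are up.
	 */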
355 	if (la57) {
356 		m_pml4 = pmap_page_alloc_below_4g(true);
357 		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
358 	} else {
359 		v_pml4 = &kernel_pmap->pm_pmltop[0];
360 	}
361 	m_pdp = pmap_page_alloc_below_4g(true);
362 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
363 	m_pd[0] = pmap_page_alloc_below_4g(false);
364 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
365 	for (i = 0; i < NPDEPG; i++)
366 		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
367 		    X86_PG_M | PG_PS;
368 	m_pd[1] = pmap_page_alloc_below_4g(false);
369 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
370 	for (i = 0; i < NPDEPG; i++)
371 		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
372 		    X86_PG_A | X86_PG_M | PG_PS;
373 	m_pd[2] = pmap_page_alloc_below_4g(false);
374 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
375 	for (i = 0; i < NPDEPG; i++)
376 		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
377 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
378 	m_pd[3] = pmap_page_alloc_below_4g(false);
379 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
380 	for (i = 0; i < NPDEPG; i++)
381 		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
382 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
383 	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
384 	    X86_PG_RW | X86_PG_A | X86_PG_M;
385 	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
386 	    X86_PG_RW | X86_PG_A | X86_PG_M;
387 	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
388 	    X86_PG_RW | X86_PG_A | X86_PG_M;
389 	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
390 	    X86_PG_RW | X86_PG_A | X86_PG_M;
391 	old_pml45 = kernel_pmap->pm_pmltop[0];
392 	if (la57) {
393 		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
394 		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
395 	}
396 	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
397 	    X86_PG_RW | X86_PG_A | X86_PG_M;
398 	pmap_invalidate_all(kernel_pmap);
399 
400 	/* copy the AP 1st level boot code */
401 	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
402 	if (bootverbose)
403 		printf("AP boot address %#lx\n", boot_address);
404 
405 	/* save the current value of the warm-start vector */
406 	if (!efi_boot)
407 		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
408 	outb(CMOS_REG, BIOS_RESET);
409 	mpbiosreason = inb(CMOS_DATA);
410 
411 	/* setup a vector to our boot code */
412 	if (!efi_boot) {
413 		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
414 		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
415 	}
416 	outb(CMOS_REG, BIOS_RESET);
417 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
418 
419 	/* start each AP */
420 	domain = 0;
421 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
422 		apic_id = cpu_apic_ids[cpu];
423 #ifdef NUMA
424 		if (vm_ndomains > 1)
425 			domain = acpi_pxm_get_cpu_locality(apic_id);
426 #endif
427 		/* allocate and set up an idle stack data page */
428 		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
429 		    M_WAITOK | M_ZERO);
430 		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
431 		    M_WAITOK | M_ZERO);
432 		mce_stack = kmem_malloc(MCE_STACK_SIZE,
433 		    M_WAITOK | M_ZERO);
434 		nmi_stack = kmem_malloc_domainset(
435 		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
436 		dbg_stack = kmem_malloc_domainset(
437 		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
438 		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
439 		    DPCPU_SIZE, M_WAITOK | M_ZERO);
440 
441 		bootpcpu = &__pcpu[cpu];
442 		bootSTK = (char *)bootstacks[cpu] +
443 		    kstack_pages * PAGE_SIZE - 8;
444 		bootAP = cpu;
445 
446 		/* attempt to start the Application Processor */
447 		if (!start_ap(apic_id, boot_address)) {
448 			/* restore the warmstart vector */
449 			if (!efi_boot)
450 				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
451 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
452 		}
453 
454 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
455 	}
456 
457 	/* restore the warmstart vector */
458 	if (!efi_boot)
459 		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
460 
461 	outb(CMOS_REG, BIOS_RESET);
462 	outb(CMOS_DATA, mpbiosreason);
463 
464 	/* Destroy transient 1:1 mapping */
465 	kernel_pmap->pm_pmltop[0] = old_pml45;
466 	invlpg(0);
467 	if (la57)
468 		vm_page_free(m_pml4);
469 	vm_page_free(m_pd[3]);
470 	vm_page_free(m_pd[2]);
471 	vm_page_free(m_pd[1]);
472 	vm_page_free(m_pd[0]);
473 	vm_page_free(m_pdp);
474 	vm_page_free(m_boottramp);
475 
476 	/* number of APs actually started */
477 	return (mp_naps);
478 }
479 
480 /*
481  * This function starts the AP (application processor) identified
482  * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
483  * to accomplish this.  This is necessary because of the nuances
484  * of the different hardware we might encounter.  It isn't pretty,
485  * but it seems to work.
486  */
487 static int
488 start_ap(int apic_id, vm_paddr_t boot_address)
489 {
490 	int vector, ms;
491 	int cpus;
492 
493 	/* calculate the vector */
494 	vector = (boot_address >> 12) & 0xff;
495 
496 	/* used as a watchpoint to signal AP startup */
497 	cpus = mp_naps;
498 
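	/*
	 * ipi_startup() performs the INIT/STARTUP IPI sequence; the STARTUP
	 * vector is the physical page number of the trampoline, which is
	 * why the trampoline page must sit below 1M.
	 */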
499 	ipi_startup(apic_id, vector);
500 
501 	/* Wait up to 5 seconds for it to start. */
502 	for (ms = 0; ms < 5000; ms++) {
503 		if (mp_naps > cpus)
504 			return 1;	/* return SUCCESS */
505 		DELAY(1000);
506 	}
507 	return 0;		/* return FAILURE */
508 }
509 
510 /*
511  * Flush the TLB on other CPUs
512  */
513 
514 /*
515  * These variables are initialized at startup to reflect how each of
516  * the different kinds of invalidations should be performed on the
517  * current machine and environment.
518  */
519 static enum invl_op_codes invl_op_tlb;
520 static enum invl_op_codes invl_op_pgrng;
521 static enum invl_op_codes invl_op_pg;
522 
523 /*
524  * Scoreboard of IPI completion notifications from target to IPI initiator.
525  *
526  * Each CPU can initiate shootdown IPI independently from other CPUs.
527  * Initiator enters critical section, then fills its local PCPU
528  * shootdown info (pc_smp_tlb_ vars), then clears scoreboard generation
529  * at location (cpu, my_cpuid) for each target cpu.  After that IPI is
530  * sent to all targets which scan for zeroed scoreboard generation
531  * words.  Upon finding such word the shootdown data is read from
532  * corresponding cpu's pcpu, and generation is set.  Meantime initiator
533  * loops waiting for all zeroed generations in scoreboard to update.
534  */
535 static uint32_t *invl_scoreboard;
536 
537 static void
538 invl_scoreboard_init(void *arg __unused)
539 {
540 	u_int i;
541 
542 	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
543 	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
544 	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
545 		invl_scoreboard[i] = 1;
546 
547 	if (pmap_pcid_enabled) {
548 		if (invpcid_works) {
549 			if (pti)
550 				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
551 			else
552 				invl_op_tlb = INVL_OP_TLB_INVPCID;
553 			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
554 			invl_op_pg = INVL_OP_PG_INVPCID;
555 		} else {
556 			invl_op_tlb = INVL_OP_TLB_PCID;
557 			invl_op_pgrng = INVL_OP_PGRNG_PCID;
558 			invl_op_pg = INVL_OP_PG_PCID;
559 		}
560 	} else {
561 		invl_op_tlb = INVL_OP_TLB;
562 		invl_op_pgrng = INVL_OP_PGRNG;
563 		invl_op_pg = INVL_OP_PG;
564 	}
565 }
566 SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);
567 
568 static uint32_t *
569 invl_scoreboard_getcpu(u_int cpu)
570 {
571 	return (invl_scoreboard + cpu * (mp_maxid + 1));
572 }
573 
574 static uint32_t *
575 invl_scoreboard_slot(u_int cpu)
576 {
577 	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
578 }
579 
580 /*
581  * Used by the pmap to request cache or TLB invalidation on local and
582  * remote processors.  Mask provides the set of remote CPUs that are
583  * to be signalled with the invalidation IPI.  As an optimization, the
584  * curcpu_cb callback is invoked on the calling CPU in a critical
585  * section while waiting for the remote CPUs to complete the operation.
586  *
587  * The callback function is called unconditionally on the caller's
588  * underlying processor, even when this processor is not set in the
589  * mask.  So, the callback function must be prepared to handle such
590  * spurious invocations.
591  *
592  * Interrupts must be enabled when calling the function with smp
593  * started, to avoid deadlock with other IPIs that are protected with
594  * smp_ipi_mtx spinlock at the initiator side.
595  *
596  * Function must be called with the thread pinned, and it unpins on
597  * completion.
598  */
599 void
600 smp_targeted_tlb_shootdown_native(pmap_t pmap, vm_offset_t addr1,
601     vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
602 {
603 	cpuset_t mask;
604 	uint32_t generation, *p_cpudone;
605 	int cpu;
606 	bool is_all;
607 
608 	/*
609 	 * It is not necessary to signal other CPUs while booting or
610 	 * when in the debugger.
611 	 */
612 	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
613 		goto local_cb;
614 
615 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
616 
617 	/*
618 	 * Make a stable copy of the set of CPUs on which the pmap is active.
619 	 * See if we have to interrupt other CPUs.
620 	 */
621 	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
622 	is_all = CPU_CMP(&mask, &all_cpus) == 0;
623 	CPU_CLR(curcpu, &mask);
624 	if (CPU_EMPTY(&mask))
625 		goto local_cb;
626 
627 	/*
628 	 * The initiator must have interrupts enabled, which prevents
629 	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
630 	 * from deadlocking with us.  On the other hand, preemption
631 	 * must be disabled to pin the initiator to its instance of the
632 	 * pcpu pc_smp_tlb data and scoreboard line.
633 	 */
634 	KASSERT((read_rflags() & PSL_I) != 0,
635 	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
636 	critical_enter();
637 
638 	PCPU_SET(smp_tlb_addr1, addr1);
639 	PCPU_SET(smp_tlb_addr2, addr2);
640 	PCPU_SET(smp_tlb_pmap, pmap);
641 	generation = PCPU_GET(smp_tlb_gen);
642 	if (++generation == 0)
643 		generation = 1;
644 	PCPU_SET(smp_tlb_gen, generation);
645 	PCPU_SET(smp_tlb_op, op);
646 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
647 	atomic_thread_fence_rel();
648 
649 	CPU_FOREACH_ISSET(cpu, &mask) {
650 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
651 		    ("IPI scoreboard is zero, initiator %d target %d",
652 		    curcpu, cpu));
653 		*invl_scoreboard_slot(cpu) = 0;
654 	}
655 
656 	/*
657 	 * IPI acts as a fence between writing to the scoreboard above
658 	 * (zeroing slot) and reading from it below (wait for
659 	 * acknowledgment).
660 	 */
661 	if (is_all) {
662 		ipi_all_but_self(IPI_INVLOP);
663 	} else {
664 		ipi_selected(mask, IPI_INVLOP);
665 	}
666 	curcpu_cb(pmap, addr1, addr2);
667 	CPU_FOREACH_ISSET(cpu, &mask) {
668 		p_cpudone = invl_scoreboard_slot(cpu);
669 		while (atomic_load_int(p_cpudone) != generation)
670 			ia32_pause();
671 	}
672 
673 	/*
674 	 * Unpin before leaving critical section.  If the thread owes
675 	 * preemption, this allows scheduler to select thread on any
676 	 * CPU from its cpuset.
677 	 */
678 	sched_unpin();
679 	critical_exit();
680 
681 	return;
682 
683 local_cb:
684 	critical_enter();
685 	curcpu_cb(pmap, addr1, addr2);
686 	sched_unpin();
687 	critical_exit();
688 }
689 
690 void
691 smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
692 {
693 	if (invlpgb_works && pmap == kernel_pmap) {
694 		invlpgb(INVLPGB_GLOB, 0, 0);
695 
696 		/*
697 		 * TLBSYNC syncs only against INVLPGB executed on the
698 		 * same CPU.  Since current thread is pinned by
699 		 * caller, we do not need to enter critical section to
700 		 * prevent migration.
701 		 */
702 		tlbsync();
703 		sched_unpin();
704 		return;
705 	}
706 
707 	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
708 #ifdef COUNT_XINVLTLB_HITS
709 	ipi_global++;
710 #endif
711 }
712 
713 void
714 smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
715 {
716 	if (invlpgb_works && pmap == kernel_pmap) {
717 		invlpgb(INVLPGB_GLOB | INVLPGB_VA | trunc_page(addr), 0, 0);
718 		tlbsync();
719 		sched_unpin();
720 		return;
721 	}
722 
723 	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
724 #ifdef COUNT_XINVLTLB_HITS
725 	ipi_page++;
726 #endif
727 }
728 
729 void
730 smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
731     smp_invl_cb_t curcpu_cb)
732 {
733 	if (invlpgb_works && pmap == kernel_pmap) {
734 		vm_offset_t va;
735 		uint64_t cnt, total;
736 
737 		addr1 = trunc_page(addr1);
738 		addr2 = round_page(addr2);
739 		total = atop(addr2 - addr1);
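		/*
		 * Invalidate in chunks: a span that is not 2M-aligned or
		 * covers less than one 2M superpage is handled with 4K-page
		 * counts up to the next 2M boundary, while aligned spans are
		 * invalidated whole 2M pages at a time; each INVLPGB request
		 * is capped at invlpgb_maxcnt + 1 entries.
		 */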
740 		for (va = addr1; total > 0;) {
741 			if ((va & PDRMASK) != 0 || total < NPDEPG) {
742 				cnt = atop(NBPDR - (va & PDRMASK));
743 				if (cnt > total)
744 					cnt = total;
745 				if (cnt > invlpgb_maxcnt + 1)
746 					cnt = invlpgb_maxcnt + 1;
747 				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
748 				    cnt - 1);
749 				va += ptoa(cnt);
750 				total -= cnt;
751 			} else {
752 				cnt = total / NPTEPG;
753 				if (cnt > invlpgb_maxcnt + 1)
754 					cnt = invlpgb_maxcnt + 1;
755 				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
756 				    INVLPGB_2M_CNT | (cnt - 1));
757 				va += cnt << PDRSHIFT;
758 				total -= cnt * NPTEPG;
759 			}
760 		}
761 		tlbsync();
762 		sched_unpin();
763 		return;
764 	}
765 
766 	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
767 	    invl_op_pgrng);
768 #ifdef COUNT_XINVLTLB_HITS
769 	ipi_range++;
770 	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
771 #endif
772 }
773 
774 void
775 smp_cache_flush(smp_invl_cb_t curcpu_cb)
776 {
777 	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
778 }
779 
780 /*
781  * Handlers for TLB related IPIs
782  */
783 static void
784 invltlb_handler(pmap_t smp_tlb_pmap)
785 {
786 #ifdef COUNT_XINVLTLB_HITS
787 	xhits_gbl[PCPU_GET(cpuid)]++;
788 #endif /* COUNT_XINVLTLB_HITS */
789 #ifdef COUNT_IPIS
790 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
791 #endif /* COUNT_IPIS */
792 
793 	if (smp_tlb_pmap == kernel_pmap)
794 		invltlb_glob();
795 	else
796 		invltlb();
797 }
798 
799 static void
800 invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
801 {
802 	struct invpcid_descr d;
803 
804 #ifdef COUNT_XINVLTLB_HITS
805 	xhits_gbl[PCPU_GET(cpuid)]++;
806 #endif /* COUNT_XINVLTLB_HITS */
807 #ifdef COUNT_IPIS
808 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
809 #endif /* COUNT_IPIS */
810 
811 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
812 	d.pad = 0;
813 	d.addr = 0;
814 	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
815 	    INVPCID_CTX);
816 }
817 
818 static void
819 invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
820 {
821 	struct invpcid_descr d;
822 
823 #ifdef COUNT_XINVLTLB_HITS
824 	xhits_gbl[PCPU_GET(cpuid)]++;
825 #endif /* COUNT_XINVLTLB_HITS */
826 #ifdef COUNT_IPIS
827 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
828 #endif /* COUNT_IPIS */
829 
830 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
831 	d.pad = 0;
832 	d.addr = 0;
833 	if (smp_tlb_pmap == kernel_pmap) {
834 		/*
835 		 * This invalidation actually needs to clear kernel
836 		 * mappings from the TLB in the current pmap, but
837 		 * since we were asked for the flush in the kernel
838 		 * pmap, achieve it by performing a global flush.
839 		 */
840 		invpcid(&d, INVPCID_CTXGLOB);
841 	} else {
842 		invpcid(&d, INVPCID_CTX);
843 		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
844 		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
845 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
846 	}
847 }
848 
849 static void
850 invltlb_pcid_handler(pmap_t smp_tlb_pmap)
851 {
852 #ifdef COUNT_XINVLTLB_HITS
853 	xhits_gbl[PCPU_GET(cpuid)]++;
854 #endif /* COUNT_XINVLTLB_HITS */
855 #ifdef COUNT_IPIS
856 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
857 #endif /* COUNT_IPIS */
858 
859 	if (smp_tlb_pmap == kernel_pmap) {
860 		invltlb_glob();
861 	} else {
862 		/*
863 		 * The current pmap might not be equal to
864 		 * smp_tlb_pmap.  The clearing of the pm_gen in
865 		 * pmap_invalidate_all() takes care of TLB
866 		 * invalidation when switching to the pmap on this
867 		 * CPU.
868 		 */
869 		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
870 			load_cr3(smp_tlb_pmap->pm_cr3 |
871 			    pmap_get_pcid(smp_tlb_pmap));
872 			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
873 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
874 		}
875 	}
876 }
877 
878 static void
879 invlpg_handler(vm_offset_t smp_tlb_addr1)
880 {
881 #ifdef COUNT_XINVLTLB_HITS
882 	xhits_pg[PCPU_GET(cpuid)]++;
883 #endif /* COUNT_XINVLTLB_HITS */
884 #ifdef COUNT_IPIS
885 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
886 #endif /* COUNT_IPIS */
887 
888 	invlpg(smp_tlb_addr1);
889 }
890 
891 static void
892 invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
893 {
894 	struct invpcid_descr d;
895 
896 #ifdef COUNT_XINVLTLB_HITS
897 	xhits_pg[PCPU_GET(cpuid)]++;
898 #endif /* COUNT_XINVLTLB_HITS */
899 #ifdef COUNT_IPIS
900 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
901 #endif /* COUNT_IPIS */
902 
903 	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
904 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
905 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
906 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
907 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
908 		d.pad = 0;
909 		d.addr = smp_tlb_addr1;
910 		invpcid(&d, INVPCID_ADDR);
911 	}
912 }
913 
914 static void
915 invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
916 {
917 	uint64_t kcr3, ucr3;
918 	uint32_t pcid;
919 
920 #ifdef COUNT_XINVLTLB_HITS
921 	xhits_pg[PCPU_GET(cpuid)]++;
922 #endif /* COUNT_XINVLTLB_HITS */
923 #ifdef COUNT_IPIS
924 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
925 #endif /* COUNT_IPIS */
926 
927 	invlpg(smp_tlb_addr1);
928 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
929 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
930 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
931 		pcid = pmap_get_pcid(smp_tlb_pmap);
932 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
933 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
934 		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
935 	}
936 }
937 
938 static void
939 invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
940 {
941 	vm_offset_t addr;
942 
943 #ifdef COUNT_XINVLTLB_HITS
944 	xhits_rng[PCPU_GET(cpuid)]++;
945 #endif /* COUNT_XINVLTLB_HITS */
946 #ifdef COUNT_IPIS
947 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
948 #endif /* COUNT_IPIS */
949 
950 	addr = smp_tlb_addr1;
951 	do {
952 		invlpg(addr);
953 		addr += PAGE_SIZE;
954 	} while (addr < smp_tlb_addr2);
955 }
956 
957 static void
958 invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
959     vm_offset_t smp_tlb_addr2)
960 {
961 	struct invpcid_descr d;
962 	vm_offset_t addr;
963 
964 #ifdef COUNT_XINVLTLB_HITS
965 	xhits_rng[PCPU_GET(cpuid)]++;
966 #endif /* COUNT_XINVLTLB_HITS */
967 #ifdef COUNT_IPIS
968 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
969 #endif /* COUNT_IPIS */
970 
971 	addr = smp_tlb_addr1;
972 	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
973 		struct invpcid_descr d = { 0 };
974 
975 		invpcid(&d, INVPCID_CTXGLOB);
976 	} else {
977 		do {
978 			invlpg(addr);
979 			addr += PAGE_SIZE;
980 		} while (addr < smp_tlb_addr2);
981 	}
982 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
983 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
984 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
985 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
986 		d.pad = 0;
987 		d.addr = smp_tlb_addr1;
988 		do {
989 			invpcid(&d, INVPCID_ADDR);
990 			d.addr += PAGE_SIZE;
991 		} while (d.addr < smp_tlb_addr2);
992 	}
993 }
994 
995 static void
996 invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
997     vm_offset_t smp_tlb_addr2)
998 {
999 	vm_offset_t addr;
1000 	uint64_t kcr3, ucr3;
1001 	uint32_t pcid;
1002 
1003 #ifdef COUNT_XINVLTLB_HITS
1004 	xhits_rng[PCPU_GET(cpuid)]++;
1005 #endif /* COUNT_XINVLTLB_HITS */
1006 #ifdef COUNT_IPIS
1007 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
1008 #endif /* COUNT_IPIS */
1009 
1010 	addr = smp_tlb_addr1;
1011 	do {
1012 		invlpg(addr);
1013 		addr += PAGE_SIZE;
1014 	} while (addr < smp_tlb_addr2);
1015 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
1016 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
1017 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
1018 		pcid = pmap_get_pcid(smp_tlb_pmap);
1019 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
1020 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
1021 		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
1022 	}
1023 }
1024 
1025 static void
1026 invlcache_handler(void)
1027 {
1028 #ifdef COUNT_IPIS
1029 	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
1030 #endif /* COUNT_IPIS */
1031 	wbinvd();
1032 }
1033 
1034 static void
1035 invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
1036     vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
1037 {
1038 	switch (smp_tlb_op) {
1039 	case INVL_OP_TLB:
1040 		invltlb_handler(smp_tlb_pmap);
1041 		break;
1042 	case INVL_OP_TLB_INVPCID:
1043 		invltlb_invpcid_handler(smp_tlb_pmap);
1044 		break;
1045 	case INVL_OP_TLB_INVPCID_PTI:
1046 		invltlb_invpcid_pti_handler(smp_tlb_pmap);
1047 		break;
1048 	case INVL_OP_TLB_PCID:
1049 		invltlb_pcid_handler(smp_tlb_pmap);
1050 		break;
1051 	case INVL_OP_PGRNG:
1052 		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
1053 		break;
1054 	case INVL_OP_PGRNG_INVPCID:
1055 		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1056 		    smp_tlb_addr2);
1057 		break;
1058 	case INVL_OP_PGRNG_PCID:
1059 		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1060 		    smp_tlb_addr2);
1061 		break;
1062 	case INVL_OP_PG:
1063 		invlpg_handler(smp_tlb_addr1);
1064 		break;
1065 	case INVL_OP_PG_INVPCID:
1066 		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1067 		break;
1068 	case INVL_OP_PG_PCID:
1069 		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1070 		break;
1071 	case INVL_OP_CACHE:
1072 		invlcache_handler();
1073 		break;
1074 	default:
1075 		__assert_unreachable();
1076 		break;
1077 	}
1078 }
1079 
1080 void
1081 invlop_handler(void)
1082 {
1083 	struct pcpu *initiator_pc;
1084 	pmap_t smp_tlb_pmap;
1085 	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1086 	u_int initiator_cpu_id;
1087 	enum invl_op_codes smp_tlb_op;
1088 	uint32_t *scoreboard, smp_tlb_gen;
1089 
1090 	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
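	/*
	 * Rescan this CPU's scoreboard row until no initiator slot is left
	 * zeroed; several initiators may have posted requests before the
	 * IPI was delivered, and each is acknowledged individually.
	 */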
1091 	for (;;) {
1092 		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
1093 		    initiator_cpu_id++) {
1094 			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
1095 				break;
1096 		}
1097 		if (initiator_cpu_id > mp_maxid)
1098 			break;
1099 		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];
1100 
1101 		/*
1102 		 * This acquire fence and its corresponding release
1103 		 * fence in smp_targeted_tlb_shootdown() is between
1104 		 * reading zero scoreboard slot and accessing PCPU of
1105 		 * initiator for pc_smp_tlb values.
1106 		 */
1107 		atomic_thread_fence_acq();
1108 		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
1109 		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
1110 		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
1111 		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
1112 		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;
1113 
1114 		/*
1115 		 * Ensure that we do not make our scoreboard
1116 		 * notification visible to the initiator until the
1117 		 * pc_smp_tlb values are read.  The corresponding
1118 		 * fence is implicitly provided by the barrier in the
1119 		 * IPI send operation before the APIC ICR register
1120 		 * write.
1121 		 *
1122 		 * As an optimization, the request is acknowledged
1123 		 * before the actual invalidation is performed.  It is
1124 		 * safe because the target CPU cannot return to userspace
1125 		 * before the handler finishes.  Only an NMI can preempt the
1126 		 * handler, but the NMI would see the kernel handler frame
1127 		 * and would not touch the not-yet-invalidated user page tables.
1128 		 */
1129 		atomic_thread_fence_acq();
1130 		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);
1131 
1132 		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
1133 		    smp_tlb_addr2);
1134 	}
1135 }
1136