/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

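/*
 * The warm-reset vector in the BIOS data area (physical 0x467/0x469,
 * addressed here through KERNBASE) and the CMOS shutdown status byte form
 * the legacy BIOS warm-boot handshake; on non-EFI systems start_all_aps()
 * points them at the AP trampoline before the startup IPIs are sent.
 */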
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

#define AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
static char *doublefault_stack;
static char *mce_stack;
static char *nmi_stack;
static char *dbg_stack;
void *bootpcpu;

extern u_int mptramp_la57;
extern u_int mptramp_nx;
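
/*
 * Method used to flush TLBs on remote CPUs.  It defaults to the native
 * IPI-based implementation below and can be replaced by platform code
 * (e.g., a paravirtualized flush).
 */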
smp_targeted_tlb_shootdown_t smp_targeted_tlb_shootdown =
    &smp_targeted_tlb_shootdown_native;

/*
 * Local data and functions.
 */

static int start_ap(int apic_id, vm_paddr_t boot_address);

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU offline */
	setidt(IPI_OFF, pti ? IDTVEC(cpuoff_pti) : IDTVEC(cpuoff),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

void
cpu_mp_stop(void)
{
	cpuset_t other_cpus = all_cpus;

	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	offline_cpus(other_cpus);
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
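	/* Top of the per-CPU PTI trampoline stack, kept 16-byte aligned. */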
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;
	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
	pc->pc_kpmap_store.pm_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
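	/*
	 * With the I/O permission map base pointing past the end of the
	 * bitmap area, user-mode port access is denied unless a bitmap
	 * is explicitly installed.
	 */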
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
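	/*
	 * The TSS (GPROC0) and user LDT descriptors are 16-byte system
	 * descriptors occupying two GDT slots each, so they are skipped
	 * here; the TSS descriptor is installed separately below.
	 */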
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);	/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);	/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);	/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

static void
amd64_mp_alloc_pcpu(void)
{
	vm_page_t m;
	int cpu;

	/* Allocate pcpu areas to the correct domain. */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
#ifdef NUMA
		m = NULL;
		if (vm_ndomains > 1) {
			m = vm_page_alloc_noobj_domain(
			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]),
			    VM_ALLOC_ZERO);
		}
		if (m == NULL)
#endif
			m = vm_page_alloc_noobj(VM_ALLOC_ZERO);
		if (m == NULL)
			panic("cannot alloc pcpu page for cpu %d", cpu);
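		/*
		 * Map the page allocated above (domain-local when NUMA
		 * is active) at the fixed per-CPU virtual address.
		 */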
		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
	}
}

/*
 * start each AP in our list
 */
int
start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	amd64_mp_alloc_pcpu();
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);

	/* Create a transient 1:1 mapping of low 4G */
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
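	/*
	 * Temporarily splice the identity map of low memory into slot 0
	 * of the kernel's top-level page table.  The trampoline executes
	 * from a low physical address with the kernel %cr3 loaded, so it
	 * needs this mapping until it jumps to the kernel's high virtual
	 * addresses; the mapping is torn down once all APs are up.
	 */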
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
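	/*
	 * The STARTUP IPI vector field holds the 4 KB page number of the
	 * real-mode entry point, which is why the trampoline page must
	 * reside below 1 MB.
	 */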
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of the others.
 * The initiator enters a critical section, then fills its local PCPU
 * shootdown info (pc_smp_tlb_ vars), then clears the scoreboard
 * generation at location (cpu, my_cpuid) for each target cpu.  After
 * that, an IPI is sent to all targets, which scan for zeroed
 * scoreboard generation words.  Upon finding such a word, a target
 * reads the shootdown data from the initiating cpu's pcpu and sets
 * its generation.  Meanwhile, the initiator loops waiting for all
 * zeroed generations in the scoreboard to update.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
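	/*
	 * Slots start at a non-zero value; a zero slot later means an
	 * invalidation request is pending (see the scoreboard comment
	 * above).
	 */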
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);

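/*
 * The scoreboard is a dense (mp_maxid + 1) by (mp_maxid + 1) array of
 * generation words, indexed first by target CPU and then by initiator CPU.
 */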
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * Function must be called with the thread pinned, and it unpins on
 * completion.
 */
void
smp_targeted_tlb_shootdown_native(pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * The initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin the initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
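	/*
	 * Zero is reserved to mean "shootdown pending" in the scoreboard,
	 * so skip it when the generation counter wraps.
	 */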
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    curcpu, cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
	} else {
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
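	/*
	 * On CPUs implementing INVLPGB (AMD), a kernel-wide flush can be
	 * broadcast to all processors in hardware, avoiding the IPI-based
	 * shootdown entirely.
	 */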
	if (invlpgb_works && pmap == kernel_pmap) {
		invlpgb(INVLPGB_GLOB, 0, 0);

		/*
		 * TLBSYNC syncs only against INVLPGB executed on the
		 * same CPU.  Since current thread is pinned by
		 * caller, we do not need to enter critical section to
		 * prevent migration.
		 */
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	if (invlpgb_works && pmap == kernel_pmap) {
		invlpgb(INVLPGB_GLOB | INVLPGB_VA | trunc_page(addr), 0, 0);
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	if (invlpgb_works && pmap == kernel_pmap) {
		vm_offset_t va;
		uint64_t cnt, total;

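		/*
		 * Issue INVLPGBs in chunks: portions not aligned to a
		 * 2 MB boundary (or shorter than 2 MB) are invalidated
		 * with 4 KB page counts, larger aligned portions with
		 * 2 MB page counts, each request capped at
		 * invlpgb_maxcnt + 1 pages.
		 */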
		addr1 = trunc_page(addr1);
		addr2 = round_page(addr2);
		total = atop(addr2 - addr1);
		for (va = addr1; total > 0;) {
			if ((va & PDRMASK) != 0 || total < NPDEPG) {
				cnt = atop(NBPDR - (va & PDRMASK));
				if (cnt > total)
					cnt = total;
				if (cnt > invlpgb_maxcnt + 1)
					cnt = invlpgb_maxcnt + 1;
				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
				    cnt - 1);
				va += ptoa(cnt);
				total -= cnt;
			} else {
				cnt = total / NPTEPG;
				if (cnt > invlpgb_maxcnt + 1)
					cnt = invlpgb_maxcnt + 1;
				invlpgb(INVLPGB_GLOB | INVLPGB_VA | va, 0,
				    INVLPGB_2M_CNT | (cnt - 1));
				va += cnt << PDRSHIFT;
				total -= cnt * NPTEPG;
			}
		}
		tlbsync();
		sched_unpin();
		return;
	}

	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = pmap_get_pcid(smp_tlb_pmap);
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			load_cr3(smp_tlb_pmap->pm_cr3 |
			    pmap_get_pcid(smp_tlb_pmap));
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
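	/*
	 * If the target pmap is the current one, uses PTI, and no full
	 * user page-table flush is already pending, also invalidate the
	 * page in the user page-table PCID.
	 */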
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
		struct invpcid_descr d = { 0 };

		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < smp_tlb_addr2);
	}
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < smp_tlb_addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < smp_tlb_addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = pmap_get_pcid(smp_tlb_pmap);
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

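	/*
	 * Scan our row of the scoreboard: a zeroed slot in column N means
	 * initiator CPU N has a shootdown request pending for us.
	 */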
	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() are between
		 * reading a zeroed scoreboard slot and accessing the
		 * initiator's PCPU for the pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, but an NMI would see the
		 * kernel handler frame and not touch the
		 * not-yet-invalidated user page tables.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}