xref: /freebsd/sys/i386/i386/mp_machdep.c (revision 55305b590797524dd1cecfc9406869700e925e51)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/cpuset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <x86/ucode.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

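/*
 * Physical addresses 0x467/0x469 in the BIOS data area hold the warm-boot
 * entry point (offset and segment); they are reached here through the
 * PMAP_MAP_LOW alias of low physical memory.
 */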
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(PMAP_MAP_LOW + 0x0467)
#define WARMBOOT_SEG		(PMAP_MAP_LOW + 0x0469)

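/*
 * Writing BIOS_WARM into the CMOS shutdown status byte (register 0x0f)
 * makes the BIOS skip POST on the next CPU reset and jump directly
 * through the warm-boot vector above.
 */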
#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * This code MUST be enabled both here and in mpboot.s.
 * It traces the very early stages of AP boot by writing checkpoint values
 * into CMOS RAM.  It is normally never needed, hence the primitive way of
 * enabling it: uncomment the define below.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/*
 * Local data and functions.
 */

static void	install_ap_tramp(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);

static char *ap_copyout_buf;
static char *ap_tramp_stack_base;

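/* Physical address of the AP boot trampoline, always placed below 1 MiB. */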
unsigned int boot_address;

#define MiB(v)	(v ## ULL << 20)

/* Allocate memory for the AP trampoline. */
void
alloc_ap_trampoline(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	unsigned int i;
	bool allocated;

	allocated = false;
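	/*
	 * Walk the physmap entries from last to first; 'i' is unsigned,
	 * so decrementing it past zero wraps around and terminates the
	 * loop.
	 */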
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * Find a memory region big enough and below the 1MB boundary
		 * for the trampoline code.
		 * NB: needs to be page aligned.
		 */
		if (physmap[i] >= MiB(1) ||
		    (trunc_page(physmap[i + 1]) - round_page(physmap[i])) <
		    round_page(bootMP_size))
			continue;

		allocated = true;
		/*
		 * Try to steal from the end of the region to mimic previous
		 * behaviour, else fall back to stealing from the start.
		 */
		if (physmap[i + 1] < MiB(1)) {
			boot_address = trunc_page(physmap[i + 1]);
			if ((physmap[i + 1] - boot_address) < bootMP_size)
				boot_address -= round_page(bootMP_size);
			physmap[i + 1] = boot_address;
		} else {
			boot_address = round_page(physmap[i]);
			physmap[i] = boot_address + round_page(bootMP_size);
		}
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
		break;
	}

	if (!allocated) {
		boot_address = basemem * 1024 - bootMP_size;
		if (bootverbose)
			printf(
"Cannot find enough space for the boot trampoline, placing it at %#x\n",
			    boot_address);
	}
}


/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, IDTVEC(ipi_swi),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct i386tss *common_tssp;
	struct region_descriptor r_gdt, r_idt;
	int gsel_tss, myid, x;
	u_int cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(myid);

	/* Get per-cpu data */
	pc = &__pcpu[myid];

	/* prime data page for it to use */
	pcpu_init(pc, myid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, myid);
	pc->pc_apic_id = cpu_apic_ids[myid];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_common_tssp = common_tssp = &(__pcpu[0].pc_common_tssp)[myid];

	fix_cpuid();

	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)common_tssp;
	gdt_segs[GLDT_SEL].ssd_base = (int)ldt;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	PCPU_SET(trampstk, (uintptr_t)ap_tramp_stack_base + TRAMP_STACK_SZ -
	    VM86_STACK_SPACE);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	common_tssp->tss_esp0 = PCPU_GET(trampstk);
	common_tssp->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tssp->tss_ioopt = sizeof(struct i386tss) << 16;
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);
	PCPU_SET(copyout_buf, ap_copyout_buf);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();

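	/*
	 * Reload the IDT in case the BSP's F00F workaround relocated it
	 * while this AP was waiting.
	 */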
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	init_secondary_tail();
}

/*
 * start each AP in our list
 */
#define TMPMAP_START 1
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu;

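	/*
	 * Establish the transient P==V mapping of low memory that the AP
	 * real-mode startup code depends on; it is undone below once all
	 * APs have started.
	 */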
	pmap_remap_lower(true);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* take advantage of the P==V mapping for PTD[0] for AP boot */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack data page */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		bootSTK = (char *)bootstacks[cpu] + kstack_pages *
		    PAGE_SIZE - 4;
		bootAP = cpu;

		ap_tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
		ap_copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");		/* show checkpoints */

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	pmap_remap_lower(false);

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int     x;
	int     size = *(int *) ((u_long)&bootMP_size);
	vm_offset_t va = boot_address;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * Modify addresses in the code we just moved to basemem.
	 * Unfortunately we need fairly detailed info about mpboot.s for
	 * this to work.  Changes to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int)&mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int)&MP_GDT - boot_base);

	/*
	 * Modify the ljmp target for MPentry(); the +1 skips the one-byte
	 * jump opcode so that the 32-bit offset operand is patched.
	 */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = (u_int)MPentry;

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'apic_id'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different
 * hardware we might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/*
	 * Calculate the vector: the STARTUP IPI's vector field encodes the
	 * 4 KB page number of the trampoline, so the AP begins executing
	 * in real mode at boot_address.
	 */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

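	/*
	 * ipi_startup() performs the MP specification's universal start-up
	 * algorithm: an INIT IPI followed by two STARTUP IPIs aimed at the
	 * trampoline page.
	 */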
	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/*
 * Variables needed for SMP TLB shootdown; written by the initiating CPU
 * while holding smp_ipi_mtx and read by the IPI handlers below.
 */
vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
pmap_t smp_tlb_pmap;
volatile uint32_t smp_tlb_generation;

/*
 * Used by pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs which are
 * to be signalled with the invalidation IPI.  Vector specifies which
 * invalidation IPI is used.  As an optimization, the curcpu_cb
 * callback is invoked on the calling CPU while waiting for remote
 * CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2, smp_invl_cb_t curcpu_cb)
{
	cpuset_t other_cpus;
	volatile uint32_t *p_cpudone;
	uint32_t generation;
	int cpu;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
		curcpu_cb(pmap, addr1, addr2);
		return;
	}

	sched_pin();

	/*
	 * Check for other cpus.  Return if none.
	 */
	if (CPU_ISFULLSET(&mask)) {
		if (mp_ncpus <= 1)
			goto nospinexit;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			goto nospinexit;
	}

	KASSERT((read_eflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	generation = ++smp_tlb_generation;
	if (CPU_ISFULLSET(&mask)) {
		ipi_all_but_self(vector);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		ipi_selected(mask, vector);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &other_cpus) {
		p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
		while (*p_cpudone != generation)
			ia32_pause();
	}
	mtx_unlock_spin(&smp_ipi_mtx);
	sched_unpin();
	return;

nospinexit:
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1, addr2,
	    curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL, 0, 0,
	    curcpu_cb);
}

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since invalidating the TLB is a serializing operation.
	 */
	generation = smp_tlb_generation;
	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	PCPU_SET(smp_tlb_done, generation);
}

void
invlpg_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap)
		invlpg(smp_tlb_addr1);
	PCPU_SET(smp_tlb_done, generation);
}

void
invlrng_handler(void)
{
	vm_offset_t addr, addr2;
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap) {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < addr2);
	}

	PCPU_SET(smp_tlb_done, generation);
}

void
invlcache_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since wbinvd is a serializing instruction.  Without the
	 * temporary, we'd wait for wbinvd to complete, then the read
	 * would execute, then the dependent write, which must then
	 * complete before return from interrupt.
	 */
	generation = smp_tlb_generation;
	wbinvd();
	PCPU_SET(smp_tlb_done, generation);
}
739