1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1996, by Steve Passe
5 * All rights reserved.
6 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. The name of the developer may NOT be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 * 3. Neither the name of the author nor the names of any co-contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * Local APIC support on Pentium and later processors.
34 */
35
36 #include <sys/cdefs.h>
37 #include "opt_atpic.h"
38
39 #include "opt_ddb.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/asan.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/msan.h>
49 #include <sys/mutex.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/refcount.h>
53 #include <sys/sched.h>
54 #include <sys/smp.h>
55 #include <sys/sysctl.h>
56 #include <sys/timeet.h>
57 #include <sys/timetc.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <x86/apicreg.h>
63 #include <machine/clock.h>
64 #include <machine/cpufunc.h>
65 #include <machine/cputypes.h>
66 #include <machine/fpu.h>
67 #include <machine/frame.h>
68 #include <machine/intr_machdep.h>
69 #include <x86/apicvar.h>
70 #include <x86/mca.h>
71 #include <machine/md_var.h>
72 #include <machine/smp.h>
73 #include <machine/specialreg.h>
74 #include <x86/init.h>
75
76 #ifdef DDB
77 #include <sys/interrupt.h>
78 #include <ddb/ddb.h>
79 #endif
80
81 #ifdef __amd64__
82 #define SDT_APIC SDT_SYSIGT
83 #define GSEL_APIC 0
84 #else
85 #define SDT_APIC SDT_SYS386IGT
86 #define GSEL_APIC GSEL(GCODE_SEL, SEL_KPL)
87 #endif
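
/*
 * Note: on amd64 the final argument to setidt() is an IST index
 * rather than a code segment selector, so GSEL_APIC of 0 requests no
 * interrupt-stack switch; on i386 it is the kernel code selector.
 */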
88
89 static MALLOC_DEFINE(M_LAPIC, "local_apic", "Local APIC items");
90
91 /* Sanity checks on IDT vectors. */
92 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
93 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
94 CTASSERT(APIC_LOCAL_INTS == 240);
95 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
96
97 /*
98 * I/O interrupts use non-negative IRQ values.  The negative values
99 * below are used to mark unused IDT entries or IDT entries reserved
100 * for a non-I/O interrupt.
101 */
102 #define IRQ_FREE -1
103 #define IRQ_TIMER -2
104 #define IRQ_SYSCALL -3
105 #define IRQ_DTRACE_RET -4
106 #define IRQ_EVTCHN -5
107
108 enum lat_timer_mode {
109 LAT_MODE_UNDEF = 0,
110 LAT_MODE_PERIODIC = 1,
111 LAT_MODE_ONESHOT = 2,
112 LAT_MODE_DEADLINE = 3,
113 };
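
/*
 * LAT_MODE_PERIODIC and LAT_MODE_ONESHOT drive the timer from the
 * local APIC's down-counter; LAT_MODE_DEADLINE instead programs the
 * IA32_TSC_DEADLINE MSR, firing when the TSC reaches the given value.
 */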
114
115 /*
116 * Support for local APICs. Local APICs manage interrupts on each
117 * individual processor as opposed to I/O APICs which receive interrupts
118 * from I/O devices and then forward them on to the local APICs.
119 *
120 * Local APICs can also send interrupts to each other thus providing the
121 * mechanism for IPIs.
122 */
123
124 struct lvt {
125 u_int lvt_edgetrigger:1;
126 u_int lvt_activehi:1;
127 u_int lvt_masked:1;
128 u_int lvt_active:1;
129 u_int lvt_mode:16;
130 u_int lvt_vector:8;
131 u_int lvt_reg;
132 const char *lvt_desc;
133 };
134
135 struct lapic {
136 struct lvt la_lvts[APIC_LVT_MAX + 1];
137 struct lvt la_elvts[APIC_ELVT_MAX + 1];
138 u_int la_id:8;
139 u_int la_cluster:4;
140 u_int la_cluster_id:2;
141 u_int la_present:1;
142 u_long *la_timer_count;
143 uint64_t la_timer_period;
144 enum lat_timer_mode la_timer_mode;
145 uint32_t lvt_timer_base;
146 uint32_t lvt_timer_last;
147 /* Include IDT_SYSCALL to make indexing easier. */
148 int la_ioint_irqs[APIC_NUM_IOINTS + 1];
149 } static *lapics;
150
151 /* Global defaults for local APIC LVT entries. */
152 static struct lvt lvts[] = {
153 /* LINT0: masked ExtINT */
154 [APIC_LVT_LINT0] = {
155 .lvt_edgetrigger = 1,
156 .lvt_activehi = 1,
157 .lvt_masked = 1,
158 .lvt_active = 1,
159 .lvt_mode = APIC_LVT_DM_EXTINT,
160 .lvt_vector = 0,
161 .lvt_reg = LAPIC_LVT_LINT0,
162 .lvt_desc = "LINT0",
163 },
164 /* LINT1: NMI */
165 [APIC_LVT_LINT1] = {
166 .lvt_edgetrigger = 1,
167 .lvt_activehi = 1,
168 .lvt_masked = 0,
169 .lvt_active = 1,
170 .lvt_mode = APIC_LVT_DM_NMI,
171 .lvt_vector = 0,
172 .lvt_reg = LAPIC_LVT_LINT1,
173 .lvt_desc = "LINT1",
174 },
175 [APIC_LVT_TIMER] = {
176 .lvt_edgetrigger = 1,
177 .lvt_activehi = 1,
178 .lvt_masked = 1,
179 .lvt_active = 1,
180 .lvt_mode = APIC_LVT_DM_FIXED,
181 .lvt_vector = APIC_TIMER_INT,
182 .lvt_reg = LAPIC_LVT_TIMER,
183 .lvt_desc = "TIMER",
184 },
185 [APIC_LVT_ERROR] = {
186 .lvt_edgetrigger = 1,
187 .lvt_activehi = 1,
188 .lvt_masked = 0,
189 .lvt_active = 1,
190 .lvt_mode = APIC_LVT_DM_FIXED,
191 .lvt_vector = APIC_ERROR_INT,
192 .lvt_reg = LAPIC_LVT_ERROR,
193 .lvt_desc = "ERROR",
194 },
195 [APIC_LVT_PMC] = {
196 .lvt_edgetrigger = 1,
197 .lvt_activehi = 1,
198 .lvt_masked = 1,
199 .lvt_active = 1,
200 .lvt_mode = APIC_LVT_DM_NMI,
201 .lvt_vector = 0,
202 .lvt_reg = LAPIC_LVT_PCINT,
203 .lvt_desc = "PMC",
204 },
205 [APIC_LVT_THERMAL] = {
206 .lvt_edgetrigger = 1,
207 .lvt_activehi = 1,
208 .lvt_masked = 1,
209 .lvt_active = 1,
210 .lvt_mode = APIC_LVT_DM_FIXED,
211 .lvt_vector = APIC_THERMAL_INT,
212 .lvt_reg = LAPIC_LVT_THERMAL,
213 .lvt_desc = "THERM",
214 },
215 [APIC_LVT_CMCI] = {
216 .lvt_edgetrigger = 1,
217 .lvt_activehi = 1,
218 .lvt_masked = 1,
219 .lvt_active = 1,
220 .lvt_mode = APIC_LVT_DM_FIXED,
221 .lvt_vector = APIC_CMC_INT,
222 .lvt_reg = LAPIC_LVT_CMCI,
223 .lvt_desc = "CMCI",
224 },
225 };
226
227 /* Global defaults for AMD local APIC ELVT entries. */
228 static struct lvt elvts[] = {
229 [APIC_ELVT_IBS] = {
230 .lvt_edgetrigger = 1,
231 .lvt_activehi = 1,
232 .lvt_masked = 1,
233 .lvt_active = 0,
234 .lvt_mode = APIC_LVT_DM_FIXED,
235 .lvt_vector = 0,
236 .lvt_reg = LAPIC_EXT_LVT0,
237 .lvt_desc = "ELVT0",
238 },
239 [APIC_ELVT_MCA] = {
240 .lvt_edgetrigger = 1,
241 .lvt_activehi = 1,
242 .lvt_masked = 1,
243 .lvt_active = 0,
244 .lvt_mode = APIC_LVT_DM_FIXED,
245 .lvt_vector = APIC_CMC_INT,
246 .lvt_reg = LAPIC_EXT_LVT1,
247 .lvt_desc = "MCA",
248 },
249 [APIC_ELVT_DEI] = {
250 .lvt_edgetrigger = 1,
251 .lvt_activehi = 1,
252 .lvt_masked = 1,
253 .lvt_active = 0,
254 .lvt_mode = APIC_LVT_DM_FIXED,
255 .lvt_vector = 0,
256 .lvt_reg = LAPIC_EXT_LVT2,
257 .lvt_desc = "ELVT2",
258 },
259 [APIC_ELVT_SBI] = {
260 .lvt_edgetrigger = 1,
261 .lvt_activehi = 1,
262 .lvt_masked = 1,
263 .lvt_active = 0,
264 .lvt_mode = APIC_LVT_DM_FIXED,
265 .lvt_vector = 0,
266 .lvt_reg = LAPIC_EXT_LVT3,
267 .lvt_desc = "ELVT3",
268 },
269 };
270
271 static inthand_t *ioint_handlers[] = {
272 NULL, /* 0 - 31 */
273 IDTVEC(apic_isr1), /* 32 - 63 */
274 IDTVEC(apic_isr2), /* 64 - 95 */
275 IDTVEC(apic_isr3), /* 96 - 127 */
276 IDTVEC(apic_isr4), /* 128 - 159 */
277 IDTVEC(apic_isr5), /* 160 - 191 */
278 IDTVEC(apic_isr6), /* 192 - 223 */
279 IDTVEC(apic_isr7), /* 224 - 255 */
280 };
281
282 static inthand_t *ioint_pti_handlers[] = {
283 NULL, /* 0 - 31 */
284 IDTVEC(apic_isr1_pti), /* 32 - 63 */
285 IDTVEC(apic_isr2_pti), /* 64 - 95 */
286 IDTVEC(apic_isr3_pti), /* 96 - 127 */
287 IDTVEC(apic_isr4_pti), /* 128 - 159 */
288 IDTVEC(apic_isr5_pti), /* 160 - 191 */
289 IDTVEC(apic_isr6_pti), /* 192 - 223 */
290 IDTVEC(apic_isr7_pti), /* 224 - 255 */
291 };
292
293 static u_int32_t lapic_timer_divisors[] = {
294 APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
295 APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
296 };
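
/*
 * The table above is indexed by log2 of the divisor:
 * lapic_timer_set_divisor() selects entry ffs(divisor) - 1, so a
 * divisor of 2^n maps to entry n.
 */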
297
298 extern inthand_t IDTVEC(rsvd_pti), IDTVEC(rsvd);
299
300 volatile char *lapic_map;
301 vm_paddr_t lapic_paddr = DEFAULT_APIC_BASE;
302 int x2apic_mode;
303 int lapic_eoi_suppression;
304 static int lapic_timer_tsc_deadline;
305 static u_long lapic_timer_divisor, count_freq;
306 static struct eventtimer lapic_et;
307 #ifdef SMP
308 static uint64_t lapic_ipi_wait_mult;
309 static int __read_mostly lapic_ds_idle_timeout = 1000000;
310 #endif
311 unsigned int max_apic_id;
312 static int pcint_refcnt = 0;
313
314 SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
315 "APIC options");
316 SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
317 SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
318 &lapic_eoi_suppression, 0, "");
319 SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
320 &lapic_timer_tsc_deadline, 0, "");
321 #ifdef SMP
322 SYSCTL_INT(_hw_apic, OID_AUTO, ds_idle_timeout, CTLFLAG_RWTUN,
323 &lapic_ds_idle_timeout, 0,
324 "timeout (in us) for APIC Delivery Status to become Idle (xAPIC only)");
325 #endif
326
327 static void lapic_calibrate_initcount(struct lapic *la);
328
329 /*
330 * Calculate the max index of the present LVT entry from the value of
331 * the LAPIC version register.
332 */
333 static int
334 lapic_maxlvt(uint32_t version)
335 {
336 return ((version & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
337 }
338
339 /*
340 * Use __nosanitizethread to exempt the LAPIC I/O accessors from KCSan
341 * instrumentation. Otherwise, if x2APIC is not available, use of the global
342 * lapic_map will generate a KCSan false positive. While the mapping is
343 * shared among all CPUs, the physical access will always take place on the
344 * local CPU's APIC, so there isn't in fact a race here. Furthermore, the
345 * KCSan warning printf can cause a panic if issued during LAPIC access,
346 * due to attempted recursive use of event timer resources.
347 * due to attempted recursive use of event timer resources.
348 */
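
/*
 * Register offsets are defined in x2APIC units (one register index
 * per MSR, starting at MSR_APIC_000).  In xAPIC mode each register
 * instead occupies a 16-byte slot in the MMIO page, so the index is
 * scaled by LAPIC_MEM_MUL to form a byte offset into lapic_map.
 */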
349 static uint32_t __nosanitizethread
350 lapic_read32(enum LAPIC_REGISTERS reg)
351 {
352 uint32_t res;
353
354 if (x2apic_mode) {
355 res = rdmsr32(MSR_APIC_000 + reg);
356 } else {
357 res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
358 }
359 return (res);
360 }
361
362 static void __nosanitizethread
363 lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
364 {
365
366 if (x2apic_mode) {
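/*
 * x2APIC MSR writes are not serializing; the fence pair below orders
 * this write after all earlier loads and stores (e.g. so an EOI
 * cannot pass the handler's memory accesses).
 */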
367 mfence();
368 lfence();
369 wrmsr(MSR_APIC_000 + reg, val);
370 } else {
371 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
372 }
373 }
374
375 static void __nosanitizethread
376 lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
377 {
378
379 if (x2apic_mode) {
380 wrmsr(MSR_APIC_000 + reg, val);
381 } else {
382 *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
383 }
384 }
385
386 #ifdef SMP
387 static uint64_t
388 lapic_read_icr_lo(void)
389 {
390
391 return (lapic_read32(LAPIC_ICR_LO));
392 }
393
394 static void
395 lapic_write_icr(uint32_t vhi, uint32_t vlo)
396 {
397 register_t saveintr;
398 uint64_t v;
399
400 if (x2apic_mode) {
401 v = ((uint64_t)vhi << 32) | vlo;
402 mfence();
403 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
404 } else {
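/*
 * In xAPIC mode the ICR is split across two registers and the
 * IPI is issued by the write to the low word, so block
 * interrupts to keep a nested IPI sent from an interrupt
 * handler from clobbering ICR_HI between the two writes.
 */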
405 saveintr = intr_disable();
406 lapic_write32(LAPIC_ICR_HI, vhi);
407 lapic_write32(LAPIC_ICR_LO, vlo);
408 intr_restore(saveintr);
409 }
410 }
411
412 static void
413 lapic_write_icr_lo(uint32_t vlo)
414 {
415
416 if (x2apic_mode) {
417 mfence();
418 wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, vlo);
419 } else {
420 lapic_write32(LAPIC_ICR_LO, vlo);
421 }
422 }
423
424 static void
425 lapic_write_self_ipi(uint32_t vector)
426 {
427
428 KASSERT(x2apic_mode, ("SELF IPI write in xAPIC mode"));
429 wrmsr(MSR_APIC_000 + LAPIC_SELF_IPI, vector);
430 }
431 #endif /* SMP */
432
433 static void
434 lapic_enable_x2apic(void)
435 {
436 uint64_t apic_base;
437
438 apic_base = rdmsr(MSR_APICBASE);
439 apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
440 wrmsr(MSR_APICBASE, apic_base);
441 }
442
443 bool
444 lapic_is_x2apic(void)
445 {
446 uint64_t apic_base;
447
448 apic_base = rdmsr(MSR_APICBASE);
449 return ((apic_base & (APICBASE_X2APIC | APICBASE_ENABLED)) ==
450 (APICBASE_X2APIC | APICBASE_ENABLED));
451 }
452
453 static void lapic_early_mask_vecs(void);
454 static void lapic_enable(void);
455 static void lapic_resume(struct pic *pic, bool suspend_cancelled);
456 static void lapic_timer_oneshot(struct lapic *);
457 static void lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
458 static void lapic_timer_periodic(struct lapic *);
459 static void lapic_timer_deadline(struct lapic *);
460 static void lapic_timer_stop(struct lapic *);
461 static void lapic_timer_set_divisor(u_int divisor);
462 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
463 static int lapic_et_start(struct eventtimer *et,
464 sbintime_t first, sbintime_t period);
465 static int lapic_et_stop(struct eventtimer *et);
466 static u_int apic_idt_to_irq(u_int apic_id, u_int vector);
467 static void lapic_set_tpr(u_int vector);
468
469 struct pic lapic_pic = { .pic_resume = lapic_resume };
470
471 static uint32_t
472 lvt_mode_impl(struct lapic *la, struct lvt *lvt, u_int pin, uint32_t value)
473 {
474
475 value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
476 APIC_LVT_VECTOR);
477 if (lvt->lvt_edgetrigger == 0)
478 value |= APIC_LVT_TM;
479 if (lvt->lvt_activehi == 0)
480 value |= APIC_LVT_IIPP_INTALO;
481 if (lvt->lvt_masked)
482 value |= APIC_LVT_M;
483 value |= lvt->lvt_mode;
484 switch (lvt->lvt_mode) {
485 case APIC_LVT_DM_NMI:
486 case APIC_LVT_DM_SMI:
487 case APIC_LVT_DM_INIT:
488 case APIC_LVT_DM_EXTINT:
489 if (!lvt->lvt_edgetrigger) {
490 if (bootverbose) {
491 printf(
492 "lapic%u: Forcing LINT%u to edge trigger\n",
493 la->la_id, pin);
494 }
495 value &= ~APIC_LVT_TM;
496 }
497 /* Use a vector of 0. */
498 break;
499 case APIC_LVT_DM_FIXED:
500 value |= lvt->lvt_vector;
501 break;
502 default:
503 panic("bad APIC LVT delivery mode: %#x\n", value);
504 }
505 return (value);
506 }
507
508 static uint32_t
509 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
510 {
511 struct lvt *lvt;
512
513 KASSERT(pin <= APIC_LVT_MAX,
514 ("%s: pin %u out of range", __func__, pin));
515 if (la->la_lvts[pin].lvt_active)
516 lvt = &la->la_lvts[pin];
517 else
518 lvt = &lvts[pin];
519
520 return (lvt_mode_impl(la, lvt, pin, value));
521 }
522
523 static uint32_t
524 elvt_mode(struct lapic *la, u_int idx, uint32_t value)
525 {
526 struct lvt *elvt;
527
528 KASSERT(idx <= APIC_ELVT_MAX,
529 ("%s: idx %u out of range", __func__, idx));
530
531 elvt = &la->la_elvts[idx];
532 KASSERT(elvt->lvt_active, ("%s: ELVT%u is not active", __func__, idx));
533 KASSERT(elvt->lvt_edgetrigger,
534 ("%s: ELVT%u is not edge triggered", __func__, idx));
535 KASSERT(elvt->lvt_activehi,
536 ("%s: ELVT%u is not active high", __func__, idx));
537 return (lvt_mode_impl(la, elvt, idx, value));
538 }
539
540 /*
541 * Map the local APIC and setup necessary interrupt vectors.
542 */
543 void
544 lapic_init(vm_paddr_t addr)
545 {
546 #ifdef SMP
547 uint64_t r, r1, r2, rx;
548 #endif
549 uint32_t ver;
550 int i;
551 bool arat;
552
553 TSENTER();
554
555 /*
556 * Enable x2APIC mode if possible. Map the local APIC
557 * registers page.
558 *
559 * Keep the LAPIC registers page mapped uncached in x2APIC
560 * mode as well, so that the direct map page attribute is set
561 * to uncached.  This is needed to work around CPU errata
562 * present on all Intel processors.
563 */
564 KASSERT(trunc_page(addr) == addr,
565 ("local APIC not aligned on a page boundary"));
566 lapic_paddr = addr;
567 lapic_map = pmap_mapdev(addr, PAGE_SIZE);
568 if (x2apic_mode) {
569 lapic_enable_x2apic();
570 lapic_map = NULL;
571 }
572
573 /* Setup the spurious interrupt handler. */
574 setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
575 GSEL_APIC);
576
577 /* Perform basic initialization of the BSP's local APIC. */
578 lapic_enable();
579 lapic_early_mask_vecs();
580
581 /* Set BSP's per-CPU local APIC ID. */
582 PCPU_SET(apic_id, lapic_id());
583
584 /* Local APIC timer interrupt. */
585 setidt(APIC_TIMER_INT, pti ? IDTVEC(timerint_pti) : IDTVEC(timerint),
586 SDT_APIC, SEL_KPL, GSEL_APIC);
587
588 /* Local APIC error interrupt. */
589 setidt(APIC_ERROR_INT, pti ? IDTVEC(errorint_pti) : IDTVEC(errorint),
590 SDT_APIC, SEL_KPL, GSEL_APIC);
591
592 /* XXX: Thermal interrupt */
593
594 /* Local APIC CMCI. */
595 setidt(APIC_CMC_INT, pti ? IDTVEC(cmcint_pti) : IDTVEC(cmcint),
596 SDT_APIC, SEL_KPL, GSEL_APIC);
597
598 if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
599 /* Set if APIC timer runs in C3. */
600 arat = (cpu_power_eax & CPUTPM1_ARAT);
601
602 bzero(&lapic_et, sizeof(lapic_et));
603 lapic_et.et_name = "LAPIC";
604 lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
605 ET_FLAGS_PERCPU;
606 lapic_et.et_quality = 600;
607 if (!arat) {
608 lapic_et.et_flags |= ET_FLAGS_C3STOP;
609 lapic_et.et_quality = 100;
610 }
611 if ((cpu_feature & CPUID_TSC) != 0 &&
612 (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
613 tsc_is_invariant && tsc_freq != 0) {
614 lapic_timer_tsc_deadline = 1;
615 TUNABLE_INT_FETCH("hw.apic.timer_tsc_deadline",
616 &lapic_timer_tsc_deadline);
617 }
618
619 lapic_et.et_frequency = 0;
620 /* We don't know the frequency yet, so start with a guess. */
621 lapic_et.et_min_period = 0x00001000LL;
622 lapic_et.et_max_period = SBT_1S;
623 lapic_et.et_start = lapic_et_start;
624 lapic_et.et_stop = lapic_et_stop;
625 lapic_et.et_priv = NULL;
626 et_register(&lapic_et);
627 }
628
629 /*
630 * Set lapic_eoi_suppression after lapic_enable(), so as not to
631 * enable suppression in the hardware prematurely.  Note that we
632 * enable suppression by default even when the system has only
633 * one I/O APIC, since otherwise the EOI is broadcast to all
634 * APIC agents, including CPUs.
635 *
636 * It seems that at least some KVM versions report the
637 * EOI_SUPPRESSION bit, but auto-EOI does not work.
638 */
639 ver = lapic_read32(LAPIC_VERSION);
640 if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
641 lapic_eoi_suppression = 1;
642 if (vm_guest == VM_GUEST_KVM) {
643 if (bootverbose)
644 printf(
645 "KVM -- disabling lapic eoi suppression\n");
646 lapic_eoi_suppression = 0;
647 }
648 TUNABLE_INT_FETCH("hw.apic.eoi_suppression",
649 &lapic_eoi_suppression);
650 }
651
652 #ifdef SMP
653 #define LOOPS 1000
654 /*
655 * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
656 * lapic_ipi_wait_mult contains the number of iterations which
657 * approximately delay execution for 1 microsecond (the
658 * argument to lapic_ipi_wait() is in microseconds).
659 *
660 * We assume that TSC is present and already measured.
661 * Possible TSC frequency jumps are irrelevant to the
662 * calibration loop below, the CPU clock management code is
663 * not yet started, and we do not enter sleep states.
664 * not yet started, and we do not enter sleep states. */
665 KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
666 ("TSC not initialized"));
667 if (!x2apic_mode) {
668 r = rdtsc();
669 for (rx = 0; rx < LOOPS; rx++) {
670 (void)lapic_read_icr_lo();
671 ia32_pause();
672 }
673 r = rdtsc() - r;
674 r1 = tsc_freq * LOOPS;
675 r2 = r * 1000000;
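/*
 * The LOOPS reads above took r / tsc_freq seconds, so iterations
 * per microsecond = LOOPS * tsc_freq / (r * 10^6), i.e. r1 / r2,
 * clamped below at 1.
 */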
676 lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
677 if (bootverbose) {
678 printf("LAPIC: ipi_wait() us multiplier %ju (r %ju "
679 "tsc %ju)\n", (uintmax_t)lapic_ipi_wait_mult,
680 (uintmax_t)r, (uintmax_t)tsc_freq);
681 }
682 }
683 #undef LOOPS
684 #endif /* SMP */
685
686 TSEXIT();
687 }
688
689 /*
690 * Create a local APIC instance.
691 */
692 void
693 lapic_create(u_int apic_id, int boot_cpu)
694 {
695 int i;
696
697 if (apic_id > max_apic_id) {
698 printf("APIC: Ignoring local APIC with ID %u\n", apic_id);
699 if (boot_cpu)
700 panic("Can't ignore BSP");
701 return;
702 }
703 KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
704 apic_id));
705
706 /*
707 * Assume no local LVT overrides and a cluster of 0 and
708 * intra-cluster ID of 0.
709 */
710 lapics[apic_id].la_present = 1;
711 lapics[apic_id].la_id = apic_id;
712 for (i = 0; i <= APIC_LVT_MAX; i++) {
713 lapics[apic_id].la_lvts[i] = lvts[i];
714 lapics[apic_id].la_lvts[i].lvt_active = 0;
715 }
716 for (i = 0; i <= APIC_ELVT_MAX; i++) {
717 lapics[apic_id].la_elvts[i] = elvts[i];
718 lapics[apic_id].la_elvts[i].lvt_active = 0;
719 }
720 for (i = 0; i <= APIC_NUM_IOINTS; i++)
721 lapics[apic_id].la_ioint_irqs[i] = IRQ_FREE;
722 lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
723 lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
724 IRQ_TIMER;
725 #ifdef KDTRACE_HOOKS
726 lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
727 IRQ_DTRACE_RET;
728 #endif
729 #ifdef XENHVM
730 lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
731 #endif
732
733 #ifdef SMP
734 cpu_add(apic_id, boot_cpu);
735 #endif
736 }
737
738 static inline uint32_t
739 amd_read_ext_features(void)
740 {
741 uint32_t version;
742
743 if (cpu_vendor_id != CPU_VENDOR_AMD &&
744 cpu_vendor_id != CPU_VENDOR_HYGON)
745 return (0);
746 version = lapic_read32(LAPIC_VERSION);
747 if ((version & APIC_VER_AMD_EXT_SPACE) != 0)
748 return (lapic_read32(LAPIC_EXT_FEATURES));
749 else
750 return (0);
751 }
752
753 static inline uint32_t
754 amd_read_elvt_count(void)
755 {
756 uint32_t extf;
757 uint32_t count;
758
759 extf = amd_read_ext_features();
760 count = (extf & APIC_EXTF_ELVT_MASK) >> APIC_EXTF_ELVT_SHIFT;
761 count = min(count, APIC_ELVT_MAX + 1);
762 return (count);
763 }
764
765 /*
766 * Dump contents of local APIC registers
767 */
768 void
769 lapic_dump(const char *str)
770 {
771 uint32_t version;
772 uint32_t maxlvt;
773 uint32_t extf;
774 int elvt_count;
775 int i;
776
777 version = lapic_read32(LAPIC_VERSION);
778 maxlvt = lapic_maxlvt(version);
779 printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
780 printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
781 lapic_read32(LAPIC_ID), version,
782 lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
783 if ((cpu_feature2 & CPUID2_X2APIC) != 0)
784 printf(" x2APIC: %d", x2apic_mode);
785 printf("\n lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
786 lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
787 lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
788 printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x",
789 lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
790 lapic_read32(LAPIC_LVT_ERROR));
791 if (maxlvt >= APIC_LVT_PMC)
792 printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
793 printf("\n");
794 if (maxlvt >= APIC_LVT_CMCI)
795 printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
796 extf = amd_read_ext_features();
797 if (extf != 0) {
798 printf(" AMD ext features: 0x%08x", extf);
799 elvt_count = amd_read_elvt_count();
800 for (i = 0; i < elvt_count; i++)
801 printf("%s elvt%d: 0x%08x", (i % 4) ? "" : "\n ", i,
802 lapic_read32(LAPIC_EXT_LVT0 + i));
803 printf("\n");
804 }
805 }
806
807 void
808 lapic_xapic_mode(void)
809 {
810 register_t saveintr;
811
812 saveintr = intr_disable();
813 if (x2apic_mode)
814 lapic_enable_x2apic();
815 intr_restore(saveintr);
816 }
817
818 static void
819 lapic_early_mask_vec(const struct lvt *l)
820 {
821 uint32_t v;
822
823 if (l->lvt_masked != 0) {
824 v = lapic_read32(l->lvt_reg);
825 v |= APIC_LVT_M;
826 lapic_write32(l->lvt_reg, v);
827 }
828 }
829
830 /* Done on BSP only */
831 static void
832 lapic_early_mask_vecs(void)
833 {
834 int elvt_count, lvts_count, i;
835 uint32_t version;
836
837 version = lapic_read32(LAPIC_VERSION);
838 lvts_count = min(nitems(lvts), lapic_maxlvt(version) + 1);
839 for (i = 0; i < lvts_count; i++)
840 lapic_early_mask_vec(&lvts[i]);
841
842 elvt_count = amd_read_elvt_count();
843 for (i = 0; i < elvt_count; i++)
844 lapic_early_mask_vec(&elvts[i]);
845 }
846
847 void
848 lapic_setup(int boot)
849 {
850 struct lapic *la;
851 uint32_t version;
852 uint32_t maxlvt;
853 register_t saveintr;
854 int elvt_count;
855 int i;
856
857 saveintr = intr_disable();
858
859 la = &lapics[lapic_id()];
860 KASSERT(la->la_present, ("missing APIC structure"));
861 version = lapic_read32(LAPIC_VERSION);
862 maxlvt = lapic_maxlvt(version);
863
864 /* Initialize the TPR to allow all interrupts. */
865 lapic_set_tpr(0);
866
867 /* Setup spurious vector and enable the local APIC. */
868 lapic_enable();
869
870 /* Program LINT[01] LVT entries. */
871 lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
872 lapic_read32(LAPIC_LVT_LINT0)));
873 lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
874 lapic_read32(LAPIC_LVT_LINT1)));
875
876 /* Program the PMC LVT entry if present. */
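/*
 * A note: the register index LAPIC_LVT_PCINT is passed below as the
 * current LVT value; every bit of that constant falls within the
 * fields that lvt_mode() rewrites, so this is equivalent to passing 0.
 */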
877 if (maxlvt >= APIC_LVT_PMC) {
878 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
879 LAPIC_LVT_PCINT));
880 }
881
882 /*
883 * Program the timer LVT. Calibration is deferred until it is certain
884 * that we have a reliable timecounter.
885 */
886 la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
887 lapic_read32(LAPIC_LVT_TIMER));
888 la->lvt_timer_last = la->lvt_timer_base;
889 lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);
890
891 if (boot)
892 la->la_timer_mode = LAT_MODE_UNDEF;
893 else if (la->la_timer_mode != LAT_MODE_UNDEF) {
894 KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
895 lapic_id()));
896 switch (la->la_timer_mode) {
897 case LAT_MODE_PERIODIC:
898 lapic_timer_set_divisor(lapic_timer_divisor);
899 lapic_timer_periodic(la);
900 break;
901 case LAT_MODE_ONESHOT:
902 lapic_timer_set_divisor(lapic_timer_divisor);
903 lapic_timer_oneshot(la);
904 break;
905 case LAT_MODE_DEADLINE:
906 lapic_timer_deadline(la);
907 break;
908 default:
909 panic("corrupted la_timer_mode %p %d", la,
910 la->la_timer_mode);
911 }
912 }
913
914 /* Program error LVT and clear any existing errors. */
915 lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
916 lapic_read32(LAPIC_LVT_ERROR)));
917 lapic_write32(LAPIC_ESR, 0);
918
919 /* XXX: Thermal LVT */
920
921 /* Program the CMCI LVT entry if present. */
922 if (maxlvt >= APIC_LVT_CMCI) {
923 lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
924 lapic_read32(LAPIC_LVT_CMCI)));
925 }
926
927 elvt_count = amd_read_elvt_count();
928 for (i = 0; i < elvt_count; i++) {
929 if (la->la_elvts[i].lvt_active)
930 lapic_write32(LAPIC_EXT_LVT0 + i,
931 elvt_mode(la, i, lapic_read32(LAPIC_EXT_LVT0 + i)));
932 }
933
934 intr_restore(saveintr);
935 }
936
937 static void
938 lapic_intrcnt(void *dummy __unused)
939 {
940 struct pcpu *pc;
941 struct lapic *la;
942 char buf[MAXCOMLEN + 1];
943
944 /* If there are no APICs, skip this function. */
945 if (lapics == NULL)
946 return;
947
948 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
949 la = &lapics[pc->pc_apic_id];
950 if (!la->la_present)
951 continue;
952
953 snprintf(buf, sizeof(buf), "cpu%d:timer", pc->pc_cpuid);
954 intrcnt_add(buf, &la->la_timer_count);
955 }
956 }
957 SYSINIT(lapic_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, lapic_intrcnt, NULL);
958
959 void
960 lapic_reenable_pcint(void)
961 {
962 uint32_t value;
963
964 if (refcount_load(&pcint_refcnt) == 0)
965 return;
966 value = lapic_read32(LAPIC_LVT_PCINT);
967 value &= ~APIC_LVT_M;
968 lapic_write32(LAPIC_LVT_PCINT, value);
969 }
970
971 static void
972 lapic_update_pcint(void *dummy)
973 {
974 struct lapic *la;
975
976 la = &lapics[lapic_id()];
977 lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
978 lapic_read32(LAPIC_LVT_PCINT)));
979 }
980
981 void
982 lapic_calibrate_timer(void)
983 {
984 struct lapic *la;
985 register_t intr;
986
987 #ifdef DEV_ATPIC
988 /* Fail if the local APIC is not present. */
989 if (!x2apic_mode && lapic_map == NULL)
990 return;
991 #endif
992
993 intr = intr_disable();
994 la = &lapics[lapic_id()];
995
996 lapic_calibrate_initcount(la);
997
998 intr_restore(intr);
999
1000 if (lapic_timer_tsc_deadline && bootverbose) {
1001 printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
1002 (uintmax_t)tsc_freq);
1003 }
1004 }
1005
1006 int
1007 lapic_enable_pcint(void)
1008 {
1009 u_int32_t maxlvt;
1010
1011 #ifdef DEV_ATPIC
1012 /* Fail if the local APIC is not present. */
1013 if (!x2apic_mode && lapic_map == NULL)
1014 return (0);
1015 #endif
1016
1017 /* Fail if the PMC LVT is not present. */
1018 maxlvt = lapic_maxlvt(lapic_read32(LAPIC_VERSION));
1019 if (maxlvt < APIC_LVT_PMC)
1020 return (0);
1021 if (refcount_acquire(&pcint_refcnt) > 0)
1022 return (1);
1023 lvts[APIC_LVT_PMC].lvt_masked = 0;
1024
1025 MPASS(mp_ncpus == 1 || smp_started);
1026 smp_rendezvous(NULL, lapic_update_pcint, NULL, NULL);
1027 return (1);
1028 }
1029
1030 void
1031 lapic_disable_pcint(void)
1032 {
1033 u_int32_t maxlvt;
1034
1035 #ifdef DEV_ATPIC
1036 /* Fail if the local APIC is not present. */
1037 if (!x2apic_mode && lapic_map == NULL)
1038 return;
1039 #endif
1040
1041 /* Fail if the PMC LVT is not present. */
1042 maxlvt = lapic_maxlvt(lapic_read32(LAPIC_VERSION));
1043 if (maxlvt < APIC_LVT_PMC)
1044 return;
1045 if (!refcount_release(&pcint_refcnt))
1046 return;
1047 lvts[APIC_LVT_PMC].lvt_masked = 1;
1048
1049 #ifdef SMP
1050 /* The APs should always be started when hwpmc is unloaded. */
1051 KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
1052 #endif
1053 smp_rendezvous(NULL, lapic_update_pcint, NULL, NULL);
1054 }
1055
1056 static bool
1057 lapic_calibrate_initcount_cpuid_vm(void)
1058 {
1059 u_int regs[4];
1060 uint64_t freq;
1061
1062 /* Get value from CPUID leaf if possible. */
1063 if (vm_guest == VM_GUEST_NO)
1064 return (false);
1065 if (hv_high < 0x40000010)
1066 return (false);
1067 do_cpuid(0x40000010, regs);
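/*
 * EBX of the hypervisor timing leaf 0x40000010 reports the APIC
 * timer ("bus") frequency in kHz, hence the conversion to Hz below.
 */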
1068 freq = (uint64_t)(regs[1]) * 1000;
1069
1070 /* Pick timer divisor. */
1071 lapic_timer_divisor = 2;
1072 do {
1073 if (freq / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
1074 break;
1075 lapic_timer_divisor <<= 1;
1076 } while (lapic_timer_divisor <= 128);
1077 if (lapic_timer_divisor > 128)
1078 return (false);
1079
1080 /* Record divided frequency. */
1081 count_freq = freq / lapic_timer_divisor;
1082 return (count_freq != 0);
1083 }
1084
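/*
 * clockcalib() callback: the APIC timer counts down from its initial
 * count, so the number of elapsed ticks is the initial count minus CCR.
 */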
1085 static uint64_t
1086 cb_lapic_getcount(void)
1087 {
1088
1089 return (APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER));
1090 }
1091
1092 static void
1093 lapic_calibrate_initcount(struct lapic *la)
1094 {
1095 uint64_t freq;
1096
1097 if (lapic_calibrate_initcount_cpuid_vm())
1098 goto done;
1099
1100 /* Calibrate the APIC timer frequency. */
1101 lapic_timer_set_divisor(2);
1102 lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
1103 fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
1104 freq = clockcalib(cb_lapic_getcount, "lapic");
1105 fpu_kern_leave(curthread, NULL);
1106
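/*
 * The calibration above ran with a divisor of 2, so the undivided
 * timer input clock ticks at freq * 2.
 */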
1107 /* Pick a different divisor if necessary. */
1108 lapic_timer_divisor = 2;
1109 do {
1110 if (freq * 2 / lapic_timer_divisor < APIC_TIMER_MAX_COUNT)
1111 break;
1112 lapic_timer_divisor <<= 1;
1113 } while (lapic_timer_divisor <= 128);
1114 if (lapic_timer_divisor > 128)
1115 panic("lapic: Divisor too big");
1116 count_freq = freq * 2 / lapic_timer_divisor;
1117 done:
1118 if (bootverbose) {
1119 printf("lapic: Divisor %lu, Frequency %lu Hz\n",
1120 lapic_timer_divisor, count_freq);
1121 }
1122 }
1123
1124 static void
1125 lapic_change_mode(struct eventtimer *et, struct lapic *la,
1126 enum lat_timer_mode newmode)
1127 {
1128 if (la->la_timer_mode == newmode)
1129 return;
1130 switch (newmode) {
1131 case LAT_MODE_PERIODIC:
1132 lapic_timer_set_divisor(lapic_timer_divisor);
1133 et->et_frequency = count_freq;
1134 break;
1135 case LAT_MODE_DEADLINE:
1136 et->et_frequency = tsc_freq;
1137 break;
1138 case LAT_MODE_ONESHOT:
1139 lapic_timer_set_divisor(lapic_timer_divisor);
1140 et->et_frequency = count_freq;
1141 break;
1142 default:
1143 panic("lapic_change_mode %d", newmode);
1144 }
1145 la->la_timer_mode = newmode;
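/*
 * Event timer periods are sbintime_t, i.e. 32.32 fixed-point
 * seconds; these bounds correspond to 2 and 0xfffffffe counts
 * (or TSC cycles in deadline mode) at the current frequency.
 */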
1146 et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
1147 et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
1148 }
1149
1150 static int
1151 lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
1152 {
1153 struct lapic *la;
1154
1155 la = &lapics[PCPU_GET(apic_id)];
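/*
 * 'first' and 'period' are 32.32 fixed-point seconds, so
 * (frequency * value) >> 32 converts them to timer counts (or TSC
 * cycles in deadline mode).
 */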
1156 if (period != 0) {
1157 lapic_change_mode(et, la, LAT_MODE_PERIODIC);
1158 la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
1159 32;
1160 lapic_timer_periodic(la);
1161 } else if (lapic_timer_tsc_deadline) {
1162 lapic_change_mode(et, la, LAT_MODE_DEADLINE);
1163 la->la_timer_period = (et->et_frequency * first) >> 32;
1164 lapic_timer_deadline(la);
1165 } else {
1166 lapic_change_mode(et, la, LAT_MODE_ONESHOT);
1167 la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
1168 32;
1169 lapic_timer_oneshot(la);
1170 }
1171 return (0);
1172 }
1173
1174 static int
1175 lapic_et_stop(struct eventtimer *et)
1176 {
1177 struct lapic *la;
1178
1179 la = &lapics[PCPU_GET(apic_id)];
1180 lapic_timer_stop(la);
1181 la->la_timer_mode = LAT_MODE_UNDEF;
1182 return (0);
1183 }
1184
1185 void
1186 lapic_disable(void)
1187 {
1188 uint32_t value;
1189
1190 /* Software disable the local APIC. */
1191 value = lapic_read32(LAPIC_SVR);
1192 value &= ~APIC_SVR_SWEN;
1193 lapic_write32(LAPIC_SVR, value);
1194 }
1195
1196 static void
1197 lapic_enable(void)
1198 {
1199 uint32_t value;
1200
1201 /* Program the spurious vector to enable the local APIC. */
1202 value = lapic_read32(LAPIC_SVR);
1203 value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
1204 value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
1205 if (lapic_eoi_suppression)
1206 value |= APIC_SVR_EOI_SUPPRESSION;
1207 lapic_write32(LAPIC_SVR, value);
1208 }
1209
1210 /* Reset the local APIC on the BSP during resume. */
1211 static void
1212 lapic_resume(struct pic *pic, bool suspend_cancelled)
1213 {
1214
1215 lapic_setup(0);
1216 }
1217
1218 int
1219 lapic_id(void)
1220 {
1221 uint32_t v;
1222
1223 KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
1224 v = lapic_read32(LAPIC_ID);
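/* xAPIC keeps the ID in bits 31:24; x2APIC uses the full register. */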
1225 if (!x2apic_mode)
1226 v >>= APIC_ID_SHIFT;
1227 return (v);
1228 }
1229
1230 int
1231 lapic_intr_pending(u_int vector)
1232 {
1233 uint32_t irr;
1234
1235 /*
1236 * The IRR is an array of registers, each of which describes
1237 * 32 interrupts in its low 32 bits.  Thus, we divide the
1238 * vector by 32 to get the register index.  Finally, we take
1239 * the vector modulo 32 to determine the individual bit to
1240 * test.
1241 */
1242 irr = lapic_read32(LAPIC_IRR0 + vector / 32);
1243 return (irr & 1 << (vector % 32));
1244 }
1245
1246 void
1247 lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
1248 {
1249 struct lapic *la;
1250
1251 KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
1252 __func__, apic_id));
1253 KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
1254 __func__, cluster));
1255 KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
1256 ("%s: intra cluster id %u too big", __func__, cluster_id));
1257 la = &lapics[apic_id];
1258 la->la_cluster = cluster;
1259 la->la_cluster_id = cluster_id;
1260 }
1261
1262 int
1263 lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
1264 {
1265
1266 if (pin > APIC_LVT_MAX)
1267 return (EINVAL);
1268 if (apic_id == APIC_ID_ALL) {
1269 lvts[pin].lvt_masked = masked;
1270 if (bootverbose)
1271 printf("lapic:");
1272 } else {
1273 KASSERT(lapics[apic_id].la_present,
1274 ("%s: missing APIC %u", __func__, apic_id));
1275 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
1276 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1277 if (bootverbose)
1278 printf("lapic%u:", apic_id);
1279 }
1280 if (bootverbose)
1281 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
1282 return (0);
1283 }
1284
1285 int
1286 lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
1287 {
1288 struct lvt *lvt;
1289
1290 if (pin > APIC_LVT_MAX)
1291 return (EINVAL);
1292 if (apic_id == APIC_ID_ALL) {
1293 lvt = &lvts[pin];
1294 if (bootverbose)
1295 printf("lapic:");
1296 } else {
1297 KASSERT(lapics[apic_id].la_present,
1298 ("%s: missing APIC %u", __func__, apic_id));
1299 lvt = &lapics[apic_id].la_lvts[pin];
1300 lvt->lvt_active = 1;
1301 if (bootverbose)
1302 printf("lapic%u:", apic_id);
1303 }
1304 lvt->lvt_mode = mode;
1305 switch (mode) {
1306 case APIC_LVT_DM_NMI:
1307 case APIC_LVT_DM_SMI:
1308 case APIC_LVT_DM_INIT:
1309 case APIC_LVT_DM_EXTINT:
1310 lvt->lvt_edgetrigger = 1;
1311 lvt->lvt_activehi = 1;
1312 if (mode == APIC_LVT_DM_EXTINT)
1313 lvt->lvt_masked = 1;
1314 else
1315 lvt->lvt_masked = 0;
1316 break;
1317 default:
1318 panic("Unsupported delivery mode: 0x%x\n", mode);
1319 }
1320 if (bootverbose) {
1321 printf(" Routing ");
1322 switch (mode) {
1323 case APIC_LVT_DM_NMI:
1324 printf("NMI");
1325 break;
1326 case APIC_LVT_DM_SMI:
1327 printf("SMI");
1328 break;
1329 case APIC_LVT_DM_INIT:
1330 printf("INIT");
1331 break;
1332 case APIC_LVT_DM_EXTINT:
1333 printf("ExtINT");
1334 break;
1335 }
1336 printf(" -> LINT%u\n", pin);
1337 }
1338 return (0);
1339 }
1340
1341 int
1342 lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
1343 {
1344
1345 if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
1346 return (EINVAL);
1347 if (apic_id == APIC_ID_ALL) {
1348 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
1349 if (bootverbose)
1350 printf("lapic:");
1351 } else {
1352 KASSERT(lapics[apic_id].la_present,
1353 ("%s: missing APIC %u", __func__, apic_id));
1354 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1355 lapics[apic_id].la_lvts[pin].lvt_activehi =
1356 (pol == INTR_POLARITY_HIGH);
1357 if (bootverbose)
1358 printf("lapic%u:", apic_id);
1359 }
1360 if (bootverbose)
1361 printf(" LINT%u polarity: %s\n", pin,
1362 pol == INTR_POLARITY_HIGH ? "high" : "low");
1363 return (0);
1364 }
1365
1366 int
1367 lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
1368 enum intr_trigger trigger)
1369 {
1370
1371 if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
1372 return (EINVAL);
1373 if (apic_id == APIC_ID_ALL) {
1374 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
1375 if (bootverbose)
1376 printf("lapic:");
1377 } else {
1378 KASSERT(lapics[apic_id].la_present,
1379 ("%s: missing APIC %u", __func__, apic_id));
1380 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
1381 (trigger == INTR_TRIGGER_EDGE);
1382 lapics[apic_id].la_lvts[pin].lvt_active = 1;
1383 if (bootverbose)
1384 printf("lapic%u:", apic_id);
1385 }
1386 if (bootverbose)
1387 printf(" LINT%u trigger: %s\n", pin,
1388 trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
1389 return (0);
1390 }
1391
1392 /*
1393 * Adjust the TPR of the current CPU so that it blocks all interrupts below
1394 * the passed in vector.
1395 */
1396 static void
1397 lapic_set_tpr(u_int vector)
1398 {
1399 #ifdef CHEAP_TPR
1400 lapic_write32(LAPIC_TPR, vector);
1401 #else
1402 uint32_t tpr;
1403
1404 tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
1405 tpr |= vector;
1406 lapic_write32(LAPIC_TPR, tpr);
1407 #endif
1408 }
1409
1410 void
1411 lapic_eoi(void)
1412 {
1413
1414 lapic_write32_nofence(LAPIC_EOI, 0);
1415 }
1416
1417 void
1418 lapic_handle_intr(int vector, struct trapframe *frame)
1419 {
1420 struct intsrc *isrc;
1421
1422 kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
1423 kmsan_mark(&vector, sizeof(vector), KMSAN_STATE_INITED);
1424 kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
1425 trap_check_kstack();
1426
1427 isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
1428 vector));
1429 intr_execute_handlers(isrc, frame);
1430 }
1431
1432 void
1433 lapic_handle_timer(struct trapframe *frame)
1434 {
1435 struct lapic *la;
1436 struct trapframe *oldframe;
1437 struct thread *td;
1438
1439 /* Send EOI first thing. */
1440 lapic_eoi();
1441
1442 kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
1443 kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
1444 trap_check_kstack();
1445
1446 #if defined(SMP) && !defined(SCHED_ULE)
1447 /*
1448 * Don't do any accounting for disabled HTT cores, since doing
1449 * so would present misleading numbers to userland.
1450 *
1451 * No locking is necessary here; even if we lose a race when
1452 * hlt_cpus_mask changes, the consequences are harmless.
1453 *
1454 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
1455 * and unlike other schedulers it actually schedules threads to
1456 * those CPUs.
1457 */
1458 if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
1459 return;
1460 #endif
1461
1462 /* Look up our local APIC structure for the tick counters. */
1463 la = &lapics[PCPU_GET(apic_id)];
1464 (*la->la_timer_count)++;
1465 critical_enter();
1466 if (lapic_et.et_active) {
1467 td = curthread;
1468 td->td_intr_nesting_level++;
1469 oldframe = td->td_intr_frame;
1470 td->td_intr_frame = frame;
1471 lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
1472 td->td_intr_frame = oldframe;
1473 td->td_intr_nesting_level--;
1474 }
1475 critical_exit();
1476 }
1477
1478 static void
1479 lapic_timer_set_divisor(u_int divisor)
1480 {
1481
1482 KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
1483 KASSERT(ffs(divisor) <= nitems(lapic_timer_divisors),
1484 ("lapic: invalid divisor %u", divisor));
1485 lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
1486 }
1487
1488 static void
1489 lapic_timer_oneshot(struct lapic *la)
1490 {
1491 uint32_t value;
1492
1493 value = la->lvt_timer_base;
1494 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1495 value |= APIC_LVTT_TM_ONE_SHOT;
1496 la->lvt_timer_last = value;
1497 lapic_write32(LAPIC_LVT_TIMER, value);
1498 lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1499 }
1500
1501 static void
1502 lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
1503 {
1504 uint32_t value;
1505
1506 value = la->lvt_timer_base;
1507 value &= ~APIC_LVTT_TM;
1508 value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
1509 la->lvt_timer_last = value;
1510 lapic_write32(LAPIC_LVT_TIMER, value);
1511 lapic_write32(LAPIC_ICR_TIMER, count);
1512 }
1513
1514 static void
1515 lapic_timer_periodic(struct lapic *la)
1516 {
1517 uint32_t value;
1518
1519 value = la->lvt_timer_base;
1520 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1521 value |= APIC_LVTT_TM_PERIODIC;
1522 la->lvt_timer_last = value;
1523 lapic_write32(LAPIC_LVT_TIMER, value);
1524 lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
1525 }
1526
1527 static void
1528 lapic_timer_deadline(struct lapic *la)
1529 {
1530 uint32_t value;
1531
1532 value = la->lvt_timer_base;
1533 value &= ~(APIC_LVTT_TM | APIC_LVT_M);
1534 value |= APIC_LVTT_TM_TSCDLT;
1535 if (value != la->lvt_timer_last) {
1536 la->lvt_timer_last = value;
1537 lapic_write32_nofence(LAPIC_LVT_TIMER, value);
1538 if (!x2apic_mode)
1539 mfence();
1540 }
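/*
 * Per the SDM, a write to the deadline MSR is ignored while the LVT
 * is not in TSC-deadline mode; the fence above ensures the MMIO mode
 * switch is ordered before the wrmsr().
 */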
1541 wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
1542 }
1543
1544 static void
1545 lapic_timer_stop(struct lapic *la)
1546 {
1547 uint32_t value;
1548
1549 if (la->la_timer_mode == LAT_MODE_DEADLINE) {
1550 wrmsr(MSR_TSC_DEADLINE, 0);
1551 mfence();
1552 } else {
1553 value = la->lvt_timer_base;
1554 value &= ~APIC_LVTT_TM;
1555 value |= APIC_LVT_M;
1556 la->lvt_timer_last = value;
1557 lapic_write32(LAPIC_LVT_TIMER, value);
1558 }
1559 }
1560
1561 void
1562 lapic_handle_cmc(void)
1563 {
1564 trap_check_kstack();
1565
1566 lapic_eoi();
1567 cmc_intr();
1568 }
1569
1570 /*
1571 * Called from the mca_init() to activate the CMC interrupt if this CPU is
1572 * responsible for monitoring any MC banks for CMC events. Since mca_init()
1573 * is called prior to lapic_setup() during boot, this just needs to unmask
1574 * this CPU's LVT_CMCI entry.
1575 */
1576 void
1577 lapic_enable_cmc(void)
1578 {
1579 u_int apic_id;
1580
1581 #ifdef DEV_ATPIC
1582 if (!x2apic_mode && lapic_map == NULL)
1583 return;
1584 #endif
1585 apic_id = PCPU_GET(apic_id);
1586 KASSERT(lapics[apic_id].la_present,
1587 ("%s: missing APIC %u", __func__, apic_id));
1588 lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
1589 lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
1590 }
1591
1592 int
1593 lapic_enable_mca_elvt(void)
1594 {
1595 u_int apic_id;
1596 uint32_t value;
1597 int elvt_count;
1598
1599 #ifdef DEV_ATPIC
1600 if (lapic_map == NULL)
1601 return (-1);
1602 #endif
1603
1604 apic_id = PCPU_GET(apic_id);
1605 KASSERT(lapics[apic_id].la_present,
1606 ("%s: missing APIC %u", __func__, apic_id));
1607 elvt_count = amd_read_elvt_count();
1608 if (elvt_count <= APIC_ELVT_MCA)
1609 return (-1);
1610
1611 value = lapic_read32(LAPIC_EXT_LVT0 + APIC_ELVT_MCA);
1612 if ((value & APIC_LVT_M) == 0) {
1613 if (bootverbose)
1614 printf("AMD MCE Thresholding Extended LVT is already active\n");
1615 return (APIC_ELVT_MCA);
1616 }
1617 lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_masked = 0;
1618 lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_active = 1;
1619 return (APIC_ELVT_MCA);
1620 }
1621
1622 void
1623 lapic_handle_error(void)
1624 {
1625 uint32_t esr;
1626
1627 trap_check_kstack();
1628
1629 /*
1630 * Read the contents of the error status register. Write to
1631 * the register first before reading from it to force the APIC
1632 * to update its value to indicate any errors that have
1633 * occurred since the previous write to the register.
1634 */
1635 lapic_write32(LAPIC_ESR, 0);
1636 esr = lapic_read32(LAPIC_ESR);
1637
1638 printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
1639 lapic_eoi();
1640 }
1641
1642 u_int
1643 apic_cpuid(u_int apic_id)
1644 {
1645 #ifdef SMP
1646 return apic_cpuids[apic_id];
1647 #else
1648 return 0;
1649 #endif
1650 }
1651
1652 /* Request a free IDT vector to be used by the specified IRQ. */
1653 u_int
1654 apic_alloc_vector(u_int apic_id, u_int irq)
1655 {
1656 u_int vector;
1657
1658 KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
1659
1660 /*
1661 * Search for a free vector. Currently we just use a very simple
1662 * algorithm to find the first free vector.
1663 */
1664 mtx_lock_spin(&icu_lock);
1665 for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1666 if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE)
1667 continue;
1668 lapics[apic_id].la_ioint_irqs[vector] = irq;
1669 mtx_unlock_spin(&icu_lock);
1670 return (vector + APIC_IO_INTS);
1671 }
1672 mtx_unlock_spin(&icu_lock);
1673 return (0);
1674 }
1675
1676 /*
1677 * Request 'count' free contiguous IDT vectors to be used by 'count'
1678 * IRQs. 'count' must be a power of two and the vectors will be
1679 * aligned on a boundary of 'align'. If the request cannot be
1680 * satisfied, 0 is returned.
1681 */
1682 u_int
1683 apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1684 {
1685 u_int first, run, vector;
1686
1687 KASSERT(powerof2(count), ("bad count"));
1688 KASSERT(powerof2(align), ("bad align"));
1689 KASSERT(align >= count, ("align < count"));
1690 #ifdef INVARIANTS
1691 for (run = 0; run < count; run++)
1692 KASSERT(irqs[run] < num_io_irqs, ("Invalid IRQ %u at index %u",
1693 irqs[run], run));
1694 #endif
1695
1696 /*
1697 * Search for 'count' free vectors. As with apic_alloc_vector(),
1698 * this just uses a simple first fit algorithm.
1699 */
1700 run = 0;
1701 first = 0;
1702 mtx_lock_spin(&icu_lock);
1703 for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1704 /* Vector is in use, end run. */
1705 if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE) {
1706 run = 0;
1707 first = 0;
1708 continue;
1709 }
1710
1711 /* Start a new run if run == 0 and vector is aligned. */
1712 if (run == 0) {
1713 if (((vector + APIC_IO_INTS) & (align - 1)) != 0)
1714 continue;
1715 first = vector;
1716 }
1717 run++;
1718
1719 /* Keep looping if the run isn't long enough yet. */
1720 if (run < count)
1721 continue;
1722
1723 /* Found a run, assign IRQs and return the first vector. */
1724 for (vector = 0; vector < count; vector++)
1725 lapics[apic_id].la_ioint_irqs[first + vector] =
1726 irqs[vector];
1727 mtx_unlock_spin(&icu_lock);
1728 return (first + APIC_IO_INTS);
1729 }
1730 mtx_unlock_spin(&icu_lock);
1731 printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1732 return (0);
1733 }
1734
1735 /*
1736 * Enable a vector for a particular apic_id.  Since all local APICs
1737 * share IDT entries and ioint_handlers, this enables the vector on
1738 * all of them.  Local APICs that do not have the vector configured
1739 * will report spurious interrupts should it fire.
1740 */
1741 void
1742 apic_enable_vector(u_int apic_id, u_int vector)
1743 {
1744
1745 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1746 KASSERT(ioint_handlers[vector / 32] != NULL,
1747 ("No ISR handler for vector %u", vector));
1748 #ifdef KDTRACE_HOOKS
1749 KASSERT(vector != IDT_DTRACE_RET,
1750 ("Attempt to overwrite DTrace entry"));
1751 #endif
1752 setidt(vector, (pti ? ioint_pti_handlers : ioint_handlers)[vector / 32],
1753 SDT_APIC, SEL_KPL, GSEL_APIC);
1754 }
1755
1756 void
1757 apic_disable_vector(u_int apic_id, u_int vector)
1758 {
1759
1760 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1761 #ifdef KDTRACE_HOOKS
1762 KASSERT(vector != IDT_DTRACE_RET,
1763 ("Attempt to overwrite DTrace entry"));
1764 #endif
1765 KASSERT(ioint_handlers[vector / 32] != NULL,
1766 ("No ISR handler for vector %u", vector));
1767 #ifdef notyet
1768 /*
1769 * We cannot currently clear the IDT entry because other CPUs
1770 * may have a valid vector at this offset.
1771 */
1772 setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
1773 SEL_KPL, GSEL_APIC);
1774 #endif
1775 }
1776
1777 /* Release an APIC vector when it's no longer in use. */
1778 void
1779 apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1780 {
1781 struct thread *td;
1782
1783 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1784 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1785 ("Vector %u does not map to an IRQ line", vector));
1786 KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
1787 KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1788 irq, ("IRQ mismatch"));
1789 #ifdef KDTRACE_HOOKS
1790 KASSERT(vector != IDT_DTRACE_RET,
1791 ("Attempt to overwrite DTrace entry"));
1792 #endif
1793
1794 /*
1795 * Bind us to the cpu that owned the vector before freeing it so
1796 * we don't lose an interrupt delivery race.
1797 */
1798 td = curthread;
1799 if (!rebooting) {
1800 thread_lock(td);
1801 if (sched_is_bound(td))
1802 panic("apic_free_vector: Thread already bound.\n");
1803 sched_bind(td, apic_cpuid(apic_id));
1804 thread_unlock(td);
1805 }
1806 mtx_lock_spin(&icu_lock);
1807 lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = IRQ_FREE;
1808 mtx_unlock_spin(&icu_lock);
1809 if (!rebooting) {
1810 thread_lock(td);
1811 sched_unbind(td);
1812 thread_unlock(td);
1813 }
1814 }
1815
1816 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1817 static u_int
1818 apic_idt_to_irq(u_int apic_id, u_int vector)
1819 {
1820 int irq;
1821
1822 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1823 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1824 ("Vector %u does not map to an IRQ line", vector));
1825 #ifdef KDTRACE_HOOKS
1826 KASSERT(vector != IDT_DTRACE_RET,
1827 ("Attempt to overwrite DTrace entry"));
1828 #endif
1829 irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1830 if (irq < 0)
1831 irq = 0;
1832 return (irq);
1833 }
1834
1835 #ifdef DDB
1836 /*
1837 * Dump data about APIC IDT vector mappings.
1838 */
1839 DB_SHOW_COMMAND_FLAGS(apic, db_show_apic, DB_CMD_MEMSAFE)
1840 {
1841 struct intsrc *isrc;
1842 int i, verbose;
1843 u_int apic_id;
1844 u_int irq;
1845
1846 if (strcmp(modif, "vv") == 0)
1847 verbose = 2;
1848 else if (strcmp(modif, "v") == 0)
1849 verbose = 1;
1850 else
1851 verbose = 0;
1852 for (apic_id = 0; apic_id <= max_apic_id; apic_id++) {
1853 if (lapics[apic_id].la_present == 0)
1854 continue;
1855 db_printf("Interrupts bound to lapic %u\n", apic_id);
1856 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1857 irq = lapics[apic_id].la_ioint_irqs[i];
1858 if (irq == IRQ_FREE || irq == IRQ_SYSCALL)
1859 continue;
1860 #ifdef KDTRACE_HOOKS
1861 if (irq == IRQ_DTRACE_RET)
1862 continue;
1863 #endif
1864 #ifdef XENHVM
1865 if (irq == IRQ_EVTCHN)
1866 continue;
1867 #endif
1868 db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1869 if (irq == IRQ_TIMER)
1870 db_printf("lapic timer\n");
1871 else if (irq < num_io_irqs) {
1872 isrc = intr_lookup_source(irq);
1873 if (isrc == NULL || verbose == 0)
1874 db_printf("IRQ %u\n", irq);
1875 else
1876 db_dump_intr_event(isrc->is_event,
1877 verbose == 2);
1878 } else
1879 db_printf("IRQ %u ???\n", irq);
1880 }
1881 }
1882 }
1883
1884 static void
1885 dump_mask(const char *prefix, uint32_t v, int base)
1886 {
1887 int i, first;
1888
1889 first = 1;
1890 for (i = 0; i < 32; i++)
1891 if (v & (1 << i)) {
1892 if (first) {
1893 db_printf("%s:", prefix);
1894 first = 0;
1895 }
1896 db_printf(" %02x", base + i);
1897 }
1898 if (!first)
1899 db_printf("\n");
1900 }
1901
1902 /* Show info from the lapic regs for this CPU. */
1903 DB_SHOW_COMMAND_FLAGS(lapic, db_show_lapic, DB_CMD_MEMSAFE)
1904 {
1905 const struct lvt *l;
1906 int elvt_count, lvts_count, i;
1907 uint32_t v, vr;
1908
1909 db_printf("lapic ID = %d\n", lapic_id());
1910 v = lapic_read32(LAPIC_VERSION);
1911 db_printf("version = %d.%d (%#x) \n", (v & APIC_VER_VERSION) >> 4,
1912 v & 0xf, v);
1913 db_printf("max LVT = %d\n", lapic_maxlvt(v));
1914 vr = lapic_read32(LAPIC_SVR);
1915 db_printf("SVR = %02x (%s)\n", vr & APIC_SVR_VECTOR,
1916 vr & APIC_SVR_ENABLE ? "enabled" : "disabled");
1917 db_printf("TPR = %02x\n", lapic_read32(LAPIC_TPR));
1918
1919 lvts_count = min(nitems(lvts), lapic_maxlvt(v) + 1);
1920 for (i = 0; i < lvts_count; i++) {
1921 l = &lvts[i];
1922 db_printf("LVT%d (reg %#x %-5s) = %#010x\n", i, l->lvt_reg,
1923 l->lvt_desc, lapic_read32(l->lvt_reg));
1924 }
1925
1926 elvt_count = amd_read_elvt_count();
1927 for (i = 0; i < elvt_count; i++) {
1928 l = &elvts[i];
1929 db_printf("ELVT%d (reg %#x %-5s) = %#010x\n", i, l->lvt_reg,
1930 l->lvt_desc, lapic_read32(l->lvt_reg));
1931 }
1932
1933 #define dump_field(prefix, regn, index) \
1934 dump_mask(__XSTRING(prefix ## index), \
1935 lapic_read32(LAPIC_ ## regn ## index), \
1936 index * 32)
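
/*
 * For instance, dump_field(isr, ISR, 0) expands to
 * dump_mask("isr0", lapic_read32(LAPIC_ISR0), 0), printing the vectors
 * set in the first 32-bit word of the in-service register.
 */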

	db_printf("In-service Interrupts:\n");
	dump_field(isr, ISR, 0);
	dump_field(isr, ISR, 1);
	dump_field(isr, ISR, 2);
	dump_field(isr, ISR, 3);
	dump_field(isr, ISR, 4);
	dump_field(isr, ISR, 5);
	dump_field(isr, ISR, 6);
	dump_field(isr, ISR, 7);

	db_printf("TMR Interrupts:\n");
	dump_field(tmr, TMR, 0);
	dump_field(tmr, TMR, 1);
	dump_field(tmr, TMR, 2);
	dump_field(tmr, TMR, 3);
	dump_field(tmr, TMR, 4);
	dump_field(tmr, TMR, 5);
	dump_field(tmr, TMR, 6);
	dump_field(tmr, TMR, 7);

	db_printf("IRR Interrupts:\n");
	dump_field(irr, IRR, 0);
	dump_field(irr, IRR, 1);
	dump_field(irr, IRR, 2);
	dump_field(irr, IRR, 3);
	dump_field(irr, IRR, 4);
	dump_field(irr, IRR, 5);
	dump_field(irr, IRR, 6);
	dump_field(irr, IRR, 7);

#undef dump_field
}
#endif

/*
 * APIC probing support code.  This includes code to manage enumerators.
 */

static SLIST_HEAD(, apic_enumerator) enumerators =
    SLIST_HEAD_INITIALIZER(enumerators);
static struct apic_enumerator *best_enum;

void
apic_register_enumerator(struct apic_enumerator *enumerator)
{
#ifdef INVARIANTS
	struct apic_enumerator *apic_enum;

	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
		if (apic_enum == enumerator)
			panic("%s: Duplicate register of %s", __func__,
			    enumerator->apic_name);
	}
#endif
	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
}
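
/*
 * A minimal sketch of how an enumerator hooks into this machinery (the
 * "foo_*" names are hypothetical; real enumerators such as the ACPI MADT
 * parser live elsewhere in the tree):
 *
 *	static struct apic_enumerator foo_enumerator = {
 *		.apic_name = "FOO",
 *		.apic_probe = foo_probe,
 *		.apic_probe_cpus = foo_probe_cpus,
 *		.apic_setup_local = foo_setup_local,
 *		.apic_setup_io = foo_setup_io,
 *	};
 *
 *	static void
 *	foo_register(void *dummy __unused)
 *	{
 *		apic_register_enumerator(&foo_enumerator);
 *	}
 *	SYSINIT(foo_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST,
 *	    foo_register, NULL);
 *
 * Registration must happen before apic_init() runs at
 * SI_SUB_TUNABLES - 1 / SI_ORDER_SECOND below.
 */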

/*
 * We have to look for CPUs very, very early because certain subsystems
 * want to know how many CPUs we have extremely early on in the boot
 * process.
 */
static void
apic_init(void *dummy __unused)
{
	struct apic_enumerator *enumerator;
	int retval, best;

	/* We only support built-in local APICs. */
	if (!(cpu_feature & CPUID_APIC))
		return;

	/* Don't probe if APIC mode is disabled. */
	if (resource_disabled("apic", 0))
		return;

	/*
	 * Probe all the enumerators to find the best match.  A probe
	 * returns a value <= 0 on success, with values closer to zero
	 * preferred, and a positive value on failure.
	 */
	best_enum = NULL;
	best = 0;
	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
		retval = enumerator->apic_probe();
		if (retval > 0)
			continue;
		if (best_enum == NULL || best < retval) {
			best_enum = enumerator;
			best = retval;
		}
	}
	if (best_enum == NULL) {
		if (bootverbose)
			printf("APIC: Could not find any APICs.\n");
#ifndef DEV_ATPIC
		panic("running without device atpic requires a local APIC");
#endif
		return;
	}

	if (bootverbose)
		printf("APIC: Using the %s enumerator.\n",
		    best_enum->apic_name);

#ifdef I686_CPU
	/*
	 * To work around an erratum, we disable the local APIC on some
	 * CPUs during early startup.  We need to turn the local APIC back
	 * on for such CPUs now.
	 */
	ppro_reenable_apic();
#endif

	/* Probe the CPUs in the system. */
	retval = best_enum->apic_probe_cpus();
	if (retval != 0)
		printf("%s: Failed to probe CPUs: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);

/*
 * Set up the local APIC.  We have to do this prior to starting up the APs
 * in the SMP case.
 */
static void
apic_setup_local(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	lapics = malloc(sizeof(*lapics) * (max_apic_id + 1), M_LAPIC,
	    M_WAITOK | M_ZERO);

	/* Initialize the local APIC. */
	retval = best_enum->apic_setup_local();
	if (retval != 0)
		printf("%s: Failed to set up the local APIC: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);

/*
 * Set up the I/O APICs.
 */
static void
apic_setup_io(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	/*
	 * The local APIC must be registered before other PICs and pseudo
	 * PICs for proper suspend/resume order.
	 */
	intr_register_pic(&lapic_pic);

	retval = best_enum->apic_setup_io();
	if (retval != 0)
		printf("%s: Failed to set up I/O APICs: returned %d\n",
		    best_enum->apic_name, retval);

	/*
	 * Finish setting up the local APIC on the BSP once we know
	 * how to properly program the LINT pins.  In particular, this
	 * enables the EOI suppression mode, if the LAPIC supports it and
	 * the user did not disable the mode.
	 */
	lapic_setup(1);
	if (bootverbose)
		lapic_dump("BSP");

	/* Enable the MSI "pic". */
	msi_init();

#ifdef XENHVM
	xen_intr_alloc_irqs();
#endif
}
SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);

#ifdef SMP
/*
 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
 * private to the MD code.  The public interface for the rest of the
 * kernel is defined in mp_machdep.c.
 */

/*
 * Wait delay microseconds for an IPI to be sent.  If delay is -1, we
 * wait forever.
 */
int
lapic_ipi_wait(int delay)
{
	uint64_t rx;

	/* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
	if (x2apic_mode)
		return (1);

	for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
		if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
		    APIC_DELSTAT_IDLE)
			return (1);
		ia32_pause();
	}
	return (0);
}

void
lapic_ipi_raw(register_t icrlo, u_int dest)
{
	uint32_t icrhi;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(x2apic_mode || lapic_map != NULL,
	    ("%s called too early", __func__));
	KASSERT(x2apic_mode ||
	    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		if (x2apic_mode)
			icrhi = dest;
		else
			icrhi = dest << APIC_ID_SHIFT;
		lapic_write_icr(icrhi, icrlo);
	} else {
		lapic_write_icr_lo(icrlo);
	}
}
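
/*
 * As an illustration, a caller delivering fixed-delivery vector "vec" to
 * the CPU whose APIC ID is "id" would build the ICR low word roughly as
 * native_lapic_ipi_vectored() does below:
 *
 *	icrlo = vec | APIC_DELMODE_FIXED | APIC_DESTMODE_PHY |
 *	    APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT | APIC_DEST_DESTFLD;
 *	lapic_ipi_raw(icrlo, id);
 */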

#ifdef DETECT_DEADLOCK
#define	AFTER_SPIN	50
#endif

static void
native_lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		if (x2apic_mode && vector < IPI_NMI_FIRST) {
			lapic_write_self_ipi(vector);
			return;
		}
		icrlo = APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo = APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo = APIC_DEST_ALLESELF;
		break;
	default:
		icrlo = 0;
		KASSERT(x2apic_mode ||
		    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		destfield = dest;
	}

	/*
	 * NMI IPIs are just fake vectors used to send an NMI.  Use the
	 * special rules for NMIs if one was requested; otherwise specify
	 * the vector.
	 */
	if (vector >= IPI_NMI_FIRST)
		icrlo |= APIC_DELMODE_NMI;
	else
		icrlo |= vector | APIC_DELMODE_FIXED;
	icrlo |= APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(lapic_ds_idle_timeout)) {
		if (KERNEL_PANICKED())
			return;
		else
			panic("APIC: Previous IPI is stuck");
	}

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for the IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory), and if it wasn't we would catch the
		 * failure with the check above when the next IPI is sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait until the message is sent without a timeout. */
		while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}

void (*ipi_vectored)(u_int, int) = &native_lapic_ipi_vectored;
#endif /* SMP */

/*
 * Since the IDT is shared by all CPUs, the IPI slot update needs to be
 * globally visible.
 *
 * Consider the case where an IPI is generated immediately after allocation:
 *	vector = lapic_ipi_alloc(ipifunc);
 *	ipi_selected(other_cpus, vector);
 *
 * In xAPIC mode a write to ICR_LO has serializing semantics because the
 * APIC page is mapped as an uncached region.  In x2APIC mode there is an
 * explicit 'mfence' before the ICR MSR is written.  Therefore in both cases
 * the IDT slot update is globally visible before the IPI is delivered.
 */
int
lapic_ipi_alloc(inthand_t *ipifunc)
{
	struct gate_descriptor *ip;
	long func;
	int idx, vector;

	KASSERT(ipifunc != &IDTVEC(rsvd) && ipifunc != &IDTVEC(rsvd_pti),
	    ("invalid ipifunc %p", ipifunc));

	vector = -1;
	mtx_lock_spin(&icu_lock);
	for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
		ip = &idt[idx];
		func = (ip->gd_hioffset << 16) | ip->gd_looffset;
#ifdef __i386__
		func -= setidt_disp;
#endif
		if ((!pti && func == (uintptr_t)&IDTVEC(rsvd)) ||
		    (pti && func == (uintptr_t)&IDTVEC(rsvd_pti))) {
			vector = idx;
			setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
			break;
		}
	}
	mtx_unlock_spin(&icu_lock);
	return (vector);
}
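
/*
 * Sketch of a typical consumer (the handler name is hypothetical; real
 * IPI handlers are declared with IDTVEC() in assembly):
 *
 *	extern inthand_t IDTVEC(foo_ipi);
 *	int vec;
 *
 *	vec = lapic_ipi_alloc(&IDTVEC(foo_ipi));
 *	if (vec == -1)
 *		return (ENXIO);	(no free slot in [IPI_DYN_FIRST, IPI_DYN_LAST])
 *	ipi_selected(some_cpus, vec);
 *	...
 *	lapic_ipi_free(vec);
 */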

void
lapic_ipi_free(int vector)
{
	struct gate_descriptor *ip;
	long func __diagused;

	KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
	    ("%s: invalid vector %d", __func__, vector));

	mtx_lock_spin(&icu_lock);
	ip = &idt[vector];
	func = (ip->gd_hioffset << 16) | ip->gd_looffset;
#ifdef __i386__
	func -= setidt_disp;
#endif
	KASSERT(func != (uintptr_t)&IDTVEC(rsvd) &&
	    func != (uintptr_t)&IDTVEC(rsvd_pti),
	    ("invalid idtfunc %#lx", func));
	setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
	    SEL_KPL, GSEL_APIC);
	mtx_unlock_spin(&icu_lock);
}