1 /* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include "opt_cpu.h" 29 #include "opt_user_ldt.h" 30 31 #ifdef SMP 32 #include <machine/smptests.h> 33 #else 34 #error 35 #endif 36 37 #include <sys/param.h> 38 #include <sys/bus.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/proc.h> 42 #include <sys/sysctl.h> 43 #include <sys/malloc.h> 44 #include <sys/memrange.h> 45 #include <sys/mutex.h> 46 #ifdef BETTER_CLOCK 47 #include <sys/dkstat.h> 48 #endif 49 #include <sys/cons.h> /* cngetc() */ 50 51 #include <vm/vm.h> 52 #include <vm/vm_param.h> 53 #include <vm/pmap.h> 54 #include <vm/vm_kern.h> 55 #include <vm/vm_extern.h> 56 #ifdef BETTER_CLOCK 57 #include <sys/lock.h> 58 #include <vm/vm_map.h> 59 #include <sys/user.h> 60 #ifdef GPROF 61 #include <sys/gmon.h> 62 #endif 63 #endif 64 65 #include <machine/smp.h> 66 #include <machine/apic.h> 67 #include <machine/atomic.h> 68 #include <machine/cpufunc.h> 69 #include <machine/mpapic.h> 70 #include <machine/psl.h> 71 #include <machine/segments.h> 72 #include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 73 #include <machine/tss.h> 74 #include <machine/specialreg.h> 75 #include <machine/globaldata.h> 76 77 #if defined(APIC_IO) 78 #include <machine/md_var.h> /* setidt() */ 79 #include <i386/isa/icu.h> /* IPIs */ 80 #include <i386/isa/intr_machdep.h> /* IPIs */ 81 #endif /* APIC_IO */ 82 83 #if defined(TEST_DEFAULT_CONFIG) 84 #define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 85 #else 86 #define MPFPS_MPFB1 mpfps->mpfb1 87 #endif /* TEST_DEFAULT_CONFIG */ 88 89 #define WARMBOOT_TARGET 0 90 #define WARMBOOT_OFF (KERNBASE + 0x0467) 91 #define WARMBOOT_SEG (KERNBASE + 0x0469) 92 93 #ifdef PC98 94 #define BIOS_BASE (0xe8000) 95 #define BIOS_SIZE (0x18000) 96 #else 97 #define BIOS_BASE (0xf0000) 98 #define BIOS_SIZE (0x10000) 99 #endif 100 #define BIOS_COUNT (BIOS_SIZE/4) 101 102 #define CMOS_REG (0x70) 103 #define CMOS_DATA (0x71) 104 #define BIOS_RESET (0x0f) 105 #define BIOS_WARM (0x0a) 106 107 #define PROCENTRY_FLAG_EN 0x01 108 #define PROCENTRY_FLAG_BP 0x02 109 #define IOAPICENTRY_FLAG_EN 0x01 110 111 112 /* MP Floating Pointer Structure */ 113 typedef struct MPFPS { 114 char signature[4]; 115 void *pap; 116 u_char length; 117 u_char spec_rev; 118 
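	/* checksum: chosen so that all 16 bytes of the structure sum to zero (per MP spec 1.4) */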
u_char checksum; 119 u_char mpfb1; 120 u_char mpfb2; 121 u_char mpfb3; 122 u_char mpfb4; 123 u_char mpfb5; 124 } *mpfps_t; 125 126 /* MP Configuration Table Header */ 127 typedef struct MPCTH { 128 char signature[4]; 129 u_short base_table_length; 130 u_char spec_rev; 131 u_char checksum; 132 u_char oem_id[8]; 133 u_char product_id[12]; 134 void *oem_table_pointer; 135 u_short oem_table_size; 136 u_short entry_count; 137 void *apic_address; 138 u_short extended_table_length; 139 u_char extended_table_checksum; 140 u_char reserved; 141 } *mpcth_t; 142 143 144 typedef struct PROCENTRY { 145 u_char type; 146 u_char apic_id; 147 u_char apic_version; 148 u_char cpu_flags; 149 u_long cpu_signature; 150 u_long feature_flags; 151 u_long reserved1; 152 u_long reserved2; 153 } *proc_entry_ptr; 154 155 typedef struct BUSENTRY { 156 u_char type; 157 u_char bus_id; 158 char bus_type[6]; 159 } *bus_entry_ptr; 160 161 typedef struct IOAPICENTRY { 162 u_char type; 163 u_char apic_id; 164 u_char apic_version; 165 u_char apic_flags; 166 void *apic_address; 167 } *io_apic_entry_ptr; 168 169 typedef struct INTENTRY { 170 u_char type; 171 u_char int_type; 172 u_short int_flags; 173 u_char src_bus_id; 174 u_char src_bus_irq; 175 u_char dst_apic_id; 176 u_char dst_apic_int; 177 } *int_entry_ptr; 178 179 /* descriptions of MP basetable entries */ 180 typedef struct BASETABLE_ENTRY { 181 u_char type; 182 u_char length; 183 char name[16]; 184 } basetable_entry; 185 186 /* 187 * this code MUST be enabled here and in mpboot.s. 188 * it follows the very early stages of AP boot by placing values in CMOS ram. 189 * it NORMALLY will never be needed and thus the primitive method for enabling. 190 * 191 #define CHECK_POINTS 192 */ 193 194 #if defined(CHECK_POINTS) && !defined(PC98) 195 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 196 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 197 198 #define CHECK_INIT(D); \ 199 CHECK_WRITE(0x34, (D)); \ 200 CHECK_WRITE(0x35, (D)); \ 201 CHECK_WRITE(0x36, (D)); \ 202 CHECK_WRITE(0x37, (D)); \ 203 CHECK_WRITE(0x38, (D)); \ 204 CHECK_WRITE(0x39, (D)); 205 206 #define CHECK_PRINT(S); \ 207 printf("%s: %d, %d, %d, %d, %d, %d\n", \ 208 (S), \ 209 CHECK_READ(0x34), \ 210 CHECK_READ(0x35), \ 211 CHECK_READ(0x36), \ 212 CHECK_READ(0x37), \ 213 CHECK_READ(0x38), \ 214 CHECK_READ(0x39)); 215 216 #else /* CHECK_POINTS */ 217 218 #define CHECK_INIT(D) 219 #define CHECK_PRINT(S) 220 221 #endif /* CHECK_POINTS */ 222 223 /* 224 * Values to send to the POST hardware. 225 */ 226 #define MP_BOOTADDRESS_POST 0x10 227 #define MP_PROBE_POST 0x11 228 #define MPTABLE_PASS1_POST 0x12 229 230 #define MP_START_POST 0x13 231 #define MP_ENABLE_POST 0x14 232 #define MPTABLE_PASS2_POST 0x15 233 234 #define START_ALL_APS_POST 0x16 235 #define INSTALL_AP_TRAMP_POST 0x17 236 #define START_AP_POST 0x18 237 238 #define MP_ANNOUNCE_POST 0x19 239 240 /* used to hold the AP's until we are ready to release them */ 241 struct simplelock ap_boot_lock; 242 243 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 244 int current_postcode; 245 246 /** XXX FIXME: what system files declare these??? 
 */
extern struct region_descriptor r_gdt, r_idt;

int	bsp_apic_ready = 0;	/* flags usability of BSP apic */
int	mp_ncpus;		/* # of CPUs, including BSP */
int	mp_naps;		/* # of application processors */
int	mp_nbusses;		/* # of busses */
int	mp_napics;		/* # of IO APICs */
int	boot_cpu_id;		/* designated BSP */
vm_offset_t cpu_apic_address;
vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
extern	int nkpt;

u_int32_t cpu_apic_versions[MAXCPU];
u_int32_t *io_apic_versions;

#ifdef APIC_INTR_DIAGNOSTIC
int apic_itrace_enter[32];
int apic_itrace_tryisrlock[32];
int apic_itrace_gotisrlock[32];
int apic_itrace_active[32];
int apic_itrace_masked[32];
int apic_itrace_noisrlock[32];
int apic_itrace_masked2[32];
int apic_itrace_unmask[32];
int apic_itrace_noforward[32];
int apic_itrace_leave[32];
int apic_itrace_enter2[32];
int apic_itrace_doreti[32];
int apic_itrace_splz[32];
int apic_itrace_eoi[32];
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
unsigned short apic_itrace_debugbuffer[32768];
int apic_itrace_debugbuffer_idx;
struct simplelock apic_itrace_debuglock;
#endif
#endif

#ifdef APIC_INTR_REORDER
struct {
	volatile int *location;
	int bit;
} apic_isrbit_location[32];
#endif

struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int cpu_num_to_apic_id[NAPICID];
int io_num_to_apic_id[NAPICID];
int apic_id_to_logical[NAPICID];


/* Bitmap of all available CPUs */
u_int all_cpus;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

int smp_started;		/* has the system started? */

/*
 * Local data and functions.
 */

static int	mp_capable;
static u_int	boot_address;
static u_int	base_memory;

static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
static mpfps_t	mpfps;
static int	search_for_sig(u_int32_t target, int count);
static void	mp_enable(u_int boot_addr);

static void	mptable_pass1(void);
static int	mptable_pass2(void);
static void	default_mp_table(int type);
static void	fix_mp_table(void);
static void	setup_apic_irq_mapping(void);
static void	init_locks(void);
static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(int logicalCpu, u_int boot_addr);
static int	apic_int_is_bus_type(int intr, int bus_type);
static void	release_aps(void *dummy);

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem * 1024;	/* convert to bytes */

	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
	if ((base_memory - boot_address) < bootMP_size)
		boot_address -= 4096;	/* not enough, lower by 4k */

	return boot_address;
}


/*
 * Look for an Intel MP spec table (ie, SMP capable hardware).
364 */ 365 int 366 mp_probe(void) 367 { 368 int x; 369 u_long segment; 370 u_int32_t target; 371 372 POSTCODE(MP_PROBE_POST); 373 374 /* see if EBDA exists */ 375 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) { 376 /* search first 1K of EBDA */ 377 target = (u_int32_t) (segment << 4); 378 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 379 goto found; 380 } else { 381 /* last 1K of base memory, effective 'top of base' passed in */ 382 target = (u_int32_t) (base_memory - 0x400); 383 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 384 goto found; 385 } 386 387 /* search the BIOS */ 388 target = (u_int32_t) BIOS_BASE; 389 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0) 390 goto found; 391 392 /* nothing found */ 393 mpfps = (mpfps_t)0; 394 mp_capable = 0; 395 return 0; 396 397 found: 398 /* calculate needed resources */ 399 mpfps = (mpfps_t)x; 400 mptable_pass1(); 401 402 /* flag fact that we are running multiple processors */ 403 mp_capable = 1; 404 return 1; 405 } 406 407 408 /* 409 * Initialize the SMP hardware and the APIC and start up the AP's. 410 */ 411 void 412 mp_start(void) 413 { 414 POSTCODE(MP_START_POST); 415 416 /* look for MP capable motherboard */ 417 if (mp_capable) 418 mp_enable(boot_address); 419 else 420 panic("MP hardware not found!"); 421 } 422 423 424 /* 425 * Print various information about the SMP system hardware and setup. 426 */ 427 void 428 mp_announce(void) 429 { 430 int x; 431 432 POSTCODE(MP_ANNOUNCE_POST); 433 434 printf("FreeBSD/SMP: Multiprocessor motherboard\n"); 435 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0)); 436 printf(", version: 0x%08x", cpu_apic_versions[0]); 437 printf(", at 0x%08x\n", cpu_apic_address); 438 for (x = 1; x <= mp_naps; ++x) { 439 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x)); 440 printf(", version: 0x%08x", cpu_apic_versions[x]); 441 printf(", at 0x%08x\n", cpu_apic_address); 442 } 443 444 #if defined(APIC_IO) 445 for (x = 0; x < mp_napics; ++x) { 446 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x)); 447 printf(", version: 0x%08x", io_apic_versions[x]); 448 printf(", at 0x%08x\n", io_apic_address[x]); 449 } 450 #else 451 printf(" Warning: APIC I/O disabled\n"); 452 #endif /* APIC_IO */ 453 } 454 455 /* 456 * AP cpu's call this to sync up protected mode. 457 */ 458 void 459 init_secondary(void) 460 { 461 int gsel_tss; 462 int x, myid = bootAP; 463 464 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid]; 465 gdt_segs[GPROC0_SEL].ssd_base = 466 (int) &SMP_prvspace[myid].globaldata.gd_common_tss; 467 SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid]; 468 469 for (x = 0; x < NGDT; x++) { 470 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd); 471 } 472 473 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 474 r_gdt.rd_base = (int) &gdt[myid * NGDT]; 475 lgdt(&r_gdt); /* does magic intra-segment return */ 476 477 lidt(&r_idt); 478 479 lldt(_default_ldt); 480 #ifdef USER_LDT 481 PCPU_SET(currentldt, _default_ldt); 482 #endif 483 484 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 485 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 486 common_tss.tss_esp0 = 0; /* not used until after switch */ 487 common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); 488 common_tss.tss_ioopt = (sizeof common_tss) << 16; 489 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; 490 common_tssd = *tss_gdt; 491 ltr(gsel_tss); 492 493 pmap_set_opt(); 494 } 495 496 497 #if defined(APIC_IO) 498 /* 499 * Final configuration of the BSP's local APIC: 500 * - disable 'pic mode'. 501 * - disable 'virtual wire mode'. 502 * - enable NMI. 
503 */ 504 void 505 bsp_apic_configure(void) 506 { 507 u_char byte; 508 u_int32_t temp; 509 510 /* leave 'pic mode' if necessary */ 511 if (picmode) { 512 outb(0x22, 0x70); /* select IMCR */ 513 byte = inb(0x23); /* current contents */ 514 byte |= 0x01; /* mask external INTR */ 515 outb(0x23, byte); /* disconnect 8259s/NMI */ 516 } 517 518 /* mask lint0 (the 8259 'virtual wire' connection) */ 519 temp = lapic.lvt_lint0; 520 temp |= APIC_LVT_M; /* set the mask */ 521 lapic.lvt_lint0 = temp; 522 523 /* setup lint1 to handle NMI */ 524 temp = lapic.lvt_lint1; 525 temp &= ~APIC_LVT_M; /* clear the mask */ 526 lapic.lvt_lint1 = temp; 527 528 if (bootverbose) 529 apic_dump("bsp_apic_configure()"); 530 } 531 #endif /* APIC_IO */ 532 533 534 /******************************************************************* 535 * local functions and data 536 */ 537 538 /* 539 * start the SMP system 540 */ 541 static void 542 mp_enable(u_int boot_addr) 543 { 544 int x; 545 #if defined(APIC_IO) 546 int apic; 547 u_int ux; 548 #endif /* APIC_IO */ 549 550 POSTCODE(MP_ENABLE_POST); 551 552 /* turn on 4MB of V == P addressing so we can get to MP table */ 553 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 554 invltlb(); 555 556 /* examine the MP table for needed info, uses physical addresses */ 557 x = mptable_pass2(); 558 559 *(int *)PTD = 0; 560 invltlb(); 561 562 /* can't process default configs till the CPU APIC is pmapped */ 563 if (x) 564 default_mp_table(x); 565 566 /* post scan cleanup */ 567 fix_mp_table(); 568 setup_apic_irq_mapping(); 569 570 #if defined(APIC_IO) 571 572 /* fill the LOGICAL io_apic_versions table */ 573 for (apic = 0; apic < mp_napics; ++apic) { 574 ux = io_apic_read(apic, IOAPIC_VER); 575 io_apic_versions[apic] = ux; 576 io_apic_set_id(apic, IO_TO_ID(apic)); 577 } 578 579 /* program each IO APIC in the system */ 580 for (apic = 0; apic < mp_napics; ++apic) 581 if (io_apic_setup(apic) < 0) 582 panic("IO APIC setup failure"); 583 584 /* install a 'Spurious INTerrupt' vector */ 585 setidt(XSPURIOUSINT_OFFSET, Xspuriousint, 586 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 587 588 /* install an inter-CPU IPI for TLB invalidation */ 589 setidt(XINVLTLB_OFFSET, Xinvltlb, 590 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 591 592 #ifdef BETTER_CLOCK 593 /* install an inter-CPU IPI for reading processor state */ 594 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate, 595 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 596 #endif 597 598 /* install an inter-CPU IPI for all-CPU rendezvous */ 599 setidt(XRENDEZVOUS_OFFSET, Xrendezvous, 600 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 601 602 /* install an inter-CPU IPI for forcing an additional software trap */ 603 setidt(XCPUAST_OFFSET, Xcpuast, 604 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 605 606 /* install an inter-CPU IPI for interrupt forwarding */ 607 setidt(XFORWARD_IRQ_OFFSET, Xforward_irq, 608 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 609 610 /* install an inter-CPU IPI for CPU stop/restart */ 611 setidt(XCPUSTOP_OFFSET, Xcpustop, 612 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 613 614 #if defined(TEST_TEST1) 615 /* install a "fake hardware INTerrupt" vector */ 616 setidt(XTEST1_OFFSET, Xtest1, 617 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 618 #endif /** TEST_TEST1 */ 619 620 #endif /* APIC_IO */ 621 622 /* initialize all SMP locks */ 623 init_locks(); 624 625 /* obtain the ap_boot_lock */ 626 s_lock(&ap_boot_lock); 627 628 /* start each Application Processor */ 629 
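	/*
	 * Note: ap_boot_lock is still held here, so the APs started below
	 * will block at the top of ap_init() until the lock is released
	 * (see release_aps()).
	 */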
start_all_aps(boot_addr); 630 } 631 632 633 /* 634 * look for the MP spec signature 635 */ 636 637 /* string defined by the Intel MP Spec as identifying the MP table */ 638 #define MP_SIG 0x5f504d5f /* _MP_ */ 639 #define NEXT(X) ((X) += 4) 640 static int 641 search_for_sig(u_int32_t target, int count) 642 { 643 int x; 644 u_int32_t *addr = (u_int32_t *) (KERNBASE + target); 645 646 for (x = 0; x < count; NEXT(x)) 647 if (addr[x] == MP_SIG) 648 /* make array index a byte index */ 649 return (target + (x * sizeof(u_int32_t))); 650 651 return -1; 652 } 653 654 655 static basetable_entry basetable_entry_types[] = 656 { 657 {0, 20, "Processor"}, 658 {1, 8, "Bus"}, 659 {2, 8, "I/O APIC"}, 660 {3, 8, "I/O INT"}, 661 {4, 8, "Local INT"} 662 }; 663 664 typedef struct BUSDATA { 665 u_char bus_id; 666 enum busTypes bus_type; 667 } bus_datum; 668 669 typedef struct INTDATA { 670 u_char int_type; 671 u_short int_flags; 672 u_char src_bus_id; 673 u_char src_bus_irq; 674 u_char dst_apic_id; 675 u_char dst_apic_int; 676 u_char int_vector; 677 } io_int, local_int; 678 679 typedef struct BUSTYPENAME { 680 u_char type; 681 char name[7]; 682 } bus_type_name; 683 684 static bus_type_name bus_type_table[] = 685 { 686 {CBUS, "CBUS"}, 687 {CBUSII, "CBUSII"}, 688 {EISA, "EISA"}, 689 {MCA, "MCA"}, 690 {UNKNOWN_BUSTYPE, "---"}, 691 {ISA, "ISA"}, 692 {MCA, "MCA"}, 693 {UNKNOWN_BUSTYPE, "---"}, 694 {UNKNOWN_BUSTYPE, "---"}, 695 {UNKNOWN_BUSTYPE, "---"}, 696 {UNKNOWN_BUSTYPE, "---"}, 697 {UNKNOWN_BUSTYPE, "---"}, 698 {PCI, "PCI"}, 699 {UNKNOWN_BUSTYPE, "---"}, 700 {UNKNOWN_BUSTYPE, "---"}, 701 {UNKNOWN_BUSTYPE, "---"}, 702 {UNKNOWN_BUSTYPE, "---"}, 703 {XPRESS, "XPRESS"}, 704 {UNKNOWN_BUSTYPE, "---"} 705 }; 706 /* from MP spec v1.4, table 5-1 */ 707 static int default_data[7][5] = 708 { 709 /* nbus, id0, type0, id1, type1 */ 710 {1, 0, ISA, 255, 255}, 711 {1, 0, EISA, 255, 255}, 712 {1, 0, EISA, 255, 255}, 713 {1, 0, MCA, 255, 255}, 714 {2, 0, ISA, 1, PCI}, 715 {2, 0, EISA, 1, PCI}, 716 {2, 0, MCA, 1, PCI} 717 }; 718 719 720 /* the bus data */ 721 static bus_datum *bus_data; 722 723 /* the IO INT data, one entry per possible APIC INTerrupt */ 724 static io_int *io_apic_ints; 725 726 static int nintrs; 727 728 static int processor_entry __P((proc_entry_ptr entry, int cpu)); 729 static int bus_entry __P((bus_entry_ptr entry, int bus)); 730 static int io_apic_entry __P((io_apic_entry_ptr entry, int apic)); 731 static int int_entry __P((int_entry_ptr entry, int intr)); 732 static int lookup_bus_type __P((char *name)); 733 734 735 /* 736 * 1st pass on motherboard's Intel MP specification table. 
737 * 738 * initializes: 739 * mp_ncpus = 1 740 * 741 * determines: 742 * cpu_apic_address (common to all CPUs) 743 * io_apic_address[N] 744 * mp_naps 745 * mp_nbusses 746 * mp_napics 747 * nintrs 748 */ 749 static void 750 mptable_pass1(void) 751 { 752 int x; 753 mpcth_t cth; 754 int totalSize; 755 void* position; 756 int count; 757 int type; 758 759 POSTCODE(MPTABLE_PASS1_POST); 760 761 /* clear various tables */ 762 for (x = 0; x < NAPICID; ++x) { 763 io_apic_address[x] = ~0; /* IO APIC address table */ 764 } 765 766 /* init everything to empty */ 767 mp_naps = 0; 768 mp_nbusses = 0; 769 mp_napics = 0; 770 nintrs = 0; 771 772 /* check for use of 'default' configuration */ 773 if (MPFPS_MPFB1 != 0) { 774 /* use default addresses */ 775 cpu_apic_address = DEFAULT_APIC_BASE; 776 io_apic_address[0] = DEFAULT_IO_APIC_BASE; 777 778 /* fill in with defaults */ 779 mp_naps = 2; /* includes BSP */ 780 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0]; 781 #if defined(APIC_IO) 782 mp_napics = 1; 783 nintrs = 16; 784 #endif /* APIC_IO */ 785 } 786 else { 787 if ((cth = mpfps->pap) == 0) 788 panic("MP Configuration Table Header MISSING!"); 789 790 cpu_apic_address = (vm_offset_t) cth->apic_address; 791 792 /* walk the table, recording info of interest */ 793 totalSize = cth->base_table_length - sizeof(struct MPCTH); 794 position = (u_char *) cth + sizeof(struct MPCTH); 795 count = cth->entry_count; 796 797 while (count--) { 798 switch (type = *(u_char *) position) { 799 case 0: /* processor_entry */ 800 if (((proc_entry_ptr)position)->cpu_flags 801 & PROCENTRY_FLAG_EN) 802 ++mp_naps; 803 break; 804 case 1: /* bus_entry */ 805 ++mp_nbusses; 806 break; 807 case 2: /* io_apic_entry */ 808 if (((io_apic_entry_ptr)position)->apic_flags 809 & IOAPICENTRY_FLAG_EN) 810 io_apic_address[mp_napics++] = 811 (vm_offset_t)((io_apic_entry_ptr) 812 position)->apic_address; 813 break; 814 case 3: /* int_entry */ 815 ++nintrs; 816 break; 817 case 4: /* int_entry */ 818 break; 819 default: 820 panic("mpfps Base Table HOSED!"); 821 /* NOTREACHED */ 822 } 823 824 totalSize -= basetable_entry_types[type].length; 825 (u_char*)position += basetable_entry_types[type].length; 826 } 827 } 828 829 /* qualify the numbers */ 830 if (mp_naps > MAXCPU) { 831 printf("Warning: only using %d of %d available CPUs!\n", 832 MAXCPU, mp_naps); 833 mp_naps = MAXCPU; 834 } 835 836 /* 837 * Count the BSP. 838 * This is also used as a counter while starting the APs. 839 */ 840 mp_ncpus = 1; 841 842 --mp_naps; /* subtract the BSP */ 843 } 844 845 846 /* 847 * 2nd pass on motherboard's Intel MP specification table. 848 * 849 * sets: 850 * boot_cpu_id 851 * ID_TO_IO(N), phy APIC ID to log CPU/IO table 852 * CPU_TO_ID(N), logical CPU to APIC ID table 853 * IO_TO_ID(N), logical IO to APIC ID table 854 * bus_data[N] 855 * io_apic_ints[N] 856 */ 857 static int 858 mptable_pass2(void) 859 { 860 int x; 861 mpcth_t cth; 862 int totalSize; 863 void* position; 864 int count; 865 int type; 866 int apic, bus, cpu, intr; 867 int i, j; 868 int pgeflag; 869 870 POSTCODE(MPTABLE_PASS2_POST); 871 872 pgeflag = 0; /* XXX - Not used under SMP yet. 
*/ 873 874 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics, 875 M_DEVBUF, M_WAITOK); 876 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics, 877 M_DEVBUF, M_WAITOK); 878 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * nintrs, 879 M_DEVBUF, M_WAITOK); 880 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses, 881 M_DEVBUF, M_WAITOK); 882 883 bzero(ioapic, sizeof(ioapic_t *) * mp_napics); 884 885 for (i = 0; i < mp_napics; i++) { 886 for (j = 0; j < mp_napics; j++) { 887 /* same page frame as a previous IO apic? */ 888 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 889 (io_apic_address[i] & PG_FRAME)) { 890 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace 891 + (NPTEPG-2-j) * PAGE_SIZE 892 + (io_apic_address[i] & PAGE_MASK)); 893 break; 894 } 895 /* use this slot if available */ 896 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) { 897 SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW | 898 pgeflag | (io_apic_address[i] & PG_FRAME)); 899 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace 900 + (NPTEPG-2-j) * PAGE_SIZE 901 + (io_apic_address[i] & PAGE_MASK)); 902 break; 903 } 904 } 905 } 906 907 /* clear various tables */ 908 for (x = 0; x < NAPICID; ++x) { 909 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */ 910 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */ 911 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */ 912 } 913 914 /* clear bus data table */ 915 for (x = 0; x < mp_nbusses; ++x) 916 bus_data[x].bus_id = 0xff; 917 918 /* clear IO APIC INT table */ 919 for (x = 0; x < nintrs; ++x) { 920 io_apic_ints[x].int_type = 0xff; 921 io_apic_ints[x].int_vector = 0xff; 922 } 923 924 /* setup the cpu/apic mapping arrays */ 925 boot_cpu_id = -1; 926 927 /* record whether PIC or virtual-wire mode */ 928 picmode = (mpfps->mpfb2 & 0x80) ? 
1 : 0; 929 930 /* check for use of 'default' configuration */ 931 if (MPFPS_MPFB1 != 0) 932 return MPFPS_MPFB1; /* return default configuration type */ 933 934 if ((cth = mpfps->pap) == 0) 935 panic("MP Configuration Table Header MISSING!"); 936 937 /* walk the table, recording info of interest */ 938 totalSize = cth->base_table_length - sizeof(struct MPCTH); 939 position = (u_char *) cth + sizeof(struct MPCTH); 940 count = cth->entry_count; 941 apic = bus = intr = 0; 942 cpu = 1; /* pre-count the BSP */ 943 944 while (count--) { 945 switch (type = *(u_char *) position) { 946 case 0: 947 if (processor_entry(position, cpu)) 948 ++cpu; 949 break; 950 case 1: 951 if (bus_entry(position, bus)) 952 ++bus; 953 break; 954 case 2: 955 if (io_apic_entry(position, apic)) 956 ++apic; 957 break; 958 case 3: 959 if (int_entry(position, intr)) 960 ++intr; 961 break; 962 case 4: 963 /* int_entry(position); */ 964 break; 965 default: 966 panic("mpfps Base Table HOSED!"); 967 /* NOTREACHED */ 968 } 969 970 totalSize -= basetable_entry_types[type].length; 971 (u_char *) position += basetable_entry_types[type].length; 972 } 973 974 if (boot_cpu_id == -1) 975 panic("NO BSP found!"); 976 977 /* report fact that its NOT a default configuration */ 978 return 0; 979 } 980 981 982 void 983 assign_apic_irq(int apic, int intpin, int irq) 984 { 985 int x; 986 987 if (int_to_apicintpin[irq].ioapic != -1) 988 panic("assign_apic_irq: inconsistent table"); 989 990 int_to_apicintpin[irq].ioapic = apic; 991 int_to_apicintpin[irq].int_pin = intpin; 992 int_to_apicintpin[irq].apic_address = ioapic[apic]; 993 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin; 994 995 for (x = 0; x < nintrs; x++) { 996 if ((io_apic_ints[x].int_type == 0 || 997 io_apic_ints[x].int_type == 3) && 998 io_apic_ints[x].int_vector == 0xff && 999 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) && 1000 io_apic_ints[x].dst_apic_int == intpin) 1001 io_apic_ints[x].int_vector = irq; 1002 } 1003 } 1004 1005 void 1006 revoke_apic_irq(int irq) 1007 { 1008 int x; 1009 int oldapic; 1010 int oldintpin; 1011 1012 if (int_to_apicintpin[irq].ioapic == -1) 1013 panic("assign_apic_irq: inconsistent table"); 1014 1015 oldapic = int_to_apicintpin[irq].ioapic; 1016 oldintpin = int_to_apicintpin[irq].int_pin; 1017 1018 int_to_apicintpin[irq].ioapic = -1; 1019 int_to_apicintpin[irq].int_pin = 0; 1020 int_to_apicintpin[irq].apic_address = NULL; 1021 int_to_apicintpin[irq].redirindex = 0; 1022 1023 for (x = 0; x < nintrs; x++) { 1024 if ((io_apic_ints[x].int_type == 0 || 1025 io_apic_ints[x].int_type == 3) && 1026 io_apic_ints[x].int_vector == 0xff && 1027 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1028 io_apic_ints[x].dst_apic_int == oldintpin) 1029 io_apic_ints[x].int_vector = 0xff; 1030 } 1031 } 1032 1033 1034 1035 static void 1036 swap_apic_id(int apic, int oldid, int newid) 1037 { 1038 int x; 1039 int oapic; 1040 1041 1042 if (oldid == newid) 1043 return; /* Nothing to do */ 1044 1045 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n", 1046 apic, oldid, newid); 1047 1048 /* Swap physical APIC IDs in interrupt entries */ 1049 for (x = 0; x < nintrs; x++) { 1050 if (io_apic_ints[x].dst_apic_id == oldid) 1051 io_apic_ints[x].dst_apic_id = newid; 1052 else if (io_apic_ints[x].dst_apic_id == newid) 1053 io_apic_ints[x].dst_apic_id = oldid; 1054 } 1055 1056 /* Swap physical APIC IDs in IO_TO_ID mappings */ 1057 for (oapic = 0; oapic < mp_napics; oapic++) 1058 if (IO_TO_ID(oapic) == newid) 1059 break; 1060 1061 if (oapic < mp_napics) { 
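		/* another IO APIC already uses newid; give it our old id so the IDs stay unique */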
1062 printf("Changing APIC ID for IO APIC #%d from " 1063 "%d to %d in MP table\n", 1064 oapic, newid, oldid); 1065 IO_TO_ID(oapic) = oldid; 1066 } 1067 IO_TO_ID(apic) = newid; 1068 } 1069 1070 1071 static void 1072 fix_id_to_io_mapping(void) 1073 { 1074 int x; 1075 1076 for (x = 0; x < NAPICID; x++) 1077 ID_TO_IO(x) = -1; 1078 1079 for (x = 0; x <= mp_naps; x++) 1080 if (CPU_TO_ID(x) < NAPICID) 1081 ID_TO_IO(CPU_TO_ID(x)) = x; 1082 1083 for (x = 0; x < mp_napics; x++) 1084 if (IO_TO_ID(x) < NAPICID) 1085 ID_TO_IO(IO_TO_ID(x)) = x; 1086 } 1087 1088 1089 static int 1090 first_free_apic_id(void) 1091 { 1092 int freeid, x; 1093 1094 for (freeid = 0; freeid < NAPICID; freeid++) { 1095 for (x = 0; x <= mp_naps; x++) 1096 if (CPU_TO_ID(x) == freeid) 1097 break; 1098 if (x <= mp_naps) 1099 continue; 1100 for (x = 0; x < mp_napics; x++) 1101 if (IO_TO_ID(x) == freeid) 1102 break; 1103 if (x < mp_napics) 1104 continue; 1105 return freeid; 1106 } 1107 return freeid; 1108 } 1109 1110 1111 static int 1112 io_apic_id_acceptable(int apic, int id) 1113 { 1114 int cpu; /* Logical CPU number */ 1115 int oapic; /* Logical IO APIC number for other IO APIC */ 1116 1117 if (id >= NAPICID) 1118 return 0; /* Out of range */ 1119 1120 for (cpu = 0; cpu <= mp_naps; cpu++) 1121 if (CPU_TO_ID(cpu) == id) 1122 return 0; /* Conflict with CPU */ 1123 1124 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++) 1125 if (IO_TO_ID(oapic) == id) 1126 return 0; /* Conflict with other APIC */ 1127 1128 return 1; /* ID is acceptable for IO APIC */ 1129 } 1130 1131 1132 /* 1133 * parse an Intel MP specification table 1134 */ 1135 static void 1136 fix_mp_table(void) 1137 { 1138 int x; 1139 int id; 1140 int bus_0 = 0; /* Stop GCC warning */ 1141 int bus_pci = 0; /* Stop GCC warning */ 1142 int num_pci_bus; 1143 int apic; /* IO APIC unit number */ 1144 int freeid; /* Free physical APIC ID */ 1145 int physid; /* Current physical IO APIC ID */ 1146 1147 /* 1148 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1149 * did it wrong. The MP spec says that when more than 1 PCI bus 1150 * exists the BIOS must begin with bus entries for the PCI bus and use 1151 * actual PCI bus numbering. This implies that when only 1 PCI bus 1152 * exists the BIOS can choose to ignore this ordering, and indeed many 1153 * MP motherboards do ignore it. This causes a problem when the PCI 1154 * sub-system makes requests of the MP sub-system based on PCI bus 1155 * numbers. So here we look for the situation and renumber the 1156 * busses and associated INTs in an effort to "make it right". 
1157 */ 1158 1159 /* find bus 0, PCI bus, count the number of PCI busses */ 1160 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) { 1161 if (bus_data[x].bus_id == 0) { 1162 bus_0 = x; 1163 } 1164 if (bus_data[x].bus_type == PCI) { 1165 ++num_pci_bus; 1166 bus_pci = x; 1167 } 1168 } 1169 /* 1170 * bus_0 == slot of bus with ID of 0 1171 * bus_pci == slot of last PCI bus encountered 1172 */ 1173 1174 /* check the 1 PCI bus case for sanity */ 1175 /* if it is number 0 all is well */ 1176 if (num_pci_bus == 1 && 1177 bus_data[bus_pci].bus_id != 0) { 1178 1179 /* mis-numbered, swap with whichever bus uses slot 0 */ 1180 1181 /* swap the bus entry types */ 1182 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type; 1183 bus_data[bus_0].bus_type = PCI; 1184 1185 /* swap each relavant INTerrupt entry */ 1186 id = bus_data[bus_pci].bus_id; 1187 for (x = 0; x < nintrs; ++x) { 1188 if (io_apic_ints[x].src_bus_id == id) { 1189 io_apic_ints[x].src_bus_id = 0; 1190 } 1191 else if (io_apic_ints[x].src_bus_id == 0) { 1192 io_apic_ints[x].src_bus_id = id; 1193 } 1194 } 1195 } 1196 1197 /* Assign IO APIC IDs. 1198 * 1199 * First try the existing ID. If a conflict is detected, try 1200 * the ID in the MP table. If a conflict is still detected, find 1201 * a free id. 1202 * 1203 * We cannot use the ID_TO_IO table before all conflicts has been 1204 * resolved and the table has been corrected. 1205 */ 1206 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */ 1207 1208 /* First try to use the value set by the BIOS */ 1209 physid = io_apic_get_id(apic); 1210 if (io_apic_id_acceptable(apic, physid)) { 1211 if (IO_TO_ID(apic) != physid) 1212 swap_apic_id(apic, IO_TO_ID(apic), physid); 1213 continue; 1214 } 1215 1216 /* Then check if the value in the MP table is acceptable */ 1217 if (io_apic_id_acceptable(apic, IO_TO_ID(apic))) 1218 continue; 1219 1220 /* Last resort, find a free APIC ID and use it */ 1221 freeid = first_free_apic_id(); 1222 if (freeid >= NAPICID) 1223 panic("No free physical APIC IDs found"); 1224 1225 if (io_apic_id_acceptable(apic, freeid)) { 1226 swap_apic_id(apic, IO_TO_ID(apic), freeid); 1227 continue; 1228 } 1229 panic("Free physical APIC ID not usable"); 1230 } 1231 fix_id_to_io_mapping(); 1232 } 1233 1234 1235 /* Assign low level interrupt handlers */ 1236 static void 1237 setup_apic_irq_mapping(void) 1238 { 1239 int x; 1240 int int_vector; 1241 1242 /* Clear array */ 1243 for (x = 0; x < APIC_INTMAPSIZE; x++) { 1244 int_to_apicintpin[x].ioapic = -1; 1245 int_to_apicintpin[x].int_pin = 0; 1246 int_to_apicintpin[x].apic_address = NULL; 1247 int_to_apicintpin[x].redirindex = 0; 1248 } 1249 1250 /* First assign ISA/EISA interrupts */ 1251 for (x = 0; x < nintrs; x++) { 1252 int_vector = io_apic_ints[x].src_bus_irq; 1253 if (int_vector < APIC_INTMAPSIZE && 1254 io_apic_ints[x].int_vector == 0xff && 1255 int_to_apicintpin[int_vector].ioapic == -1 && 1256 (apic_int_is_bus_type(x, ISA) || 1257 apic_int_is_bus_type(x, EISA)) && 1258 io_apic_ints[x].int_type == 0) { 1259 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1260 io_apic_ints[x].dst_apic_int, 1261 int_vector); 1262 } 1263 } 1264 1265 /* Assign interrupts on first 24 intpins on IOAPIC #0 */ 1266 for (x = 0; x < nintrs; x++) { 1267 int_vector = io_apic_ints[x].dst_apic_int; 1268 if (int_vector < APIC_INTMAPSIZE && 1269 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) && 1270 io_apic_ints[x].int_vector == 0xff && 1271 int_to_apicintpin[int_vector].ioapic == -1 && 1272 (io_apic_ints[x].int_type == 0 || 1273 
io_apic_ints[x].int_type == 3)) { 1274 assign_apic_irq(0, 1275 io_apic_ints[x].dst_apic_int, 1276 int_vector); 1277 } 1278 } 1279 /* 1280 * Assign interrupts for remaining intpins. 1281 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates 1282 * that an entry for ISA/EISA irq 0 exist, and a fallback to mixed mode 1283 * due to 8254 interrupts not being delivered can reuse that low level 1284 * interrupt handler. 1285 */ 1286 int_vector = 0; 1287 while (int_vector < APIC_INTMAPSIZE && 1288 int_to_apicintpin[int_vector].ioapic != -1) 1289 int_vector++; 1290 for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) { 1291 if ((io_apic_ints[x].int_type == 0 || 1292 (io_apic_ints[x].int_type == 3 && 1293 (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) || 1294 io_apic_ints[x].dst_apic_int != 0))) && 1295 io_apic_ints[x].int_vector == 0xff) { 1296 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1297 io_apic_ints[x].dst_apic_int, 1298 int_vector); 1299 int_vector++; 1300 while (int_vector < APIC_INTMAPSIZE && 1301 int_to_apicintpin[int_vector].ioapic != -1) 1302 int_vector++; 1303 } 1304 } 1305 } 1306 1307 1308 static int 1309 processor_entry(proc_entry_ptr entry, int cpu) 1310 { 1311 /* check for usability */ 1312 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN)) 1313 return 0; 1314 1315 if(entry->apic_id >= NAPICID) 1316 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1); 1317 /* check for BSP flag */ 1318 if (entry->cpu_flags & PROCENTRY_FLAG_BP) { 1319 boot_cpu_id = entry->apic_id; 1320 CPU_TO_ID(0) = entry->apic_id; 1321 ID_TO_CPU(entry->apic_id) = 0; 1322 return 0; /* its already been counted */ 1323 } 1324 1325 /* add another AP to list, if less than max number of CPUs */ 1326 else if (cpu < MAXCPU) { 1327 CPU_TO_ID(cpu) = entry->apic_id; 1328 ID_TO_CPU(entry->apic_id) = cpu; 1329 return 1; 1330 } 1331 1332 return 0; 1333 } 1334 1335 1336 static int 1337 bus_entry(bus_entry_ptr entry, int bus) 1338 { 1339 int x; 1340 char c, name[8]; 1341 1342 /* encode the name into an index */ 1343 for (x = 0; x < 6; ++x) { 1344 if ((c = entry->bus_type[x]) == ' ') 1345 break; 1346 name[x] = c; 1347 } 1348 name[x] = '\0'; 1349 1350 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE) 1351 panic("unknown bus type: '%s'", name); 1352 1353 bus_data[bus].bus_id = entry->bus_id; 1354 bus_data[bus].bus_type = x; 1355 1356 return 1; 1357 } 1358 1359 1360 static int 1361 io_apic_entry(io_apic_entry_ptr entry, int apic) 1362 { 1363 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1364 return 0; 1365 1366 IO_TO_ID(apic) = entry->apic_id; 1367 if (entry->apic_id < NAPICID) 1368 ID_TO_IO(entry->apic_id) = apic; 1369 1370 return 1; 1371 } 1372 1373 1374 static int 1375 lookup_bus_type(char *name) 1376 { 1377 int x; 1378 1379 for (x = 0; x < MAX_BUSTYPE; ++x) 1380 if (strcmp(bus_type_table[x].name, name) == 0) 1381 return bus_type_table[x].type; 1382 1383 return UNKNOWN_BUSTYPE; 1384 } 1385 1386 1387 static int 1388 int_entry(int_entry_ptr entry, int intr) 1389 { 1390 int apic; 1391 1392 io_apic_ints[intr].int_type = entry->int_type; 1393 io_apic_ints[intr].int_flags = entry->int_flags; 1394 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1395 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1396 if (entry->dst_apic_id == 255) { 1397 /* This signal goes to all IO APICS. 
Select an IO APIC 1398 with sufficient number of interrupt pins */ 1399 for (apic = 0; apic < mp_napics; apic++) 1400 if (((io_apic_read(apic, IOAPIC_VER) & 1401 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1402 entry->dst_apic_int) 1403 break; 1404 if (apic < mp_napics) 1405 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1406 else 1407 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1408 } else 1409 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1410 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1411 1412 return 1; 1413 } 1414 1415 1416 static int 1417 apic_int_is_bus_type(int intr, int bus_type) 1418 { 1419 int bus; 1420 1421 for (bus = 0; bus < mp_nbusses; ++bus) 1422 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1423 && ((int) bus_data[bus].bus_type == bus_type)) 1424 return 1; 1425 1426 return 0; 1427 } 1428 1429 1430 /* 1431 * Given a traditional ISA INT mask, return an APIC mask. 1432 */ 1433 u_int 1434 isa_apic_mask(u_int isa_mask) 1435 { 1436 int isa_irq; 1437 int apic_pin; 1438 1439 #if defined(SKIP_IRQ15_REDIRECT) 1440 if (isa_mask == (1 << 15)) { 1441 printf("skipping ISA IRQ15 redirect\n"); 1442 return isa_mask; 1443 } 1444 #endif /* SKIP_IRQ15_REDIRECT */ 1445 1446 isa_irq = ffs(isa_mask); /* find its bit position */ 1447 if (isa_irq == 0) /* doesn't exist */ 1448 return 0; 1449 --isa_irq; /* make it zero based */ 1450 1451 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1452 if (apic_pin == -1) 1453 return 0; 1454 1455 return (1 << apic_pin); /* convert pin# to a mask */ 1456 } 1457 1458 1459 /* 1460 * Determine which APIC pin an ISA/EISA INT is attached to. 1461 */ 1462 #define INTTYPE(I) (io_apic_ints[(I)].int_type) 1463 #define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1464 #define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1465 #define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1466 1467 #define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1468 int 1469 isa_apic_irq(int isa_irq) 1470 { 1471 int intr; 1472 1473 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1474 if (INTTYPE(intr) == 0) { /* standard INT */ 1475 if (SRCBUSIRQ(intr) == isa_irq) { 1476 if (apic_int_is_bus_type(intr, ISA) || 1477 apic_int_is_bus_type(intr, EISA)) 1478 return INTIRQ(intr); /* found */ 1479 } 1480 } 1481 } 1482 return -1; /* NOT found */ 1483 } 1484 1485 1486 /* 1487 * Determine which APIC pin a PCI INT is attached to. 
1488 */ 1489 #define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1490 #define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1491 #define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1492 int 1493 pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1494 { 1495 int intr; 1496 1497 --pciInt; /* zero based */ 1498 1499 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1500 if ((INTTYPE(intr) == 0) /* standard INT */ 1501 && (SRCBUSID(intr) == pciBus) 1502 && (SRCBUSDEVICE(intr) == pciDevice) 1503 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1504 if (apic_int_is_bus_type(intr, PCI)) 1505 return INTIRQ(intr); /* exact match */ 1506 1507 return -1; /* NOT found */ 1508 } 1509 1510 int 1511 next_apic_irq(int irq) 1512 { 1513 int intr, ointr; 1514 int bus, bustype; 1515 1516 bus = 0; 1517 bustype = 0; 1518 for (intr = 0; intr < nintrs; intr++) { 1519 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1520 continue; 1521 bus = SRCBUSID(intr); 1522 bustype = apic_bus_type(bus); 1523 if (bustype != ISA && 1524 bustype != EISA && 1525 bustype != PCI) 1526 continue; 1527 break; 1528 } 1529 if (intr >= nintrs) { 1530 return -1; 1531 } 1532 for (ointr = intr + 1; ointr < nintrs; ointr++) { 1533 if (INTTYPE(ointr) != 0) 1534 continue; 1535 if (bus != SRCBUSID(ointr)) 1536 continue; 1537 if (bustype == PCI) { 1538 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1539 continue; 1540 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1541 continue; 1542 } 1543 if (bustype == ISA || bustype == EISA) { 1544 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1545 continue; 1546 } 1547 if (INTPIN(intr) == INTPIN(ointr)) 1548 continue; 1549 break; 1550 } 1551 if (ointr >= nintrs) { 1552 return -1; 1553 } 1554 return INTIRQ(ointr); 1555 } 1556 #undef SRCBUSLINE 1557 #undef SRCBUSDEVICE 1558 #undef SRCBUSID 1559 #undef SRCBUSIRQ 1560 1561 #undef INTPIN 1562 #undef INTIRQ 1563 #undef INTAPIC 1564 #undef INTTYPE 1565 1566 1567 /* 1568 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt. 1569 * 1570 * XXX FIXME: 1571 * Exactly what this means is unclear at this point. It is a solution 1572 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1573 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1574 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1575 * option. 
1576 */ 1577 int 1578 undirect_isa_irq(int rirq) 1579 { 1580 #if defined(READY) 1581 if (bootverbose) 1582 printf("Freeing redirected ISA irq %d.\n", rirq); 1583 /** FIXME: tickle the MB redirector chip */ 1584 return ???; 1585 #else 1586 if (bootverbose) 1587 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1588 return 0; 1589 #endif /* READY */ 1590 } 1591 1592 1593 /* 1594 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1595 */ 1596 int 1597 undirect_pci_irq(int rirq) 1598 { 1599 #if defined(READY) 1600 if (bootverbose) 1601 printf("Freeing redirected PCI irq %d.\n", rirq); 1602 1603 /** FIXME: tickle the MB redirector chip */ 1604 return ???; 1605 #else 1606 if (bootverbose) 1607 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1608 rirq); 1609 return 0; 1610 #endif /* READY */ 1611 } 1612 1613 1614 /* 1615 * given a bus ID, return: 1616 * the bus type if found 1617 * -1 if NOT found 1618 */ 1619 int 1620 apic_bus_type(int id) 1621 { 1622 int x; 1623 1624 for (x = 0; x < mp_nbusses; ++x) 1625 if (bus_data[x].bus_id == id) 1626 return bus_data[x].bus_type; 1627 1628 return -1; 1629 } 1630 1631 1632 /* 1633 * given a LOGICAL APIC# and pin#, return: 1634 * the associated src bus ID if found 1635 * -1 if NOT found 1636 */ 1637 int 1638 apic_src_bus_id(int apic, int pin) 1639 { 1640 int x; 1641 1642 /* search each of the possible INTerrupt sources */ 1643 for (x = 0; x < nintrs; ++x) 1644 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1645 (pin == io_apic_ints[x].dst_apic_int)) 1646 return (io_apic_ints[x].src_bus_id); 1647 1648 return -1; /* NOT found */ 1649 } 1650 1651 1652 /* 1653 * given a LOGICAL APIC# and pin#, return: 1654 * the associated src bus IRQ if found 1655 * -1 if NOT found 1656 */ 1657 int 1658 apic_src_bus_irq(int apic, int pin) 1659 { 1660 int x; 1661 1662 for (x = 0; x < nintrs; x++) 1663 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1664 (pin == io_apic_ints[x].dst_apic_int)) 1665 return (io_apic_ints[x].src_bus_irq); 1666 1667 return -1; /* NOT found */ 1668 } 1669 1670 1671 /* 1672 * given a LOGICAL APIC# and pin#, return: 1673 * the associated INTerrupt type if found 1674 * -1 if NOT found 1675 */ 1676 int 1677 apic_int_type(int apic, int pin) 1678 { 1679 int x; 1680 1681 /* search each of the possible INTerrupt sources */ 1682 for (x = 0; x < nintrs; ++x) 1683 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1684 (pin == io_apic_ints[x].dst_apic_int)) 1685 return (io_apic_ints[x].int_type); 1686 1687 return -1; /* NOT found */ 1688 } 1689 1690 int 1691 apic_irq(int apic, int pin) 1692 { 1693 int x; 1694 int res; 1695 1696 for (x = 0; x < nintrs; ++x) 1697 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1698 (pin == io_apic_ints[x].dst_apic_int)) { 1699 res = io_apic_ints[x].int_vector; 1700 if (res == 0xff) 1701 return -1; 1702 if (apic != int_to_apicintpin[res].ioapic) 1703 panic("apic_irq: inconsistent table"); 1704 if (pin != int_to_apicintpin[res].int_pin) 1705 panic("apic_irq inconsistent table (2)"); 1706 return res; 1707 } 1708 return -1; 1709 } 1710 1711 1712 /* 1713 * given a LOGICAL APIC# and pin#, return: 1714 * the associated trigger mode if found 1715 * -1 if NOT found 1716 */ 1717 int 1718 apic_trigger(int apic, int pin) 1719 { 1720 int x; 1721 1722 /* search each of the possible INTerrupt sources */ 1723 for (x = 0; x < nintrs; ++x) 1724 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1725 (pin == io_apic_ints[x].dst_apic_int)) 1726 return ((io_apic_ints[x].int_flags >> 2) 
& 0x03); 1727 1728 return -1; /* NOT found */ 1729 } 1730 1731 1732 /* 1733 * given a LOGICAL APIC# and pin#, return: 1734 * the associated 'active' level if found 1735 * -1 if NOT found 1736 */ 1737 int 1738 apic_polarity(int apic, int pin) 1739 { 1740 int x; 1741 1742 /* search each of the possible INTerrupt sources */ 1743 for (x = 0; x < nintrs; ++x) 1744 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1745 (pin == io_apic_ints[x].dst_apic_int)) 1746 return (io_apic_ints[x].int_flags & 0x03); 1747 1748 return -1; /* NOT found */ 1749 } 1750 1751 1752 /* 1753 * set data according to MP defaults 1754 * FIXME: probably not complete yet... 1755 */ 1756 static void 1757 default_mp_table(int type) 1758 { 1759 int ap_cpu_id; 1760 #if defined(APIC_IO) 1761 int io_apic_id; 1762 int pin; 1763 #endif /* APIC_IO */ 1764 1765 #if 0 1766 printf(" MP default config type: %d\n", type); 1767 switch (type) { 1768 case 1: 1769 printf(" bus: ISA, APIC: 82489DX\n"); 1770 break; 1771 case 2: 1772 printf(" bus: EISA, APIC: 82489DX\n"); 1773 break; 1774 case 3: 1775 printf(" bus: EISA, APIC: 82489DX\n"); 1776 break; 1777 case 4: 1778 printf(" bus: MCA, APIC: 82489DX\n"); 1779 break; 1780 case 5: 1781 printf(" bus: ISA+PCI, APIC: Integrated\n"); 1782 break; 1783 case 6: 1784 printf(" bus: EISA+PCI, APIC: Integrated\n"); 1785 break; 1786 case 7: 1787 printf(" bus: MCA+PCI, APIC: Integrated\n"); 1788 break; 1789 default: 1790 printf(" future type\n"); 1791 break; 1792 /* NOTREACHED */ 1793 } 1794 #endif /* 0 */ 1795 1796 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24; 1797 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0; 1798 1799 /* BSP */ 1800 CPU_TO_ID(0) = boot_cpu_id; 1801 ID_TO_CPU(boot_cpu_id) = 0; 1802 1803 /* one and only AP */ 1804 CPU_TO_ID(1) = ap_cpu_id; 1805 ID_TO_CPU(ap_cpu_id) = 1; 1806 1807 #if defined(APIC_IO) 1808 /* one and only IO APIC */ 1809 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24; 1810 1811 /* 1812 * sanity check, refer to MP spec section 3.6.6, last paragraph 1813 * necessary as some hardware isn't properly setting up the IO APIC 1814 */ 1815 #if defined(REALLY_ANAL_IOAPICID_VALUE) 1816 if (io_apic_id != 2) { 1817 #else 1818 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) { 1819 #endif /* REALLY_ANAL_IOAPICID_VALUE */ 1820 io_apic_set_id(0, 2); 1821 io_apic_id = 2; 1822 } 1823 IO_TO_ID(0) = io_apic_id; 1824 ID_TO_IO(io_apic_id) = 0; 1825 #endif /* APIC_IO */ 1826 1827 /* fill out bus entries */ 1828 switch (type) { 1829 case 1: 1830 case 2: 1831 case 3: 1832 case 4: 1833 case 5: 1834 case 6: 1835 case 7: 1836 bus_data[0].bus_id = default_data[type - 1][1]; 1837 bus_data[0].bus_type = default_data[type - 1][2]; 1838 bus_data[1].bus_id = default_data[type - 1][3]; 1839 bus_data[1].bus_type = default_data[type - 1][4]; 1840 break; 1841 1842 /* case 4: case 7: MCA NOT supported */ 1843 default: /* illegal/reserved */ 1844 panic("BAD default MP config: %d", type); 1845 /* NOTREACHED */ 1846 } 1847 1848 #if defined(APIC_IO) 1849 /* general cases from MP v1.4, table 5-2 */ 1850 for (pin = 0; pin < 16; ++pin) { 1851 io_apic_ints[pin].int_type = 0; 1852 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */ 1853 io_apic_ints[pin].src_bus_id = 0; 1854 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */ 1855 io_apic_ints[pin].dst_apic_id = io_apic_id; 1856 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */ 1857 } 1858 1859 /* special cases from MP v1.4, table 5-2 */ 1860 if (type == 2) { 1861 io_apic_ints[2].int_type = 0xff; /* N/C */ 1862 
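		/* an int_type of 0xff marks the pin as unused, so no vector will be assigned to it */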
io_apic_ints[13].int_type = 0xff; /* N/C */ 1863 #if !defined(APIC_MIXED_MODE) 1864 /** FIXME: ??? */ 1865 panic("sorry, can't support type 2 default yet"); 1866 #endif /* APIC_MIXED_MODE */ 1867 } 1868 else 1869 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */ 1870 1871 if (type == 7) 1872 io_apic_ints[0].int_type = 0xff; /* N/C */ 1873 else 1874 io_apic_ints[0].int_type = 3; /* vectored 8259 */ 1875 #endif /* APIC_IO */ 1876 } 1877 1878 1879 /* 1880 * initialize all the SMP locks 1881 */ 1882 1883 /* critical region around IO APIC, apic_imen */ 1884 struct simplelock imen_lock; 1885 1886 /* critical region around splxx(), cpl, cml, cil, ipending */ 1887 struct simplelock cpl_lock; 1888 1889 /* Make FAST_INTR() routines sequential */ 1890 struct simplelock fast_intr_lock; 1891 1892 /* critical region around INTR() routines */ 1893 struct simplelock intr_lock; 1894 1895 /* lock region used by kernel profiling */ 1896 struct simplelock mcount_lock; 1897 1898 #ifdef USE_COMLOCK 1899 /* locks com (tty) data/hardware accesses: a FASTINTR() */ 1900 struct simplelock com_lock; 1901 #endif /* USE_COMLOCK */ 1902 1903 /* lock around the MP rendezvous */ 1904 static struct simplelock smp_rv_lock; 1905 1906 /* only 1 CPU can panic at a time :) */ 1907 struct simplelock panic_lock; 1908 1909 static void 1910 init_locks(void) 1911 { 1912 #if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ) 1913 s_lock_init((struct simplelock*)&apic_itrace_debuglock); 1914 #endif 1915 1916 s_lock_init((struct simplelock*)&mcount_lock); 1917 1918 s_lock_init((struct simplelock*)&fast_intr_lock); 1919 s_lock_init((struct simplelock*)&intr_lock); 1920 s_lock_init((struct simplelock*)&imen_lock); 1921 s_lock_init((struct simplelock*)&cpl_lock); 1922 s_lock_init(&smp_rv_lock); 1923 s_lock_init(&panic_lock); 1924 1925 #ifdef USE_COMLOCK 1926 s_lock_init((struct simplelock*)&com_lock); 1927 #endif /* USE_COMLOCK */ 1928 1929 s_lock_init(&ap_boot_lock); 1930 } 1931 1932 /* 1933 * start each AP in our list 1934 */ 1935 static int 1936 start_all_aps(u_int boot_addr) 1937 { 1938 int x, i, pg; 1939 u_char mpbiosreason; 1940 u_long mpbioswarmvec; 1941 struct globaldata *gd; 1942 char *stack; 1943 1944 POSTCODE(START_ALL_APS_POST); 1945 1946 /* initialize BSP's local APIC */ 1947 apic_initialize(); 1948 bsp_apic_ready = 1; 1949 1950 /* install the AP 1st level boot code */ 1951 install_ap_tramp(boot_addr); 1952 1953 1954 /* save the current value of the warm-start vector */ 1955 mpbioswarmvec = *((u_long *) WARMBOOT_OFF); 1956 #ifndef PC98 1957 outb(CMOS_REG, BIOS_RESET); 1958 mpbiosreason = inb(CMOS_DATA); 1959 #endif 1960 1961 /* record BSP in CPU map */ 1962 all_cpus = 1; 1963 1964 /* set up 0 -> 4MB P==V mapping for AP boot */ 1965 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 1966 invltlb(); 1967 1968 /* start each AP */ 1969 for (x = 1; x <= mp_naps; ++x) { 1970 1971 /* This is a bit verbose, it will go away soon. 
*/ 1972 1973 /* first page of AP's private space */ 1974 pg = x * i386_btop(sizeof(struct privatespace)); 1975 1976 /* allocate a new private data page */ 1977 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE); 1978 1979 /* wire it into the private page table page */ 1980 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd)); 1981 1982 /* allocate and set up an idle stack data page */ 1983 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE); 1984 for (i = 0; i < UPAGES; i++) 1985 SMPpt[pg + 5 + i] = (pt_entry_t) 1986 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1987 1988 SMPpt[pg + 1] = 0; /* *prv_CMAP1 */ 1989 SMPpt[pg + 2] = 0; /* *prv_CMAP2 */ 1990 SMPpt[pg + 3] = 0; /* *prv_CMAP3 */ 1991 SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ 1992 1993 /* prime data page for it to use */ 1994 SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu); 1995 gd->gd_cpuid = x; 1996 gd->gd_cpu_lockid = x << 24; 1997 gd->gd_prv_CMAP1 = &SMPpt[pg + 1]; 1998 gd->gd_prv_CMAP2 = &SMPpt[pg + 2]; 1999 gd->gd_prv_CMAP3 = &SMPpt[pg + 3]; 2000 gd->gd_prv_PMAP1 = &SMPpt[pg + 4]; 2001 gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1; 2002 gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2; 2003 gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3; 2004 gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1; 2005 2006 /* setup a vector to our boot code */ 2007 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 2008 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 2009 #ifndef PC98 2010 outb(CMOS_REG, BIOS_RESET); 2011 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 2012 #endif 2013 2014 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE]; 2015 bootAP = x; 2016 2017 /* attempt to start the Application Processor */ 2018 CHECK_INIT(99); /* setup checkpoints */ 2019 if (!start_ap(x, boot_addr)) { 2020 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x)); 2021 CHECK_PRINT("trace"); /* show checkpoints */ 2022 /* better panic as the AP may be running loose */ 2023 printf("panic y/n? [y] "); 2024 if (cngetc() != 'n') 2025 panic("bye-bye"); 2026 } 2027 CHECK_PRINT("trace"); /* show checkpoints */ 2028 2029 /* record its version info */ 2030 cpu_apic_versions[x] = cpu_apic_versions[0]; 2031 2032 all_cpus |= (1 << x); /* record AP in CPU map */ 2033 } 2034 2035 /* build our map of 'other' CPUs */ 2036 other_cpus = all_cpus & ~(1 << cpuid); 2037 2038 /* fill in our (BSP) APIC version */ 2039 cpu_apic_versions[0] = lapic.version; 2040 2041 /* restore the warmstart vector */ 2042 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 2043 #ifndef PC98 2044 outb(CMOS_REG, BIOS_RESET); 2045 outb(CMOS_DATA, mpbiosreason); 2046 #endif 2047 2048 /* 2049 * Set up the idle context for the BSP. Similar to above except 2050 * that some was done by locore, some by pmap.c and some is implicit 2051 * because the BSP is cpu#0 and the page is initially zero, and also 2052 * because we can refer to variables by name on the BSP.. 2053 */ 2054 2055 /* Allocate and setup BSP idle stack */ 2056 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); 2057 for (i = 0; i < UPAGES; i++) 2058 SMPpt[5 + i] = (pt_entry_t) 2059 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 2060 2061 *(int *)PTD = 0; 2062 pmap_set_opt(); 2063 2064 /* number of APs actually started */ 2065 return mp_ncpus - 1; 2066 } 2067 2068 2069 /* 2070 * load the 1st level AP boot code into base memory. 
2071 */ 2072 2073 /* targets for relocation */ 2074 extern void bigJump(void); 2075 extern void bootCodeSeg(void); 2076 extern void bootDataSeg(void); 2077 extern void MPentry(void); 2078 extern u_int MP_GDT; 2079 extern u_int mp_gdtbase; 2080 2081 static void 2082 install_ap_tramp(u_int boot_addr) 2083 { 2084 int x; 2085 int size = *(int *) ((u_long) & bootMP_size); 2086 u_char *src = (u_char *) ((u_long) bootMP); 2087 u_char *dst = (u_char *) boot_addr + KERNBASE; 2088 u_int boot_base = (u_int) bootMP; 2089 u_int8_t *dst8; 2090 u_int16_t *dst16; 2091 u_int32_t *dst32; 2092 2093 POSTCODE(INSTALL_AP_TRAMP_POST); 2094 2095 for (x = 0; x < size; ++x) 2096 *dst++ = *src++; 2097 2098 /* 2099 * modify addresses in code we just moved to basemem. unfortunately we 2100 * need fairly detailed info about mpboot.s for this to work. changes 2101 * to mpboot.s might require changes here. 2102 */ 2103 2104 /* boot code is located in KERNEL space */ 2105 dst = (u_char *) boot_addr + KERNBASE; 2106 2107 /* modify the lgdt arg */ 2108 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 2109 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 2110 2111 /* modify the ljmp target for MPentry() */ 2112 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 2113 *dst32 = ((u_int) MPentry - KERNBASE); 2114 2115 /* modify the target for boot code segment */ 2116 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 2117 dst8 = (u_int8_t *) (dst16 + 1); 2118 *dst16 = (u_int) boot_addr & 0xffff; 2119 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2120 2121 /* modify the target for boot data segment */ 2122 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 2123 dst8 = (u_int8_t *) (dst16 + 1); 2124 *dst16 = (u_int) boot_addr & 0xffff; 2125 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2126 } 2127 2128 2129 /* 2130 * this function starts the AP (application processor) identified 2131 * by the APIC ID 'physicalCpu'. It does quite a "song and dance" 2132 * to accomplish this. This is necessary because of the nuances 2133 * of the different hardware we might encounter. It ain't pretty, 2134 * but it seems to work. 2135 */ 2136 static int 2137 start_ap(int logical_cpu, u_int boot_addr) 2138 { 2139 int physical_cpu; 2140 int vector; 2141 int cpus; 2142 u_long icr_lo, icr_hi; 2143 2144 POSTCODE(START_AP_POST); 2145 2146 /* get the PHYSICAL APIC ID# */ 2147 physical_cpu = CPU_TO_ID(logical_cpu); 2148 2149 /* calculate the vector */ 2150 vector = (boot_addr >> 12) & 0xff; 2151 2152 /* used as a watchpoint to signal AP startup */ 2153 cpus = mp_ncpus; 2154 2155 /* 2156 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 2157 * and running the target CPU. OR this INIT IPI might be latched (P5 2158 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 2159 * ignored. 
2160 */ 2161 2162 /* set up the address for the target AP */ 2163 icr_hi = lapic.icr_hi & ~APIC_ID_MASK; 2164 icr_hi |= (physical_cpu << 24); 2165 lapic.icr_hi = icr_hi; 2166 2167 /* do an INIT IPI: assert RESET */ 2168 icr_lo = lapic.icr_lo & 0xfff00000; 2169 lapic.icr_lo = icr_lo | 0x0000c500; 2170 2171 /* wait for pending status end */ 2172 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2173 /* spin */ ; 2174 2175 /* do an INIT IPI: deassert RESET */ 2176 lapic.icr_lo = icr_lo | 0x00008500; 2177 2178 /* wait for pending status end */ 2179 u_sleep(10000); /* wait ~10ms */ 2180 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2181 /* spin */ ; 2182 2183 /* 2184 * Next we do a STARTUP IPI: the previous INIT IPI might still be 2185 * latched (P5 bug), in which case this 1st STARTUP IPI would terminate 2186 * immediately and the previously started INIT IPI would continue, OR 2187 * the previous INIT IPI has already run, and this STARTUP IPI will 2188 * run, OR the previous INIT IPI was ignored, and this STARTUP IPI 2189 * will run. 2190 */ 2191 2192 /* do a STARTUP IPI */ 2193 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2194 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2195 /* spin */ ; 2196 u_sleep(200); /* wait ~200us */ 2197 2198 /* 2199 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 2200 * the previous STARTUP IPI was cancelled by a latched INIT IPI, OR 2201 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 2202 * recognized after a hardware RESET or INIT IPI. 2203 */ 2204 2205 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2206 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2207 /* spin */ ; 2208 u_sleep(200); /* wait ~200us */ 2209 2210 /* wait for it to start */ 2211 set_apic_timer(5000000); /* == 5 seconds */ 2212 while (read_apic_timer()) 2213 if (mp_ncpus > cpus) 2214 return 1; /* return SUCCESS */ 2215 2216 return 0; /* return FAILURE */ 2217 } 2218 2219 /* 2220 * Flush the TLB on all other CPUs. 2221 * 2222 * XXX: Needs to handshake and wait for completion before proceeding. 2223 */ 2224 void 2225 smp_invltlb(void) 2226 { 2227 #if defined(APIC_IO) 2228 if (smp_started && invltlb_ok) 2229 all_but_self_ipi(XINVLTLB_OFFSET); 2230 #endif /* APIC_IO */ 2231 } 2232 2233 void 2234 invlpg(u_int addr) 2235 { 2236 __asm __volatile("invlpg (%0)"::"r"(addr):"memory"); 2237 2238 /* send a message to the other CPUs */ 2239 smp_invltlb(); 2240 } 2241 2242 void 2243 invltlb(void) 2244 { 2245 u_long temp; 2246 2247 /* 2248 * This should be implemented as load_cr3(rcr3()) when load_cr3() is 2249 * inlined. 2250 */ 2251 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory"); 2252 2253 /* send a message to the other CPUs */ 2254 smp_invltlb(); 2255 } 2256 2257 2258 /* 2259 * When called, the executing CPU will send an IPI to all other CPUs 2260 * requesting that they halt execution. 2261 * 2262 * Usually (but not necessarily) called with 'other_cpus' as its arg. 2263 * 2264 * - Signals all CPUs in map to stop. 2265 * - Waits for each to stop. 2266 * 2267 * Returns: 2268 * -1: error 2269 * 0: NA 2270 * 1: ok 2271 * 2272 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 2273 * from executing at the same time. 2274 */ 2275 int 2276 stop_cpus(u_int map) 2277 { 2278 if (!smp_started) 2279 return 0; 2280 2281 /* send the Xcpustop IPI to all CPUs in map */ 2282 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 2283 2284 while ((stopped_cpus & map) != map) 2285 /* spin */ ; 2286 2287 return 1; 2288 } 2289 2290 2291 /* 2292 * Called by a CPU to restart stopped CPUs.
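 *
 * (Editorial sketch, not part of the original source: the usual pairing
 * with stop_cpus() looks like
 *
 *	if (stop_cpus(other_cpus)) {
 *		... critical work while the other CPUs spin ...
 *	}
 *	restart_cpus(stopped_cpus);
 *
 * i.e. the caller quiesces every other CPU, does its work, and then
 * releases them.)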
2293 * 2294 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 2295 * 2296 * - Signals all CPUs in map to restart. 2297 * - Waits for each to restart. 2298 * 2299 * Returns: 2300 * -1: error 2301 * 0: NA 2302 * 1: ok 2303 */ 2304 int 2305 restart_cpus(u_int map) 2306 { 2307 if (!smp_started) 2308 return 0; 2309 2310 started_cpus = map; /* signal other cpus to restart */ 2311 2312 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */ 2313 /* spin */ ; 2314 2315 return 1; 2316 } 2317 2318 int smp_active = 0; /* are the APs allowed to run? */ 2319 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 2320 2321 /* XXX maybe should be hw.ncpu */ 2322 static int smp_cpus = 1; /* how many CPUs are running */ 2323 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, ""); 2324 2325 int invltlb_ok = 0; /* throttle smp_invltlb() till safe */ 2326 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, ""); 2327 2328 /* Warning: Do not staticize. Used from swtch.s */ 2329 int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */ 2330 SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW, 2331 &do_page_zero_idle, 0, ""); 2332 2333 /* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */ 2334 int forward_irq_enabled = 1; 2335 SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW, 2336 &forward_irq_enabled, 0, ""); 2337 2338 /* Enable forwarding of a signal to a process running on a different CPU */ 2339 static int forward_signal_enabled = 1; 2340 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, 2341 &forward_signal_enabled, 0, ""); 2342 2343 /* Enable forwarding of roundrobin to all other CPUs */ 2344 static int forward_roundrobin_enabled = 1; 2345 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW, 2346 &forward_roundrobin_enabled, 0, ""); 2347 2348 /* 2349 * This is called once the rest of the system is up and running and we're 2350 * ready to let the APs out of the pen. 2351 */ 2352 void ap_init(void); 2353 2354 void 2355 ap_init(void) 2356 { 2357 u_int apic_id; 2358 2359 /* lock against other APs that are waking up */ 2360 s_lock(&ap_boot_lock); 2361 2362 /* BSP may have changed PTD while we're waiting for the lock */ 2363 cpu_invltlb(); 2364 2365 smp_cpus++; 2366 2367 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 2368 lidt(&r_idt); 2369 #endif 2370 2371 /* Build our map of 'other' CPUs. */ 2372 other_cpus = all_cpus & ~(1 << cpuid); 2373 2374 printf("SMP: AP CPU #%d Launched!\n", cpuid); 2375 2376 /* set up CPU registers and state */ 2377 cpu_setregs(); 2378 2379 /* set up FPU state on the AP */ 2380 npxinit(__INITIAL_NPXCW__); 2381 2382 /* A quick check from sanity claus */ 2383 apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]); 2384 if (cpuid != apic_id) { 2385 printf("SMP: cpuid = %d\n", cpuid); 2386 printf("SMP: apic_id = %d\n", apic_id); 2387 printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]); 2388 panic("cpuid mismatch! boom!!"); 2389 } 2390 2391 /* Init local apic for irq's */ 2392 apic_initialize(); 2393 2394 /* Set memory range attributes for this CPU to match the BSP */ 2395 mem_range_AP_init(); 2396 2397 /* 2398 * Activate smp_invltlb, although strictly speaking, this isn't 2399 * quite correct yet. We should have a bitfield for CPUs willing 2400 * to accept TLB flush IPIs or something and sync them.
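 *
 * (Editorial sketch of that idea, not implemented here: each AP could
 * announce its readiness with something like
 *
 *	atomic_set_int(&tlb_ready_cpus, 1 << cpuid);
 *
 * and smp_invltlb() would then target only 'tlb_ready_cpus' rather than
 * keying off the global invltlb_ok flag.  'tlb_ready_cpus' is a
 * hypothetical variable used purely for illustration.)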
2401 */ 2402 if (smp_cpus == mp_ncpus) { 2403 invltlb_ok = 1; 2404 smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */ 2405 smp_active = 1; /* historic */ 2406 } 2407 2408 /* let other AP's wake up now */ 2409 s_unlock(&ap_boot_lock); 2410 2411 /* wait until all the AP's are up */ 2412 while (smp_started == 0) 2413 ; /* nothing */ 2414 2415 /* 2416 * Set curproc to our per-cpu idleproc so that mutexes have 2417 * something unique to lock with. 2418 */ 2419 PCPU_SET(curproc,idleproc); 2420 2421 microuptime(&switchtime); 2422 switchticks = ticks; 2423 2424 /* ok, now grab sched_lock and enter the scheduler */ 2425 enable_intr(); 2426 mtx_enter(&sched_lock, MTX_SPIN); 2427 cpu_throw(); /* doesn't return */ 2428 2429 panic("scheduler returned us to ap_init"); 2430 } 2431 2432 #ifdef BETTER_CLOCK 2433 2434 #define CHECKSTATE_USER 0 2435 #define CHECKSTATE_SYS 1 2436 #define CHECKSTATE_INTR 2 2437 2438 /* Do not staticize. Used from apic_vector.s */ 2439 struct proc* checkstate_curproc[MAXCPU]; 2440 int checkstate_cpustate[MAXCPU]; 2441 u_long checkstate_pc[MAXCPU]; 2442 2443 #define PC_TO_INDEX(pc, prof) \ 2444 ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \ 2445 (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) 2446 2447 static void 2448 addupc_intr_forwarded(struct proc *p, int id, int *astmap) 2449 { 2450 int i; 2451 struct uprof *prof; 2452 u_long pc; 2453 2454 pc = checkstate_pc[id]; 2455 prof = &p->p_stats->p_prof; 2456 if (pc >= prof->pr_off && 2457 (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) { 2458 if ((p->p_flag & P_OWEUPC) == 0) { 2459 prof->pr_addr = pc; 2460 prof->pr_ticks = 1; 2461 p->p_flag |= P_OWEUPC; 2462 } 2463 *astmap |= (1 << id); 2464 } 2465 } 2466 2467 static void 2468 forwarded_statclock(int id, int pscnt, int *astmap) 2469 { 2470 struct pstats *pstats; 2471 long rss; 2472 struct rusage *ru; 2473 struct vmspace *vm; 2474 int cpustate; 2475 struct proc *p; 2476 #ifdef GPROF 2477 register struct gmonparam *g; 2478 int i; 2479 #endif 2480 2481 p = checkstate_curproc[id]; 2482 cpustate = checkstate_cpustate[id]; 2483 2484 /* XXX */ 2485 if (p->p_ithd) 2486 cpustate = CHECKSTATE_INTR; 2487 else if (p == SMP_prvspace[id].globaldata.gd_idleproc) 2488 cpustate = CHECKSTATE_SYS; 2489 2490 switch (cpustate) { 2491 case CHECKSTATE_USER: 2492 if (p->p_flag & P_PROFIL) 2493 addupc_intr_forwarded(p, id, astmap); 2494 if (pscnt > 1) 2495 return; 2496 p->p_uticks++; 2497 if (p->p_nice > NZERO) 2498 cp_time[CP_NICE]++; 2499 else 2500 cp_time[CP_USER]++; 2501 break; 2502 case CHECKSTATE_SYS: 2503 #ifdef GPROF 2504 /* 2505 * Kernel statistics are just like addupc_intr, only easier. 2506 */ 2507 g = &_gmonparam; 2508 if (g->state == GMON_PROF_ON) { 2509 i = checkstate_pc[id] - g->lowpc; 2510 if (i < g->textsize) { 2511 i /= HISTFRACTION * sizeof(*g->kcount); 2512 g->kcount[i]++; 2513 } 2514 } 2515 #endif 2516 if (pscnt > 1) 2517 return; 2518 2519 p->p_sticks++; 2520 if (p == SMP_prvspace[id].globaldata.gd_idleproc) 2521 cp_time[CP_IDLE]++; 2522 else 2523 cp_time[CP_SYS]++; 2524 break; 2525 case CHECKSTATE_INTR: 2526 default: 2527 #ifdef GPROF 2528 /* 2529 * Kernel statistics are just like addupc_intr, only easier. 
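 *
 * (Editorial note, not part of the original source: the sampled PC is
 * simply turned into a profile histogram bucket,
 *
 *	bucket = (pc - g->lowpc) / (HISTFRACTION * sizeof(*g->kcount));
 *	g->kcount[bucket]++;
 *
 * which is what the code below does with 'i'.)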
2530 */ 2531 g = &_gmonparam; 2532 if (g->state == GMON_PROF_ON) { 2533 i = checkstate_pc[id] - g->lowpc; 2534 if (i < g->textsize) { 2535 i /= HISTFRACTION * sizeof(*g->kcount); 2536 g->kcount[i]++; 2537 } 2538 } 2539 #endif 2540 if (pscnt > 1) 2541 return; 2542 if (p) 2543 p->p_iticks++; 2544 cp_time[CP_INTR]++; 2545 } 2546 schedclock(p); 2547 2548 /* Update resource usage integrals and maximums. */ 2549 if ((pstats = p->p_stats) != NULL && 2550 (ru = &pstats->p_ru) != NULL && 2551 (vm = p->p_vmspace) != NULL) { 2552 ru->ru_ixrss += pgtok(vm->vm_tsize); 2553 ru->ru_idrss += pgtok(vm->vm_dsize); 2554 ru->ru_isrss += pgtok(vm->vm_ssize); 2555 rss = pgtok(vmspace_resident_count(vm)); 2556 if (ru->ru_maxrss < rss) 2557 ru->ru_maxrss = rss; 2558 } 2559 } 2560 2561 void 2562 forward_statclock(int pscnt) 2563 { 2564 int map; 2565 int id; 2566 int i; 2567 2568 /* Kludge. We don't yet have separate locks for the interrupts 2569 * and the kernel. This means that we cannot let the other processors 2570 * handle complex interrupts while inhibiting them from entering 2571 * the kernel in a non-interrupt context. 2572 * 2573 * What we can do, without changing the locking mechanisms yet, 2574 * is to let the other processors handle a very simple interrupt 2575 * (which determines the processor states) and to do the main 2576 * work ourselves. 2577 */ 2578 2579 if (!smp_started || !invltlb_ok || cold || panicstr) 2580 return; 2581 2582 /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */ 2583 2584 map = other_cpus & ~stopped_cpus; 2585 checkstate_probed_cpus = 0; 2586 if (map != 0) 2587 selected_apic_ipi(map, 2588 XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED); 2589 2590 i = 0; 2591 while (checkstate_probed_cpus != map) { 2592 /* spin */ 2593 i++; 2594 if (i == 100000) { 2595 #ifdef BETTER_CLOCK_DIAGNOSTIC 2596 printf("forward_statclock: checkstate %x\n", 2597 checkstate_probed_cpus); 2598 #endif 2599 break; 2600 } 2601 } 2602 2603 /* 2604 * Step 2: walk through the other processors' processes, update ticks and 2605 * profiling info. 2606 */ 2607 2608 map = 0; 2609 for (id = 0; id < mp_ncpus; id++) { 2610 if (id == cpuid) 2611 continue; 2612 if (((1 << id) & checkstate_probed_cpus) == 0) 2613 continue; 2614 forwarded_statclock(id, pscnt, &map); 2615 } 2616 if (map != 0) { 2617 checkstate_need_ast |= map; 2618 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2619 i = 0; 2620 while ((checkstate_need_ast & map) != 0) { 2621 /* spin */ 2622 i++; 2623 if (i > 100000) { 2624 #ifdef BETTER_CLOCK_DIAGNOSTIC 2625 printf("forward_statclock: dropped ast 0x%x\n", 2626 checkstate_need_ast & map); 2627 #endif 2628 break; 2629 } 2630 } 2631 } 2632 } 2633 2634 void 2635 forward_hardclock(int pscnt) 2636 { 2637 int map; 2638 int id; 2639 struct proc *p; 2640 struct pstats *pstats; 2641 int i; 2642 2643 /* Kludge. We don't yet have separate locks for the interrupts 2644 * and the kernel. This means that we cannot let the other processors 2645 * handle complex interrupts while inhibiting them from entering 2646 * the kernel in a non-interrupt context. 2647 * 2648 * What we can do, without changing the locking mechanisms yet, 2649 * is to let the other processors handle a very simple interrupt 2650 * (which determines the processor states) and to do the main 2651 * work ourselves.
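 *
 * (Editorial note, not part of the original source: the "very simple
 * interrupt" is the Xcpucheckstate IPI, whose handler records each target
 * CPU's current process, mode and PC in checkstate_curproc[],
 * checkstate_cpustate[] and checkstate_pc[] and then sets that CPU's bit
 * in checkstate_probed_cpus; the heavy lifting is done here, on the CPU
 * that took the clock interrupt.)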
2652 */ 2653 2654 if (!smp_started || !invltlb_ok || cold || panicstr) 2655 return; 2656 2657 /* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */ 2658 2659 map = other_cpus & ~stopped_cpus; 2660 checkstate_probed_cpus = 0; 2661 if (map != 0) 2662 selected_apic_ipi(map, 2663 XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED); 2664 2665 i = 0; 2666 while (checkstate_probed_cpus != map) { 2667 /* spin */ 2668 i++; 2669 if (i == 100000) { 2670 #ifdef BETTER_CLOCK_DIAGNOSTIC 2671 printf("forward_hardclock: checkstate %x\n", 2672 checkstate_probed_cpus); 2673 #endif 2674 break; 2675 } 2676 } 2677 2678 /* 2679 * Step 2: walk through the other processors' processes, update the virtual 2680 * timer and profiling timer. If stathz == 0, also update ticks and 2681 * profiling info. 2682 */ 2683 2684 map = 0; 2685 for (id = 0; id < mp_ncpus; id++) { 2686 if (id == cpuid) 2687 continue; 2688 if (((1 << id) & checkstate_probed_cpus) == 0) 2689 continue; 2690 p = checkstate_curproc[id]; 2691 if (p) { 2692 pstats = p->p_stats; 2693 if (checkstate_cpustate[id] == CHECKSTATE_USER && 2694 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && 2695 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) { 2696 psignal(p, SIGVTALRM); 2697 map |= (1 << id); 2698 } 2699 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && 2700 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) { 2701 psignal(p, SIGPROF); 2702 map |= (1 << id); 2703 } 2704 } 2705 if (stathz == 0) { 2706 forwarded_statclock(id, pscnt, &map); 2707 } 2708 } 2709 if (map != 0) { 2710 checkstate_need_ast |= map; 2711 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2712 i = 0; 2713 while ((checkstate_need_ast & map) != 0) { 2714 /* spin */ 2715 i++; 2716 if (i > 100000) { 2717 #ifdef BETTER_CLOCK_DIAGNOSTIC 2718 printf("forward_hardclock: dropped ast 0x%x\n", 2719 checkstate_need_ast & map); 2720 #endif 2721 break; 2722 } 2723 } 2724 } 2725 } 2726 2727 #endif /* BETTER_CLOCK */ 2728 2729 void 2730 forward_signal(struct proc *p) 2731 { 2732 int map; 2733 int id; 2734 int i; 2735 2736 /* Kludge. We don't yet have separate locks for the interrupts 2737 * and the kernel. This means that we cannot let the other processors 2738 * handle complex interrupts while inhibiting them from entering 2739 * the kernel in a non-interrupt context. 2740 * 2741 * What we can do, without changing the locking mechanisms yet, 2742 * is to let the other processors handle a very simple interrupt 2743 * (which determines the processor states) and to do the main 2744 * work ourselves.
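 *
 * (Editorial note, not part of the original source: forward_signal() below
 * simply pokes the CPU currently running the process with an AST IPI so it
 * notices the pending signal; the outer loop re-reads p->p_oncpu and
 * retries if the process migrated to another CPU in the meantime.)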
2745 */ 2746 2747 if (!smp_started || !invltlb_ok || cold || panicstr) 2748 return; 2749 if (!forward_signal_enabled) 2750 return; 2751 while (1) { 2752 if (p->p_stat != SRUN) 2753 return; 2754 id = p->p_oncpu; 2755 if (id == 0xff) 2756 return; 2757 map = (1<<id); 2758 checkstate_need_ast |= map; 2759 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2760 i = 0; 2761 while ((checkstate_need_ast & map) != 0) { 2762 /* spin */ 2763 i++; 2764 if (i > 100000) { 2765 #if 0 2766 printf("forward_signal: dropped ast 0x%x\n", 2767 checkstate_need_ast & map); 2768 #endif 2769 break; 2770 } 2771 } 2772 if (id == p->p_oncpu) 2773 return; 2774 } 2775 } 2776 2777 void 2778 forward_roundrobin(void) 2779 { 2780 u_int map; 2781 int i; 2782 2783 if (!smp_started || !invltlb_ok || cold || panicstr) 2784 return; 2785 if (!forward_roundrobin_enabled) 2786 return; 2787 resched_cpus |= other_cpus; 2788 map = other_cpus & ~stopped_cpus ; 2789 #if 1 2790 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2791 #else 2792 (void) all_but_self_ipi(XCPUAST_OFFSET); 2793 #endif 2794 i = 0; 2795 while ((checkstate_need_ast & map) != 0) { 2796 /* spin */ 2797 i++; 2798 if (i > 100000) { 2799 #if 0 2800 printf("forward_roundrobin: dropped ast 0x%x\n", 2801 checkstate_need_ast & map); 2802 #endif 2803 break; 2804 } 2805 } 2806 } 2807 2808 2809 #ifdef APIC_INTR_REORDER 2810 /* 2811 * Maintain mapping from softintr vector to isr bit in local apic. 2812 */ 2813 void 2814 set_lapic_isrloc(int intr, int vector) 2815 { 2816 if (intr < 0 || intr > 32) 2817 panic("set_apic_isrloc: bad intr argument: %d",intr); 2818 if (vector < ICU_OFFSET || vector > 255) 2819 panic("set_apic_isrloc: bad vector argument: %d",vector); 2820 apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2); 2821 apic_isrbit_location[intr].bit = (1<<(vector & 31)); 2822 } 2823 #endif 2824 2825 /* 2826 * All-CPU rendezvous. CPUs are signalled, all execute the setup function 2827 * (if specified), rendezvous, execute the action function (if specified), 2828 * rendezvous again, execute the teardown function (if specified), and then 2829 * resume. 2830 * 2831 * Note that the supplied external functions _must_ be reentrant and aware 2832 * that they are running in parallel and in an unknown lock context. 2833 */ 2834 static void (*smp_rv_setup_func)(void *arg); 2835 static void (*smp_rv_action_func)(void *arg); 2836 static void (*smp_rv_teardown_func)(void *arg); 2837 static void *smp_rv_func_arg; 2838 static volatile int smp_rv_waiters[2]; 2839 2840 void 2841 smp_rendezvous_action(void) 2842 { 2843 /* setup function */ 2844 if (smp_rv_setup_func != NULL) 2845 smp_rv_setup_func(smp_rv_func_arg); 2846 /* spin on entry rendezvous */ 2847 atomic_add_int(&smp_rv_waiters[0], 1); 2848 while (smp_rv_waiters[0] < mp_ncpus) 2849 ; 2850 /* action function */ 2851 if (smp_rv_action_func != NULL) 2852 smp_rv_action_func(smp_rv_func_arg); 2853 /* spin on exit rendezvous */ 2854 atomic_add_int(&smp_rv_waiters[1], 1); 2855 while (smp_rv_waiters[1] < mp_ncpus) 2856 ; 2857 /* teardown function */ 2858 if (smp_rv_teardown_func != NULL) 2859 smp_rv_teardown_func(smp_rv_func_arg); 2860 } 2861 2862 void 2863 smp_rendezvous(void (* setup_func)(void *), 2864 void (* action_func)(void *), 2865 void (* teardown_func)(void *), 2866 void *arg) 2867 { 2868 u_int efl; 2869 2870 /* obtain rendezvous lock */ 2871 s_lock(&smp_rv_lock); /* XXX sleep here? NOWAIT flag? 
*/ 2872 2873 /* set static function pointers */ 2874 smp_rv_setup_func = setup_func; 2875 smp_rv_action_func = action_func; 2876 smp_rv_teardown_func = teardown_func; 2877 smp_rv_func_arg = arg; 2878 smp_rv_waiters[0] = 0; 2879 smp_rv_waiters[1] = 0; 2880 2881 /* disable interrupts on this CPU, save interrupt status */ 2882 efl = read_eflags(); 2883 write_eflags(efl & ~PSL_I); 2884 2885 /* signal other processors, which will enter the IPI with interrupts off */ 2886 all_but_self_ipi(XRENDEZVOUS_OFFSET); 2887 2888 /* call executor function */ 2889 smp_rendezvous_action(); 2890 2891 /* restore interrupt flag */ 2892 write_eflags(efl); 2893 2894 /* release lock */ 2895 s_unlock(&smp_rv_lock); 2896 } 2897 2898 void 2899 release_aps(void *dummy __unused) 2900 { 2901 s_unlock(&ap_boot_lock); 2902 } 2903 2904 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); 2905
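
/*
 * Editorial example, not part of the original source: smp_rendezvous() is
 * typically used to run a short action on every CPU at once, for instance
 * a local TLB flush on all processors:
 *
 *	static void
 *	flush_action(void *arg __unused)
 *	{
 *		cpu_invltlb();
 *	}
 *
 *	smp_rendezvous(NULL, flush_action, NULL, NULL);
 *
 * The setup and teardown hooks may be NULL; the action function runs on
 * every CPU with interrupts disabled, so it must not block or sleep.
 * 'flush_action' is a hypothetical illustration only.
 */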