1 /* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include "opt_smp.h" 29 #include "opt_cpu.h" 30 #include "opt_user_ldt.h" 31 32 #ifdef SMP 33 #include <machine/smptests.h> 34 #else 35 #error 36 #endif 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/kernel.h> 41 #include <sys/proc.h> 42 #include <sys/sysctl.h> 43 #include <sys/malloc.h> 44 #include <sys/memrange.h> 45 #ifdef BETTER_CLOCK 46 #include <sys/dkstat.h> 47 #endif 48 #include <sys/cons.h> /* cngetc() */ 49 50 #include <vm/vm.h> 51 #include <vm/vm_param.h> 52 #include <vm/pmap.h> 53 #include <vm/vm_kern.h> 54 #include <vm/vm_extern.h> 55 #ifdef BETTER_CLOCK 56 #include <sys/lock.h> 57 #include <vm/vm_map.h> 58 #include <sys/user.h> 59 #ifdef GPROF 60 #include <sys/gmon.h> 61 #endif 62 #endif 63 64 #include <machine/smp.h> 65 #include <machine/apic.h> 66 #include <machine/atomic.h> 67 #include <machine/cpufunc.h> 68 #include <machine/mpapic.h> 69 #include <machine/psl.h> 70 #include <machine/segments.h> 71 #include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 72 #include <machine/tss.h> 73 #include <machine/specialreg.h> 74 #include <machine/globaldata.h> 75 76 #if defined(APIC_IO) 77 #include <machine/md_var.h> /* setidt() */ 78 #include <i386/isa/icu.h> /* IPIs */ 79 #include <i386/isa/intr_machdep.h> /* IPIs */ 80 #endif /* APIC_IO */ 81 82 #if defined(TEST_DEFAULT_CONFIG) 83 #define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 84 #else 85 #define MPFPS_MPFB1 mpfps->mpfb1 86 #endif /* TEST_DEFAULT_CONFIG */ 87 88 #define WARMBOOT_TARGET 0 89 #define WARMBOOT_OFF (KERNBASE + 0x0467) 90 #define WARMBOOT_SEG (KERNBASE + 0x0469) 91 92 #ifdef PC98 93 #define BIOS_BASE (0xe8000) 94 #define BIOS_SIZE (0x18000) 95 #else 96 #define BIOS_BASE (0xf0000) 97 #define BIOS_SIZE (0x10000) 98 #endif 99 #define BIOS_COUNT (BIOS_SIZE/4) 100 101 #define CMOS_REG (0x70) 102 #define CMOS_DATA (0x71) 103 #define BIOS_RESET (0x0f) 104 #define BIOS_WARM (0x0a) 105 106 #define PROCENTRY_FLAG_EN 0x01 107 #define PROCENTRY_FLAG_BP 0x02 108 #define IOAPICENTRY_FLAG_EN 0x01 109 110 111 /* MP Floating Pointer Structure */ 112 typedef struct MPFPS { 113 char signature[4]; 114 void *pap; 115 u_char length; 116 u_char spec_rev; 117 u_char checksum; 118 u_char 
mpfb1; 119 u_char mpfb2; 120 u_char mpfb3; 121 u_char mpfb4; 122 u_char mpfb5; 123 } *mpfps_t; 124 125 /* MP Configuration Table Header */ 126 typedef struct MPCTH { 127 char signature[4]; 128 u_short base_table_length; 129 u_char spec_rev; 130 u_char checksum; 131 u_char oem_id[8]; 132 u_char product_id[12]; 133 void *oem_table_pointer; 134 u_short oem_table_size; 135 u_short entry_count; 136 void *apic_address; 137 u_short extended_table_length; 138 u_char extended_table_checksum; 139 u_char reserved; 140 } *mpcth_t; 141 142 143 typedef struct PROCENTRY { 144 u_char type; 145 u_char apic_id; 146 u_char apic_version; 147 u_char cpu_flags; 148 u_long cpu_signature; 149 u_long feature_flags; 150 u_long reserved1; 151 u_long reserved2; 152 } *proc_entry_ptr; 153 154 typedef struct BUSENTRY { 155 u_char type; 156 u_char bus_id; 157 char bus_type[6]; 158 } *bus_entry_ptr; 159 160 typedef struct IOAPICENTRY { 161 u_char type; 162 u_char apic_id; 163 u_char apic_version; 164 u_char apic_flags; 165 void *apic_address; 166 } *io_apic_entry_ptr; 167 168 typedef struct INTENTRY { 169 u_char type; 170 u_char int_type; 171 u_short int_flags; 172 u_char src_bus_id; 173 u_char src_bus_irq; 174 u_char dst_apic_id; 175 u_char dst_apic_int; 176 } *int_entry_ptr; 177 178 /* descriptions of MP basetable entries */ 179 typedef struct BASETABLE_ENTRY { 180 u_char type; 181 u_char length; 182 char name[16]; 183 } basetable_entry; 184 185 /* 186 * this code MUST be enabled here and in mpboot.s. 187 * it follows the very early stages of AP boot by placing values in CMOS ram. 188 * it NORMALLY will never be needed and thus the primitive method for enabling. 189 * 190 #define CHECK_POINTS 191 */ 192 193 #if defined(CHECK_POINTS) && !defined(PC98) 194 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 195 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 196 197 #define CHECK_INIT(D); \ 198 CHECK_WRITE(0x34, (D)); \ 199 CHECK_WRITE(0x35, (D)); \ 200 CHECK_WRITE(0x36, (D)); \ 201 CHECK_WRITE(0x37, (D)); \ 202 CHECK_WRITE(0x38, (D)); \ 203 CHECK_WRITE(0x39, (D)); 204 205 #define CHECK_PRINT(S); \ 206 printf("%s: %d, %d, %d, %d, %d, %d\n", \ 207 (S), \ 208 CHECK_READ(0x34), \ 209 CHECK_READ(0x35), \ 210 CHECK_READ(0x36), \ 211 CHECK_READ(0x37), \ 212 CHECK_READ(0x38), \ 213 CHECK_READ(0x39)); 214 215 #else /* CHECK_POINTS */ 216 217 #define CHECK_INIT(D) 218 #define CHECK_PRINT(S) 219 220 #endif /* CHECK_POINTS */ 221 222 /* 223 * Values to send to the POST hardware. 224 */ 225 #define MP_BOOTADDRESS_POST 0x10 226 #define MP_PROBE_POST 0x11 227 #define MPTABLE_PASS1_POST 0x12 228 229 #define MP_START_POST 0x13 230 #define MP_ENABLE_POST 0x14 231 #define MPTABLE_PASS2_POST 0x15 232 233 #define START_ALL_APS_POST 0x16 234 #define INSTALL_AP_TRAMP_POST 0x17 235 #define START_AP_POST 0x18 236 237 #define MP_ANNOUNCE_POST 0x19 238 239 240 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 241 int current_postcode; 242 243 /** XXX FIXME: what system files declare these??? 
*/ 244 extern struct region_descriptor r_gdt, r_idt; 245 246 int bsp_apic_ready = 0; /* flags useability of BSP apic */ 247 int mp_ncpus; /* # of CPUs, including BSP */ 248 int mp_naps; /* # of Applications processors */ 249 int mp_nbusses; /* # of busses */ 250 int mp_napics; /* # of IO APICs */ 251 int boot_cpu_id; /* designated BSP */ 252 vm_offset_t cpu_apic_address; 253 vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */ 254 extern int nkpt; 255 256 u_int32_t cpu_apic_versions[NCPU]; 257 u_int32_t io_apic_versions[NAPIC]; 258 259 #ifdef APIC_INTR_DIAGNOSTIC 260 int apic_itrace_enter[32]; 261 int apic_itrace_tryisrlock[32]; 262 int apic_itrace_gotisrlock[32]; 263 int apic_itrace_active[32]; 264 int apic_itrace_masked[32]; 265 int apic_itrace_noisrlock[32]; 266 int apic_itrace_masked2[32]; 267 int apic_itrace_unmask[32]; 268 int apic_itrace_noforward[32]; 269 int apic_itrace_leave[32]; 270 int apic_itrace_enter2[32]; 271 int apic_itrace_doreti[32]; 272 int apic_itrace_splz[32]; 273 int apic_itrace_eoi[32]; 274 #ifdef APIC_INTR_DIAGNOSTIC_IRQ 275 unsigned short apic_itrace_debugbuffer[32768]; 276 int apic_itrace_debugbuffer_idx; 277 struct simplelock apic_itrace_debuglock; 278 #endif 279 #endif 280 281 #ifdef APIC_INTR_REORDER 282 struct { 283 volatile int *location; 284 int bit; 285 } apic_isrbit_location[32]; 286 #endif 287 288 struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE]; 289 290 /* 291 * APIC ID logical/physical mapping structures. 292 * We oversize these to simplify boot-time config. 293 */ 294 int cpu_num_to_apic_id[NAPICID]; 295 int io_num_to_apic_id[NAPICID]; 296 int apic_id_to_logical[NAPICID]; 297 298 299 /* Bitmap of all available CPUs */ 300 u_int all_cpus; 301 302 /* AP uses this during bootstrap. Do not staticize. */ 303 char *bootSTK; 304 static int bootAP; 305 306 /* Hotwire a 0->4MB V==P mapping */ 307 extern pt_entry_t *KPTphys; 308 309 /* SMP page table page */ 310 extern pt_entry_t *SMPpt; 311 312 struct pcb stoppcbs[NCPU]; 313 314 int smp_started; /* has the system started? */ 315 316 /* 317 * Local data and functions. 318 */ 319 320 static int mp_capable; 321 static u_int boot_address; 322 static u_int base_memory; 323 324 static int picmode; /* 0: virtual wire mode, 1: PIC mode */ 325 static mpfps_t mpfps; 326 static int search_for_sig(u_int32_t target, int count); 327 static void mp_enable(u_int boot_addr); 328 329 static int mptable_pass1(void); 330 static int mptable_pass2(void); 331 static void default_mp_table(int type); 332 static void fix_mp_table(void); 333 static void setup_apic_irq_mapping(void); 334 static void init_locks(void); 335 static int start_all_aps(u_int boot_addr); 336 static void install_ap_tramp(u_int boot_addr); 337 static int start_ap(int logicalCpu, u_int boot_addr); 338 static int apic_int_is_bus_type(int intr, int bus_type); 339 340 /* 341 * Calculate usable address in base memory for AP trampoline code. 342 */ 343 u_int 344 mp_bootaddress(u_int basemem) 345 { 346 POSTCODE(MP_BOOTADDRESS_POST); 347 348 base_memory = basemem * 1024; /* convert to bytes */ 349 350 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */ 351 if ((base_memory - boot_address) < bootMP_size) 352 boot_address -= 4096; /* not enough, lower by 4k */ 353 354 return boot_address; 355 } 356 357 358 /* 359 * Look for an Intel MP spec table (ie, SMP capable hardware). 
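 *
 * Per MP spec v1.4 the floating pointer structure may live in the first
 * 1K of the EBDA, in the last 1K of base memory when no EBDA exists, or
 * in the BIOS ROM region (BIOS_BASE .. BIOS_BASE + BIOS_SIZE); the probe
 * below checks those regions, in that order, via search_for_sig().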
360 */ 361 int 362 mp_probe(void) 363 { 364 int x; 365 u_long segment; 366 u_int32_t target; 367 368 POSTCODE(MP_PROBE_POST); 369 370 /* see if EBDA exists */ 371 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) { 372 /* search first 1K of EBDA */ 373 target = (u_int32_t) (segment << 4); 374 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 375 goto found; 376 } else { 377 /* last 1K of base memory, effective 'top of base' passed in */ 378 target = (u_int32_t) (base_memory - 0x400); 379 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 380 goto found; 381 } 382 383 /* search the BIOS */ 384 target = (u_int32_t) BIOS_BASE; 385 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0) 386 goto found; 387 388 /* nothing found */ 389 mpfps = (mpfps_t)0; 390 mp_capable = 0; 391 return 0; 392 393 found: 394 /* calculate needed resources */ 395 mpfps = (mpfps_t)x; 396 if (mptable_pass1()) 397 panic("you must reconfigure your kernel"); 398 399 /* flag fact that we are running multiple processors */ 400 mp_capable = 1; 401 return 1; 402 } 403 404 405 /* 406 * Startup the SMP processors. 407 */ 408 void 409 mp_start(void) 410 { 411 POSTCODE(MP_START_POST); 412 413 /* look for MP capable motherboard */ 414 if (mp_capable) 415 mp_enable(boot_address); 416 else 417 panic("MP hardware not found!"); 418 } 419 420 421 /* 422 * Print various information about the SMP system hardware and setup. 423 */ 424 void 425 mp_announce(void) 426 { 427 int x; 428 429 POSTCODE(MP_ANNOUNCE_POST); 430 431 printf("FreeBSD/SMP: Multiprocessor motherboard\n"); 432 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0)); 433 printf(", version: 0x%08x", cpu_apic_versions[0]); 434 printf(", at 0x%08x\n", cpu_apic_address); 435 for (x = 1; x <= mp_naps; ++x) { 436 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x)); 437 printf(", version: 0x%08x", cpu_apic_versions[x]); 438 printf(", at 0x%08x\n", cpu_apic_address); 439 } 440 441 #if defined(APIC_IO) 442 for (x = 0; x < mp_napics; ++x) { 443 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x)); 444 printf(", version: 0x%08x", io_apic_versions[x]); 445 printf(", at 0x%08x\n", io_apic_address[x]); 446 } 447 #else 448 printf(" Warning: APIC I/O disabled\n"); 449 #endif /* APIC_IO */ 450 } 451 452 /* 453 * AP cpu's call this to sync up protected mode. 454 */ 455 void 456 init_secondary(void) 457 { 458 int gsel_tss; 459 int x, myid = bootAP; 460 461 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid]; 462 gdt_segs[GPROC0_SEL].ssd_base = 463 (int) &SMP_prvspace[myid].globaldata.gd_common_tss; 464 SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid]; 465 466 for (x = 0; x < NGDT; x++) { 467 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd); 468 } 469 470 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 471 r_gdt.rd_base = (int) &gdt[myid * NGDT]; 472 lgdt(&r_gdt); /* does magic intra-segment return */ 473 474 lidt(&r_idt); 475 476 lldt(_default_ldt); 477 #ifdef USER_LDT 478 currentldt = _default_ldt; 479 #endif 480 481 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 482 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 483 common_tss.tss_esp0 = 0; /* not used until after switch */ 484 common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); 485 common_tss.tss_ioopt = (sizeof common_tss) << 16; 486 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; 487 common_tssd = *tss_gdt; 488 ltr(gsel_tss); 489 490 load_cr0(0x8005003b); /* XXX! */ 491 492 pmap_set_opt(); 493 } 494 495 496 #if defined(APIC_IO) 497 /* 498 * Final configuration of the BSP's local APIC: 499 * - disable 'pic mode'. 
500 * - disable 'virtual wire mode'. 501 * - enable NMI. 502 */ 503 void 504 bsp_apic_configure(void) 505 { 506 u_char byte; 507 u_int32_t temp; 508 509 /* leave 'pic mode' if necessary */ 510 if (picmode) { 511 outb(0x22, 0x70); /* select IMCR */ 512 byte = inb(0x23); /* current contents */ 513 byte |= 0x01; /* mask external INTR */ 514 outb(0x23, byte); /* disconnect 8259s/NMI */ 515 } 516 517 /* mask lint0 (the 8259 'virtual wire' connection) */ 518 temp = lapic.lvt_lint0; 519 temp |= APIC_LVT_M; /* set the mask */ 520 lapic.lvt_lint0 = temp; 521 522 /* setup lint1 to handle NMI */ 523 temp = lapic.lvt_lint1; 524 temp &= ~APIC_LVT_M; /* clear the mask */ 525 lapic.lvt_lint1 = temp; 526 527 if (bootverbose) 528 apic_dump("bsp_apic_configure()"); 529 } 530 #endif /* APIC_IO */ 531 532 533 /******************************************************************* 534 * local functions and data 535 */ 536 537 /* 538 * start the SMP system 539 */ 540 static void 541 mp_enable(u_int boot_addr) 542 { 543 int x; 544 #if defined(APIC_IO) 545 int apic; 546 u_int ux; 547 #endif /* APIC_IO */ 548 549 POSTCODE(MP_ENABLE_POST); 550 551 /* turn on 4MB of V == P addressing so we can get to MP table */ 552 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 553 invltlb(); 554 555 /* examine the MP table for needed info, uses physical addresses */ 556 x = mptable_pass2(); 557 558 *(int *)PTD = 0; 559 invltlb(); 560 561 /* can't process default configs till the CPU APIC is pmapped */ 562 if (x) 563 default_mp_table(x); 564 565 /* post scan cleanup */ 566 fix_mp_table(); 567 setup_apic_irq_mapping(); 568 569 #if defined(APIC_IO) 570 571 /* fill the LOGICAL io_apic_versions table */ 572 for (apic = 0; apic < mp_napics; ++apic) { 573 ux = io_apic_read(apic, IOAPIC_VER); 574 io_apic_versions[apic] = ux; 575 io_apic_set_id(apic, IO_TO_ID(apic)); 576 } 577 578 /* program each IO APIC in the system */ 579 for (apic = 0; apic < mp_napics; ++apic) 580 if (io_apic_setup(apic) < 0) 581 panic("IO APIC setup failure"); 582 583 /* install a 'Spurious INTerrupt' vector */ 584 setidt(XSPURIOUSINT_OFFSET, Xspuriousint, 585 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 586 587 /* install an inter-CPU IPI for TLB invalidation */ 588 setidt(XINVLTLB_OFFSET, Xinvltlb, 589 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 590 591 #ifdef BETTER_CLOCK 592 /* install an inter-CPU IPI for reading processor state */ 593 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate, 594 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 595 #endif 596 597 /* install an inter-CPU IPI for all-CPU rendezvous */ 598 setidt(XRENDEZVOUS_OFFSET, Xrendezvous, 599 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 600 601 /* install an inter-CPU IPI for forcing an additional software trap */ 602 setidt(XCPUAST_OFFSET, Xcpuast, 603 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 604 605 /* install an inter-CPU IPI for interrupt forwarding */ 606 setidt(XFORWARD_IRQ_OFFSET, Xforward_irq, 607 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 608 609 /* install an inter-CPU IPI for CPU stop/restart */ 610 setidt(XCPUSTOP_OFFSET, Xcpustop, 611 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 612 613 #if defined(TEST_TEST1) 614 /* install a "fake hardware INTerrupt" vector */ 615 setidt(XTEST1_OFFSET, Xtest1, 616 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 617 #endif /** TEST_TEST1 */ 618 619 #endif /* APIC_IO */ 620 621 /* initialize all SMP locks */ 622 init_locks(); 623 624 /* start each Application Processor */ 625 
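	/*
	 * From here on the work is driven by start_all_aps(): each AP is
	 * started in turn with the INIT/STARTUP IPI sequence in start_ap(),
	 * which watches mp_ncpus to detect that the AP has actually come up.
	 */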
start_all_aps(boot_addr); 626 627 /* 628 * The init process might be started on a different CPU now, 629 * and the boot CPU might not call prepare_usermode to get 630 * cr0 correctly configured. Thus we initialize cr0 here. 631 */ 632 load_cr0(rcr0() | CR0_WP | CR0_AM); 633 } 634 635 636 /* 637 * look for the MP spec signature 638 */ 639 640 /* string defined by the Intel MP Spec as identifying the MP table */ 641 #define MP_SIG 0x5f504d5f /* _MP_ */ 642 #define NEXT(X) ((X) += 4) 643 static int 644 search_for_sig(u_int32_t target, int count) 645 { 646 int x; 647 u_int32_t *addr = (u_int32_t *) (KERNBASE + target); 648 649 for (x = 0; x < count; NEXT(x)) 650 if (addr[x] == MP_SIG) 651 /* make array index a byte index */ 652 return (target + (x * sizeof(u_int32_t))); 653 654 return -1; 655 } 656 657 658 static basetable_entry basetable_entry_types[] = 659 { 660 {0, 20, "Processor"}, 661 {1, 8, "Bus"}, 662 {2, 8, "I/O APIC"}, 663 {3, 8, "I/O INT"}, 664 {4, 8, "Local INT"} 665 }; 666 667 typedef struct BUSDATA { 668 u_char bus_id; 669 enum busTypes bus_type; 670 } bus_datum; 671 672 typedef struct INTDATA { 673 u_char int_type; 674 u_short int_flags; 675 u_char src_bus_id; 676 u_char src_bus_irq; 677 u_char dst_apic_id; 678 u_char dst_apic_int; 679 u_char int_vector; 680 } io_int, local_int; 681 682 typedef struct BUSTYPENAME { 683 u_char type; 684 char name[7]; 685 } bus_type_name; 686 687 static bus_type_name bus_type_table[] = 688 { 689 {CBUS, "CBUS"}, 690 {CBUSII, "CBUSII"}, 691 {EISA, "EISA"}, 692 {MCA, "MCA"}, 693 {UNKNOWN_BUSTYPE, "---"}, 694 {ISA, "ISA"}, 695 {MCA, "MCA"}, 696 {UNKNOWN_BUSTYPE, "---"}, 697 {UNKNOWN_BUSTYPE, "---"}, 698 {UNKNOWN_BUSTYPE, "---"}, 699 {UNKNOWN_BUSTYPE, "---"}, 700 {UNKNOWN_BUSTYPE, "---"}, 701 {PCI, "PCI"}, 702 {UNKNOWN_BUSTYPE, "---"}, 703 {UNKNOWN_BUSTYPE, "---"}, 704 {UNKNOWN_BUSTYPE, "---"}, 705 {UNKNOWN_BUSTYPE, "---"}, 706 {XPRESS, "XPRESS"}, 707 {UNKNOWN_BUSTYPE, "---"} 708 }; 709 /* from MP spec v1.4, table 5-1 */ 710 static int default_data[7][5] = 711 { 712 /* nbus, id0, type0, id1, type1 */ 713 {1, 0, ISA, 255, 255}, 714 {1, 0, EISA, 255, 255}, 715 {1, 0, EISA, 255, 255}, 716 {1, 0, MCA, 255, 255}, 717 {2, 0, ISA, 1, PCI}, 718 {2, 0, EISA, 1, PCI}, 719 {2, 0, MCA, 1, PCI} 720 }; 721 722 723 /* the bus data */ 724 static bus_datum bus_data[NBUS]; 725 726 /* the IO INT data, one entry per possible APIC INTerrupt */ 727 static io_int io_apic_ints[NINTR]; 728 729 static int nintrs; 730 731 static int processor_entry __P((proc_entry_ptr entry, int cpu)); 732 static int bus_entry __P((bus_entry_ptr entry, int bus)); 733 static int io_apic_entry __P((io_apic_entry_ptr entry, int apic)); 734 static int int_entry __P((int_entry_ptr entry, int intr)); 735 static int lookup_bus_type __P((char *name)); 736 737 738 /* 739 * 1st pass on motherboard's Intel MP specification table. 
740 * 741 * initializes: 742 * mp_ncpus = 1 743 * 744 * determines: 745 * cpu_apic_address (common to all CPUs) 746 * io_apic_address[N] 747 * mp_naps 748 * mp_nbusses 749 * mp_napics 750 * nintrs 751 */ 752 static int 753 mptable_pass1(void) 754 { 755 int x; 756 mpcth_t cth; 757 int totalSize; 758 void* position; 759 int count; 760 int type; 761 int mustpanic; 762 763 POSTCODE(MPTABLE_PASS1_POST); 764 765 mustpanic = 0; 766 767 /* clear various tables */ 768 for (x = 0; x < NAPICID; ++x) { 769 io_apic_address[x] = ~0; /* IO APIC address table */ 770 } 771 772 /* init everything to empty */ 773 mp_naps = 0; 774 mp_nbusses = 0; 775 mp_napics = 0; 776 nintrs = 0; 777 778 /* check for use of 'default' configuration */ 779 if (MPFPS_MPFB1 != 0) { 780 /* use default addresses */ 781 cpu_apic_address = DEFAULT_APIC_BASE; 782 io_apic_address[0] = DEFAULT_IO_APIC_BASE; 783 784 /* fill in with defaults */ 785 mp_naps = 2; /* includes BSP */ 786 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0]; 787 #if defined(APIC_IO) 788 mp_napics = 1; 789 nintrs = 16; 790 #endif /* APIC_IO */ 791 } 792 else { 793 if ((cth = mpfps->pap) == 0) 794 panic("MP Configuration Table Header MISSING!"); 795 796 cpu_apic_address = (vm_offset_t) cth->apic_address; 797 798 /* walk the table, recording info of interest */ 799 totalSize = cth->base_table_length - sizeof(struct MPCTH); 800 position = (u_char *) cth + sizeof(struct MPCTH); 801 count = cth->entry_count; 802 803 while (count--) { 804 switch (type = *(u_char *) position) { 805 case 0: /* processor_entry */ 806 if (((proc_entry_ptr)position)->cpu_flags 807 & PROCENTRY_FLAG_EN) 808 ++mp_naps; 809 break; 810 case 1: /* bus_entry */ 811 ++mp_nbusses; 812 break; 813 case 2: /* io_apic_entry */ 814 if (((io_apic_entry_ptr)position)->apic_flags 815 & IOAPICENTRY_FLAG_EN) 816 io_apic_address[mp_napics++] = 817 (vm_offset_t)((io_apic_entry_ptr) 818 position)->apic_address; 819 break; 820 case 3: /* int_entry */ 821 ++nintrs; 822 break; 823 case 4: /* int_entry */ 824 break; 825 default: 826 panic("mpfps Base Table HOSED!"); 827 /* NOTREACHED */ 828 } 829 830 totalSize -= basetable_entry_types[type].length; 831 (u_char*)position += basetable_entry_types[type].length; 832 } 833 } 834 835 /* qualify the numbers */ 836 if (mp_naps > NCPU) { 837 printf("Warning: only using %d of %d available CPUs!\n", 838 NCPU, mp_naps); 839 mp_naps = NCPU; 840 } 841 if (mp_nbusses > NBUS) { 842 printf("found %d busses, increase NBUS\n", mp_nbusses); 843 mustpanic = 1; 844 } 845 if (mp_napics > NAPIC) { 846 printf("found %d apics, increase NAPIC\n", mp_napics); 847 mustpanic = 1; 848 } 849 if (nintrs > NINTR) { 850 printf("found %d intrs, increase NINTR\n", nintrs); 851 mustpanic = 1; 852 } 853 854 /* 855 * Count the BSP. 856 * This is also used as a counter while starting the APs. 857 */ 858 mp_ncpus = 1; 859 860 --mp_naps; /* subtract the BSP */ 861 862 return mustpanic; 863 } 864 865 866 /* 867 * 2nd pass on motherboard's Intel MP specification table. 
868 * 869 * sets: 870 * boot_cpu_id 871 * ID_TO_IO(N), phy APIC ID to log CPU/IO table 872 * CPU_TO_ID(N), logical CPU to APIC ID table 873 * IO_TO_ID(N), logical IO to APIC ID table 874 * bus_data[N] 875 * io_apic_ints[N] 876 */ 877 static int 878 mptable_pass2(void) 879 { 880 int x; 881 mpcth_t cth; 882 int totalSize; 883 void* position; 884 int count; 885 int type; 886 int apic, bus, cpu, intr; 887 888 POSTCODE(MPTABLE_PASS2_POST); 889 890 /* clear various tables */ 891 for (x = 0; x < NAPICID; ++x) { 892 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */ 893 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */ 894 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */ 895 } 896 897 /* clear bus data table */ 898 for (x = 0; x < NBUS; ++x) 899 bus_data[x].bus_id = 0xff; 900 901 /* clear IO APIC INT table */ 902 for (x = 0; x < NINTR; ++x) { 903 io_apic_ints[x].int_type = 0xff; 904 io_apic_ints[x].int_vector = 0xff; 905 } 906 907 /* setup the cpu/apic mapping arrays */ 908 boot_cpu_id = -1; 909 910 /* record whether PIC or virtual-wire mode */ 911 picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0; 912 913 /* check for use of 'default' configuration */ 914 if (MPFPS_MPFB1 != 0) 915 return MPFPS_MPFB1; /* return default configuration type */ 916 917 if ((cth = mpfps->pap) == 0) 918 panic("MP Configuration Table Header MISSING!"); 919 920 /* walk the table, recording info of interest */ 921 totalSize = cth->base_table_length - sizeof(struct MPCTH); 922 position = (u_char *) cth + sizeof(struct MPCTH); 923 count = cth->entry_count; 924 apic = bus = intr = 0; 925 cpu = 1; /* pre-count the BSP */ 926 927 while (count--) { 928 switch (type = *(u_char *) position) { 929 case 0: 930 if (processor_entry(position, cpu)) 931 ++cpu; 932 break; 933 case 1: 934 if (bus_entry(position, bus)) 935 ++bus; 936 break; 937 case 2: 938 if (io_apic_entry(position, apic)) 939 ++apic; 940 break; 941 case 3: 942 if (int_entry(position, intr)) 943 ++intr; 944 break; 945 case 4: 946 /* int_entry(position); */ 947 break; 948 default: 949 panic("mpfps Base Table HOSED!"); 950 /* NOTREACHED */ 951 } 952 953 totalSize -= basetable_entry_types[type].length; 954 (u_char *) position += basetable_entry_types[type].length; 955 } 956 957 if (boot_cpu_id == -1) 958 panic("NO BSP found!"); 959 960 /* report fact that its NOT a default configuration */ 961 return 0; 962 } 963 964 965 void 966 assign_apic_irq(int apic, int intpin, int irq) 967 { 968 int x; 969 970 if (int_to_apicintpin[irq].ioapic != -1) 971 panic("assign_apic_irq: inconsistent table"); 972 973 int_to_apicintpin[irq].ioapic = apic; 974 int_to_apicintpin[irq].int_pin = intpin; 975 int_to_apicintpin[irq].apic_address = ioapic[apic]; 976 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin; 977 978 for (x = 0; x < nintrs; x++) { 979 if ((io_apic_ints[x].int_type == 0 || 980 io_apic_ints[x].int_type == 3) && 981 io_apic_ints[x].int_vector == 0xff && 982 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) && 983 io_apic_ints[x].dst_apic_int == intpin) 984 io_apic_ints[x].int_vector = irq; 985 } 986 } 987 988 void 989 revoke_apic_irq(int irq) 990 { 991 int x; 992 int oldapic; 993 int oldintpin; 994 995 if (int_to_apicintpin[irq].ioapic == -1) 996 panic("assign_apic_irq: inconsistent table"); 997 998 oldapic = int_to_apicintpin[irq].ioapic; 999 oldintpin = int_to_apicintpin[irq].int_pin; 1000 1001 int_to_apicintpin[irq].ioapic = -1; 1002 int_to_apicintpin[irq].int_pin = 0; 1003 int_to_apicintpin[irq].apic_address = NULL; 1004 int_to_apicintpin[irq].redirindex = 0; 1005 
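	/*
	 * The loop below is the inverse of the one in assign_apic_irq():
	 * matching io_apic_ints[] entries for the old apic/intpin pair are
	 * marked unassigned (int_vector = 0xff) again.
	 */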
1006 for (x = 0; x < nintrs; x++) { 1007 if ((io_apic_ints[x].int_type == 0 || 1008 io_apic_ints[x].int_type == 3) && 1009 io_apic_ints[x].int_vector == 0xff && 1010 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1011 io_apic_ints[x].dst_apic_int == oldintpin) 1012 io_apic_ints[x].int_vector = 0xff; 1013 } 1014 } 1015 1016 /* 1017 * parse an Intel MP specification table 1018 */ 1019 static void 1020 fix_mp_table(void) 1021 { 1022 int x; 1023 int id; 1024 int bus_0 = 0; /* Stop GCC warning */ 1025 int bus_pci = 0; /* Stop GCC warning */ 1026 int num_pci_bus; 1027 1028 /* 1029 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1030 * did it wrong. The MP spec says that when more than 1 PCI bus 1031 * exists the BIOS must begin with bus entries for the PCI bus and use 1032 * actual PCI bus numbering. This implies that when only 1 PCI bus 1033 * exists the BIOS can choose to ignore this ordering, and indeed many 1034 * MP motherboards do ignore it. This causes a problem when the PCI 1035 * sub-system makes requests of the MP sub-system based on PCI bus 1036 * numbers. So here we look for the situation and renumber the 1037 * busses and associated INTs in an effort to "make it right". 1038 */ 1039 1040 /* find bus 0, PCI bus, count the number of PCI busses */ 1041 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) { 1042 if (bus_data[x].bus_id == 0) { 1043 bus_0 = x; 1044 } 1045 if (bus_data[x].bus_type == PCI) { 1046 ++num_pci_bus; 1047 bus_pci = x; 1048 } 1049 } 1050 /* 1051 * bus_0 == slot of bus with ID of 0 1052 * bus_pci == slot of last PCI bus encountered 1053 */ 1054 1055 /* check the 1 PCI bus case for sanity */ 1056 if (num_pci_bus == 1) { 1057 1058 /* if it is number 0 all is well */ 1059 if (bus_data[bus_pci].bus_id == 0) 1060 return; 1061 1062 /* mis-numbered, swap with whichever bus uses slot 0 */ 1063 1064 /* swap the bus entry types */ 1065 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type; 1066 bus_data[bus_0].bus_type = PCI; 1067 1068 /* swap each relavant INTerrupt entry */ 1069 id = bus_data[bus_pci].bus_id; 1070 for (x = 0; x < nintrs; ++x) { 1071 if (io_apic_ints[x].src_bus_id == id) { 1072 io_apic_ints[x].src_bus_id = 0; 1073 } 1074 else if (io_apic_ints[x].src_bus_id == 0) { 1075 io_apic_ints[x].src_bus_id = id; 1076 } 1077 } 1078 } 1079 } 1080 1081 1082 /* Assign low level interrupt handlers */ 1083 static void 1084 setup_apic_irq_mapping(void) 1085 { 1086 int x; 1087 int int_vector; 1088 1089 /* Clear array */ 1090 for (x = 0; x < APIC_INTMAPSIZE; x++) { 1091 int_to_apicintpin[x].ioapic = -1; 1092 int_to_apicintpin[x].int_pin = 0; 1093 int_to_apicintpin[x].apic_address = NULL; 1094 int_to_apicintpin[x].redirindex = 0; 1095 } 1096 1097 /* First assign ISA/EISA interrupts */ 1098 for (x = 0; x < nintrs; x++) { 1099 int_vector = io_apic_ints[x].src_bus_irq; 1100 if (int_vector < APIC_INTMAPSIZE && 1101 io_apic_ints[x].int_vector == 0xff && 1102 int_to_apicintpin[int_vector].ioapic == -1 && 1103 (apic_int_is_bus_type(x, ISA) || 1104 apic_int_is_bus_type(x, EISA)) && 1105 io_apic_ints[x].int_type == 0) { 1106 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1107 io_apic_ints[x].dst_apic_int, 1108 int_vector); 1109 } 1110 } 1111 1112 /* Assign interrupts on first 24 intpins on IOAPIC #0 */ 1113 for (x = 0; x < nintrs; x++) { 1114 int_vector = io_apic_ints[x].dst_apic_int; 1115 if (int_vector < APIC_INTMAPSIZE && 1116 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) && 1117 io_apic_ints[x].int_vector == 0xff && 1118 int_to_apicintpin[int_vector].ioapic 
== -1 && 1119 (io_apic_ints[x].int_type == 0 || 1120 io_apic_ints[x].int_type == 3)) { 1121 assign_apic_irq(0, 1122 io_apic_ints[x].dst_apic_int, 1123 int_vector); 1124 } 1125 } 1126 /* 1127 * Assign interrupts for remaining intpins. 1128 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates 1129 * that an entry for ISA/EISA irq 0 exist, and a fallback to mixed mode 1130 * due to 8254 interrupts not being delivered can reuse that low level 1131 * interrupt handler. 1132 */ 1133 int_vector = 0; 1134 while (int_vector < APIC_INTMAPSIZE && 1135 int_to_apicintpin[int_vector].ioapic != -1) 1136 int_vector++; 1137 for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) { 1138 if ((io_apic_ints[x].int_type == 0 || 1139 (io_apic_ints[x].int_type == 3 && 1140 (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) || 1141 io_apic_ints[x].dst_apic_int != 0))) && 1142 io_apic_ints[x].int_vector == 0xff) { 1143 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1144 io_apic_ints[x].dst_apic_int, 1145 int_vector); 1146 int_vector++; 1147 while (int_vector < APIC_INTMAPSIZE && 1148 int_to_apicintpin[int_vector].ioapic != -1) 1149 int_vector++; 1150 } 1151 } 1152 } 1153 1154 1155 static int 1156 processor_entry(proc_entry_ptr entry, int cpu) 1157 { 1158 /* check for usability */ 1159 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN)) 1160 return 0; 1161 1162 /* check for BSP flag */ 1163 if (entry->cpu_flags & PROCENTRY_FLAG_BP) { 1164 boot_cpu_id = entry->apic_id; 1165 CPU_TO_ID(0) = entry->apic_id; 1166 ID_TO_CPU(entry->apic_id) = 0; 1167 return 0; /* its already been counted */ 1168 } 1169 1170 /* add another AP to list, if less than max number of CPUs */ 1171 else if (cpu < NCPU) { 1172 CPU_TO_ID(cpu) = entry->apic_id; 1173 ID_TO_CPU(entry->apic_id) = cpu; 1174 return 1; 1175 } 1176 1177 return 0; 1178 } 1179 1180 1181 static int 1182 bus_entry(bus_entry_ptr entry, int bus) 1183 { 1184 int x; 1185 char c, name[8]; 1186 1187 /* encode the name into an index */ 1188 for (x = 0; x < 6; ++x) { 1189 if ((c = entry->bus_type[x]) == ' ') 1190 break; 1191 name[x] = c; 1192 } 1193 name[x] = '\0'; 1194 1195 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE) 1196 panic("unknown bus type: '%s'", name); 1197 1198 bus_data[bus].bus_id = entry->bus_id; 1199 bus_data[bus].bus_type = x; 1200 1201 return 1; 1202 } 1203 1204 1205 static int 1206 io_apic_entry(io_apic_entry_ptr entry, int apic) 1207 { 1208 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1209 return 0; 1210 1211 IO_TO_ID(apic) = entry->apic_id; 1212 ID_TO_IO(entry->apic_id) = apic; 1213 1214 return 1; 1215 } 1216 1217 1218 static int 1219 lookup_bus_type(char *name) 1220 { 1221 int x; 1222 1223 for (x = 0; x < MAX_BUSTYPE; ++x) 1224 if (strcmp(bus_type_table[x].name, name) == 0) 1225 return bus_type_table[x].type; 1226 1227 return UNKNOWN_BUSTYPE; 1228 } 1229 1230 1231 static int 1232 int_entry(int_entry_ptr entry, int intr) 1233 { 1234 int apic; 1235 1236 io_apic_ints[intr].int_type = entry->int_type; 1237 io_apic_ints[intr].int_flags = entry->int_flags; 1238 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1239 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1240 if (entry->dst_apic_id == 255) { 1241 /* This signal goes to all IO APICS. 
Select an IO APIC 1242 with sufficient number of interrupt pins */ 1243 for (apic = 0; apic < mp_napics; apic++) 1244 if (((io_apic_read(apic, IOAPIC_VER) & 1245 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1246 entry->dst_apic_int) 1247 break; 1248 if (apic < mp_napics) 1249 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1250 else 1251 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1252 } else 1253 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1254 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1255 1256 return 1; 1257 } 1258 1259 1260 static int 1261 apic_int_is_bus_type(int intr, int bus_type) 1262 { 1263 int bus; 1264 1265 for (bus = 0; bus < mp_nbusses; ++bus) 1266 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1267 && ((int) bus_data[bus].bus_type == bus_type)) 1268 return 1; 1269 1270 return 0; 1271 } 1272 1273 1274 /* 1275 * Given a traditional ISA INT mask, return an APIC mask. 1276 */ 1277 u_int 1278 isa_apic_mask(u_int isa_mask) 1279 { 1280 int isa_irq; 1281 int apic_pin; 1282 1283 #if defined(SKIP_IRQ15_REDIRECT) 1284 if (isa_mask == (1 << 15)) { 1285 printf("skipping ISA IRQ15 redirect\n"); 1286 return isa_mask; 1287 } 1288 #endif /* SKIP_IRQ15_REDIRECT */ 1289 1290 isa_irq = ffs(isa_mask); /* find its bit position */ 1291 if (isa_irq == 0) /* doesn't exist */ 1292 return 0; 1293 --isa_irq; /* make it zero based */ 1294 1295 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1296 if (apic_pin == -1) 1297 return 0; 1298 1299 return (1 << apic_pin); /* convert pin# to a mask */ 1300 } 1301 1302 1303 /* 1304 * Determine which APIC pin an ISA/EISA INT is attached to. 1305 */ 1306 #define INTTYPE(I) (io_apic_ints[(I)].int_type) 1307 #define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1308 #define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1309 #define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1310 1311 #define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1312 int 1313 isa_apic_irq(int isa_irq) 1314 { 1315 int intr; 1316 1317 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1318 if (INTTYPE(intr) == 0) { /* standard INT */ 1319 if (SRCBUSIRQ(intr) == isa_irq) { 1320 if (apic_int_is_bus_type(intr, ISA) || 1321 apic_int_is_bus_type(intr, EISA)) 1322 return INTIRQ(intr); /* found */ 1323 } 1324 } 1325 } 1326 return -1; /* NOT found */ 1327 } 1328 1329 1330 /* 1331 * Determine which APIC pin a PCI INT is attached to. 
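 *
 * For PCI source buses the MP spec packs both the device number and the
 * interrupt line into src_bus_irq: bits 0-1 select INTA#..INTD# and
 * bits 2-6 hold the device number, which is what the SRCBUSDEVICE() and
 * SRCBUSLINE() macros below extract.  E.g. device 9 using INTA# shows
 * up as src_bus_irq = (9 << 2) | 0 = 0x24.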
1332 */ 1333 #define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1334 #define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1335 #define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1336 int 1337 pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1338 { 1339 int intr; 1340 1341 --pciInt; /* zero based */ 1342 1343 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1344 if ((INTTYPE(intr) == 0) /* standard INT */ 1345 && (SRCBUSID(intr) == pciBus) 1346 && (SRCBUSDEVICE(intr) == pciDevice) 1347 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1348 if (apic_int_is_bus_type(intr, PCI)) 1349 return INTIRQ(intr); /* exact match */ 1350 1351 return -1; /* NOT found */ 1352 } 1353 1354 int 1355 next_apic_irq(int irq) 1356 { 1357 int intr, ointr; 1358 int bus, bustype; 1359 1360 bus = 0; 1361 bustype = 0; 1362 for (intr = 0; intr < nintrs; intr++) { 1363 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1364 continue; 1365 bus = SRCBUSID(intr); 1366 bustype = apic_bus_type(bus); 1367 if (bustype != ISA && 1368 bustype != EISA && 1369 bustype != PCI) 1370 continue; 1371 break; 1372 } 1373 if (intr >= nintrs) { 1374 return -1; 1375 } 1376 for (ointr = intr + 1; ointr < nintrs; ointr++) { 1377 if (INTTYPE(ointr) != 0) 1378 continue; 1379 if (bus != SRCBUSID(ointr)) 1380 continue; 1381 if (bustype == PCI) { 1382 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1383 continue; 1384 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1385 continue; 1386 } 1387 if (bustype == ISA || bustype == EISA) { 1388 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1389 continue; 1390 } 1391 if (INTPIN(intr) == INTPIN(ointr)) 1392 continue; 1393 break; 1394 } 1395 if (ointr >= nintrs) { 1396 return -1; 1397 } 1398 return INTIRQ(ointr); 1399 } 1400 #undef SRCBUSLINE 1401 #undef SRCBUSDEVICE 1402 #undef SRCBUSID 1403 #undef SRCBUSIRQ 1404 1405 #undef INTPIN 1406 #undef INTIRQ 1407 #undef INTAPIC 1408 #undef INTTYPE 1409 1410 1411 /* 1412 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt. 1413 * 1414 * XXX FIXME: 1415 * Exactly what this means is unclear at this point. It is a solution 1416 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1417 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1418 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1419 * option. 
1420 */ 1421 int 1422 undirect_isa_irq(int rirq) 1423 { 1424 #if defined(READY) 1425 if (bootverbose) 1426 printf("Freeing redirected ISA irq %d.\n", rirq); 1427 /** FIXME: tickle the MB redirector chip */ 1428 return ???; 1429 #else 1430 if (bootverbose) 1431 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1432 return 0; 1433 #endif /* READY */ 1434 } 1435 1436 1437 /* 1438 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1439 */ 1440 int 1441 undirect_pci_irq(int rirq) 1442 { 1443 #if defined(READY) 1444 if (bootverbose) 1445 printf("Freeing redirected PCI irq %d.\n", rirq); 1446 1447 /** FIXME: tickle the MB redirector chip */ 1448 return ???; 1449 #else 1450 if (bootverbose) 1451 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1452 rirq); 1453 return 0; 1454 #endif /* READY */ 1455 } 1456 1457 1458 /* 1459 * given a bus ID, return: 1460 * the bus type if found 1461 * -1 if NOT found 1462 */ 1463 int 1464 apic_bus_type(int id) 1465 { 1466 int x; 1467 1468 for (x = 0; x < mp_nbusses; ++x) 1469 if (bus_data[x].bus_id == id) 1470 return bus_data[x].bus_type; 1471 1472 return -1; 1473 } 1474 1475 1476 /* 1477 * given a LOGICAL APIC# and pin#, return: 1478 * the associated src bus ID if found 1479 * -1 if NOT found 1480 */ 1481 int 1482 apic_src_bus_id(int apic, int pin) 1483 { 1484 int x; 1485 1486 /* search each of the possible INTerrupt sources */ 1487 for (x = 0; x < nintrs; ++x) 1488 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1489 (pin == io_apic_ints[x].dst_apic_int)) 1490 return (io_apic_ints[x].src_bus_id); 1491 1492 return -1; /* NOT found */ 1493 } 1494 1495 1496 /* 1497 * given a LOGICAL APIC# and pin#, return: 1498 * the associated src bus IRQ if found 1499 * -1 if NOT found 1500 */ 1501 int 1502 apic_src_bus_irq(int apic, int pin) 1503 { 1504 int x; 1505 1506 for (x = 0; x < nintrs; x++) 1507 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1508 (pin == io_apic_ints[x].dst_apic_int)) 1509 return (io_apic_ints[x].src_bus_irq); 1510 1511 return -1; /* NOT found */ 1512 } 1513 1514 1515 /* 1516 * given a LOGICAL APIC# and pin#, return: 1517 * the associated INTerrupt type if found 1518 * -1 if NOT found 1519 */ 1520 int 1521 apic_int_type(int apic, int pin) 1522 { 1523 int x; 1524 1525 /* search each of the possible INTerrupt sources */ 1526 for (x = 0; x < nintrs; ++x) 1527 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1528 (pin == io_apic_ints[x].dst_apic_int)) 1529 return (io_apic_ints[x].int_type); 1530 1531 return -1; /* NOT found */ 1532 } 1533 1534 int 1535 apic_irq(int apic, int pin) 1536 { 1537 int x; 1538 int res; 1539 1540 for (x = 0; x < nintrs; ++x) 1541 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1542 (pin == io_apic_ints[x].dst_apic_int)) { 1543 res = io_apic_ints[x].int_vector; 1544 if (res == 0xff) 1545 return -1; 1546 if (apic != int_to_apicintpin[res].ioapic) 1547 panic("apic_irq: inconsistent table"); 1548 if (pin != int_to_apicintpin[res].int_pin) 1549 panic("apic_irq inconsistent table (2)"); 1550 return res; 1551 } 1552 return -1; 1553 } 1554 1555 1556 /* 1557 * given a LOGICAL APIC# and pin#, return: 1558 * the associated trigger mode if found 1559 * -1 if NOT found 1560 */ 1561 int 1562 apic_trigger(int apic, int pin) 1563 { 1564 int x; 1565 1566 /* search each of the possible INTerrupt sources */ 1567 for (x = 0; x < nintrs; ++x) 1568 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1569 (pin == io_apic_ints[x].dst_apic_int)) 1570 return ((io_apic_ints[x].int_flags >> 2) 
& 0x03); 1571 1572 return -1; /* NOT found */ 1573 } 1574 1575 1576 /* 1577 * given a LOGICAL APIC# and pin#, return: 1578 * the associated 'active' level if found 1579 * -1 if NOT found 1580 */ 1581 int 1582 apic_polarity(int apic, int pin) 1583 { 1584 int x; 1585 1586 /* search each of the possible INTerrupt sources */ 1587 for (x = 0; x < nintrs; ++x) 1588 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1589 (pin == io_apic_ints[x].dst_apic_int)) 1590 return (io_apic_ints[x].int_flags & 0x03); 1591 1592 return -1; /* NOT found */ 1593 } 1594 1595 1596 /* 1597 * set data according to MP defaults 1598 * FIXME: probably not complete yet... 1599 */ 1600 static void 1601 default_mp_table(int type) 1602 { 1603 int ap_cpu_id; 1604 #if defined(APIC_IO) 1605 u_int32_t ux; 1606 int io_apic_id; 1607 int pin; 1608 #endif /* APIC_IO */ 1609 1610 #if 0 1611 printf(" MP default config type: %d\n", type); 1612 switch (type) { 1613 case 1: 1614 printf(" bus: ISA, APIC: 82489DX\n"); 1615 break; 1616 case 2: 1617 printf(" bus: EISA, APIC: 82489DX\n"); 1618 break; 1619 case 3: 1620 printf(" bus: EISA, APIC: 82489DX\n"); 1621 break; 1622 case 4: 1623 printf(" bus: MCA, APIC: 82489DX\n"); 1624 break; 1625 case 5: 1626 printf(" bus: ISA+PCI, APIC: Integrated\n"); 1627 break; 1628 case 6: 1629 printf(" bus: EISA+PCI, APIC: Integrated\n"); 1630 break; 1631 case 7: 1632 printf(" bus: MCA+PCI, APIC: Integrated\n"); 1633 break; 1634 default: 1635 printf(" future type\n"); 1636 break; 1637 /* NOTREACHED */ 1638 } 1639 #endif /* 0 */ 1640 1641 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24; 1642 ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0; 1643 1644 /* BSP */ 1645 CPU_TO_ID(0) = boot_cpu_id; 1646 ID_TO_CPU(boot_cpu_id) = 0; 1647 1648 /* one and only AP */ 1649 CPU_TO_ID(1) = ap_cpu_id; 1650 ID_TO_CPU(ap_cpu_id) = 1; 1651 1652 #if defined(APIC_IO) 1653 /* one and only IO APIC */ 1654 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24; 1655 1656 /* 1657 * sanity check, refer to MP spec section 3.6.6, last paragraph 1658 * necessary as some hardware isn't properly setting up the IO APIC 1659 */ 1660 #if defined(REALLY_ANAL_IOAPICID_VALUE) 1661 if (io_apic_id != 2) { 1662 #else 1663 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) { 1664 #endif /* REALLY_ANAL_IOAPICID_VALUE */ 1665 io_apic_set_id(0, 2); 1666 io_apic_id = 2; 1667 } 1668 IO_TO_ID(0) = io_apic_id; 1669 ID_TO_IO(io_apic_id) = 0; 1670 #endif /* APIC_IO */ 1671 1672 /* fill out bus entries */ 1673 switch (type) { 1674 case 1: 1675 case 2: 1676 case 3: 1677 case 4: 1678 case 5: 1679 case 6: 1680 case 7: 1681 bus_data[0].bus_id = default_data[type - 1][1]; 1682 bus_data[0].bus_type = default_data[type - 1][2]; 1683 bus_data[1].bus_id = default_data[type - 1][3]; 1684 bus_data[1].bus_type = default_data[type - 1][4]; 1685 break; 1686 1687 /* case 4: case 7: MCA NOT supported */ 1688 default: /* illegal/reserved */ 1689 panic("BAD default MP config: %d", type); 1690 /* NOTREACHED */ 1691 } 1692 1693 #if defined(APIC_IO) 1694 /* general cases from MP v1.4, table 5-2 */ 1695 for (pin = 0; pin < 16; ++pin) { 1696 io_apic_ints[pin].int_type = 0; 1697 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */ 1698 io_apic_ints[pin].src_bus_id = 0; 1699 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */ 1700 io_apic_ints[pin].dst_apic_id = io_apic_id; 1701 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */ 1702 } 1703 1704 /* special cases from MP v1.4, table 5-2 */ 1705 if (type == 2) { 1706 io_apic_ints[2].int_type = 0xff; /* N/C 
*/ 1707 io_apic_ints[13].int_type = 0xff; /* N/C */ 1708 #if !defined(APIC_MIXED_MODE) 1709 /** FIXME: ??? */ 1710 panic("sorry, can't support type 2 default yet"); 1711 #endif /* APIC_MIXED_MODE */ 1712 } 1713 else 1714 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */ 1715 1716 if (type == 7) 1717 io_apic_ints[0].int_type = 0xff; /* N/C */ 1718 else 1719 io_apic_ints[0].int_type = 3; /* vectored 8259 */ 1720 #endif /* APIC_IO */ 1721 } 1722 1723 1724 /* 1725 * initialize all the SMP locks 1726 */ 1727 1728 /* critical region around IO APIC, apic_imen */ 1729 struct simplelock imen_lock; 1730 1731 /* critical region around splxx(), cpl, cml, cil, ipending */ 1732 struct simplelock cpl_lock; 1733 1734 /* Make FAST_INTR() routines sequential */ 1735 struct simplelock fast_intr_lock; 1736 1737 /* critical region around INTR() routines */ 1738 struct simplelock intr_lock; 1739 1740 /* lock regions protected in UP kernel via cli/sti */ 1741 struct simplelock mpintr_lock; 1742 1743 /* lock region used by kernel profiling */ 1744 struct simplelock mcount_lock; 1745 1746 #ifdef USE_COMLOCK 1747 /* locks com (tty) data/hardware accesses: a FASTINTR() */ 1748 struct simplelock com_lock; 1749 #endif /* USE_COMLOCK */ 1750 1751 #ifdef USE_CLOCKLOCK 1752 /* lock regions around the clock hardware */ 1753 struct simplelock clock_lock; 1754 #endif /* USE_CLOCKLOCK */ 1755 1756 /* lock around the MP rendezvous */ 1757 static struct simplelock smp_rv_lock; 1758 1759 static void 1760 init_locks(void) 1761 { 1762 /* 1763 * Get the initial mp_lock with a count of 1 for the BSP. 1764 * This uses a LOGICAL cpu ID, ie BSP == 0. 1765 */ 1766 mp_lock = 0x00000001; 1767 1768 #if 0 1769 /* ISR uses its own "giant lock" */ 1770 isr_lock = FREE_LOCK; 1771 #endif 1772 1773 #if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ) 1774 s_lock_init((struct simplelock*)&apic_itrace_debuglock); 1775 #endif 1776 1777 s_lock_init((struct simplelock*)&mpintr_lock); 1778 1779 s_lock_init((struct simplelock*)&mcount_lock); 1780 1781 s_lock_init((struct simplelock*)&fast_intr_lock); 1782 s_lock_init((struct simplelock*)&intr_lock); 1783 s_lock_init((struct simplelock*)&imen_lock); 1784 s_lock_init((struct simplelock*)&cpl_lock); 1785 s_lock_init(&smp_rv_lock); 1786 1787 #ifdef USE_COMLOCK 1788 s_lock_init((struct simplelock*)&com_lock); 1789 #endif /* USE_COMLOCK */ 1790 #ifdef USE_CLOCKLOCK 1791 s_lock_init((struct simplelock*)&clock_lock); 1792 #endif /* USE_CLOCKLOCK */ 1793 } 1794 1795 1796 /* Wait for all APs to be fully initialized */ 1797 extern int wait_ap(unsigned int); 1798 1799 /* 1800 * start each AP in our list 1801 */ 1802 static int 1803 start_all_aps(u_int boot_addr) 1804 { 1805 int x, i, pg; 1806 u_char mpbiosreason; 1807 u_long mpbioswarmvec; 1808 struct globaldata *gd; 1809 char *stack; 1810 1811 POSTCODE(START_ALL_APS_POST); 1812 1813 /* initialize BSP's local APIC */ 1814 apic_initialize(); 1815 bsp_apic_ready = 1; 1816 1817 /* install the AP 1st level boot code */ 1818 install_ap_tramp(boot_addr); 1819 1820 1821 /* save the current value of the warm-start vector */ 1822 mpbioswarmvec = *((u_long *) WARMBOOT_OFF); 1823 #ifndef PC98 1824 outb(CMOS_REG, BIOS_RESET); 1825 mpbiosreason = inb(CMOS_DATA); 1826 #endif 1827 1828 /* record BSP in CPU map */ 1829 all_cpus = 1; 1830 1831 /* set up 0 -> 4MB P==V mapping for AP boot */ 1832 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 1833 invltlb(); 1834 1835 /* start each AP */ 1836 for (x = 1; x <= mp_naps; ++x) { 
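		/*
		 * Layout of the per-AP slice of SMPpt built below
		 * (pg = x * pages per privatespace):
		 *
		 *   SMPpt[pg + 0]                  globaldata page (gd)
		 *   SMPpt[pg + 1 .. pg + 3]        *prv_CMAP1..3 (initially 0)
		 *   SMPpt[pg + 4]                  *prv_PMAP1    (initially 0)
		 *   SMPpt[pg + 5 .. pg + 4+UPAGES] idle stack pages
		 */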
1837 1838 /* This is a bit verbose, it will go away soon. */ 1839 1840 /* first page of AP's private space */ 1841 pg = x * i386_btop(sizeof(struct privatespace)); 1842 1843 /* allocate a new private data page */ 1844 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE); 1845 1846 /* wire it into the private page table page */ 1847 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd)); 1848 1849 /* allocate and set up an idle stack data page */ 1850 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE); 1851 for (i = 0; i < UPAGES; i++) 1852 SMPpt[pg + 5 + i] = (pt_entry_t) 1853 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1854 1855 SMPpt[pg + 1] = 0; /* *prv_CMAP1 */ 1856 SMPpt[pg + 2] = 0; /* *prv_CMAP2 */ 1857 SMPpt[pg + 3] = 0; /* *prv_CMAP3 */ 1858 SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ 1859 1860 /* prime data page for it to use */ 1861 gd->gd_cpuid = x; 1862 gd->gd_cpu_lockid = x << 24; 1863 gd->gd_prv_CMAP1 = &SMPpt[pg + 1]; 1864 gd->gd_prv_CMAP2 = &SMPpt[pg + 2]; 1865 gd->gd_prv_CMAP3 = &SMPpt[pg + 3]; 1866 gd->gd_prv_PMAP1 = &SMPpt[pg + 4]; 1867 gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1; 1868 gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2; 1869 gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3; 1870 gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1; 1871 1872 /* setup a vector to our boot code */ 1873 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 1874 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 1875 #ifndef PC98 1876 outb(CMOS_REG, BIOS_RESET); 1877 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 1878 #endif 1879 1880 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE]; 1881 bootAP = x; 1882 1883 /* attempt to start the Application Processor */ 1884 CHECK_INIT(99); /* setup checkpoints */ 1885 if (!start_ap(x, boot_addr)) { 1886 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x)); 1887 CHECK_PRINT("trace"); /* show checkpoints */ 1888 /* better panic as the AP may be running loose */ 1889 printf("panic y/n? [y] "); 1890 if (cngetc() != 'n') 1891 panic("bye-bye"); 1892 } 1893 CHECK_PRINT("trace"); /* show checkpoints */ 1894 1895 /* record its version info */ 1896 cpu_apic_versions[x] = cpu_apic_versions[0]; 1897 1898 all_cpus |= (1 << x); /* record AP in CPU map */ 1899 } 1900 1901 /* build our map of 'other' CPUs */ 1902 other_cpus = all_cpus & ~(1 << cpuid); 1903 1904 /* fill in our (BSP) APIC version */ 1905 cpu_apic_versions[0] = lapic.version; 1906 1907 /* restore the warmstart vector */ 1908 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 1909 #ifndef PC98 1910 outb(CMOS_REG, BIOS_RESET); 1911 outb(CMOS_DATA, mpbiosreason); 1912 #endif 1913 1914 /* 1915 * Set up the idle context for the BSP. Similar to above except 1916 * that some was done by locore, some by pmap.c and some is implicit 1917 * because the BSP is cpu#0 and the page is initially zero, and also 1918 * because we can refer to variables by name on the BSP.. 1919 */ 1920 1921 /* Allocate and setup BSP idle stack */ 1922 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); 1923 for (i = 0; i < UPAGES; i++) 1924 SMPpt[5 + i] = (pt_entry_t) 1925 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1926 1927 *(int *)PTD = 0; 1928 pmap_set_opt(); 1929 1930 /* number of APs actually started */ 1931 return mp_ncpus - 1; 1932 } 1933 1934 1935 /* 1936 * load the 1st level AP boot code into base memory. 
1937 */ 1938 1939 /* targets for relocation */ 1940 extern void bigJump(void); 1941 extern void bootCodeSeg(void); 1942 extern void bootDataSeg(void); 1943 extern void MPentry(void); 1944 extern u_int MP_GDT; 1945 extern u_int mp_gdtbase; 1946 1947 static void 1948 install_ap_tramp(u_int boot_addr) 1949 { 1950 int x; 1951 int size = *(int *) ((u_long) & bootMP_size); 1952 u_char *src = (u_char *) ((u_long) bootMP); 1953 u_char *dst = (u_char *) boot_addr + KERNBASE; 1954 u_int boot_base = (u_int) bootMP; 1955 u_int8_t *dst8; 1956 u_int16_t *dst16; 1957 u_int32_t *dst32; 1958 1959 POSTCODE(INSTALL_AP_TRAMP_POST); 1960 1961 for (x = 0; x < size; ++x) 1962 *dst++ = *src++; 1963 1964 /* 1965 * modify addresses in code we just moved to basemem. unfortunately we 1966 * need fairly detailed info about mpboot.s for this to work. changes 1967 * to mpboot.s might require changes here. 1968 */ 1969 1970 /* boot code is located in KERNEL space */ 1971 dst = (u_char *) boot_addr + KERNBASE; 1972 1973 /* modify the lgdt arg */ 1974 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 1975 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 1976 1977 /* modify the ljmp target for MPentry() */ 1978 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 1979 *dst32 = ((u_int) MPentry - KERNBASE); 1980 1981 /* modify the target for boot code segment */ 1982 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 1983 dst8 = (u_int8_t *) (dst16 + 1); 1984 *dst16 = (u_int) boot_addr & 0xffff; 1985 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 1986 1987 /* modify the target for boot data segment */ 1988 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 1989 dst8 = (u_int8_t *) (dst16 + 1); 1990 *dst16 = (u_int) boot_addr & 0xffff; 1991 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 1992 } 1993 1994 1995 /* 1996 * this function starts the AP (application processor) identified 1997 * by the APIC ID 'physicalCpu'. It does quite a "song and dance" 1998 * to accomplish this. This is necessary because of the nuances 1999 * of the different hardware we might encounter. It ain't pretty, 2000 * but it seems to work. 2001 */ 2002 static int 2003 start_ap(int logical_cpu, u_int boot_addr) 2004 { 2005 int physical_cpu; 2006 int vector; 2007 int cpus; 2008 u_long icr_lo, icr_hi; 2009 2010 POSTCODE(START_AP_POST); 2011 2012 /* get the PHYSICAL APIC ID# */ 2013 physical_cpu = CPU_TO_ID(logical_cpu); 2014 2015 /* calculate the vector */ 2016 vector = (boot_addr >> 12) & 0xff; 2017 2018 /* used as a watchpoint to signal AP startup */ 2019 cpus = mp_ncpus; 2020 2021 /* 2022 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 2023 * and running the target CPU. OR this INIT IPI might be latched (P5 2024 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 2025 * ignored. 
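 *
 * The magic ICR values used below break down as: 0x0000c500 = level
 * triggered, level asserted, delivery mode INIT; 0x00008500 = the same
 * INIT with level deasserted; 0x00000600 | vector = delivery mode
 * STARTUP, where the vector is the page number of the trampoline
 * (boot_addr >> 12).  The target AP is selected by writing its physical
 * APIC ID into bits 24-27 of icr_hi.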
2026 */ 2027 2028 /* setup the address for the target AP */ 2029 icr_hi = lapic.icr_hi & ~APIC_ID_MASK; 2030 icr_hi |= (physical_cpu << 24); 2031 lapic.icr_hi = icr_hi; 2032 2033 /* do an INIT IPI: assert RESET */ 2034 icr_lo = lapic.icr_lo & 0xfff00000; 2035 lapic.icr_lo = icr_lo | 0x0000c500; 2036 2037 /* wait for pending status end */ 2038 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2039 /* spin */ ; 2040 2041 /* do an INIT IPI: deassert RESET */ 2042 lapic.icr_lo = icr_lo | 0x00008500; 2043 2044 /* wait for pending status end */ 2045 u_sleep(10000); /* wait ~10mS */ 2046 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2047 /* spin */ ; 2048 2049 /* 2050 * next we do a STARTUP IPI: the previous INIT IPI might still be 2051 * latched, (P5 bug) this 1st STARTUP would then terminate 2052 * immediately, and the previously started INIT IPI would continue. OR 2053 * the previous INIT IPI has already run. and this STARTUP IPI will 2054 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 2055 * will run. 2056 */ 2057 2058 /* do a STARTUP IPI */ 2059 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2060 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2061 /* spin */ ; 2062 u_sleep(200); /* wait ~200uS */ 2063 2064 /* 2065 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 2066 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 2067 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 2068 * recognized after hardware RESET or INIT IPI. 2069 */ 2070 2071 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2072 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2073 /* spin */ ; 2074 u_sleep(200); /* wait ~200uS */ 2075 2076 /* wait for it to start */ 2077 set_apic_timer(5000000);/* == 5 seconds */ 2078 while (read_apic_timer()) 2079 if (mp_ncpus > cpus) 2080 return 1; /* return SUCCESS */ 2081 2082 return 0; /* return FAILURE */ 2083 } 2084 2085 2086 /* 2087 * Flush the TLB on all other CPU's 2088 * 2089 * XXX: Needs to handshake and wait for completion before proceding. 2090 */ 2091 void 2092 smp_invltlb(void) 2093 { 2094 #if defined(APIC_IO) 2095 if (smp_started && invltlb_ok) 2096 all_but_self_ipi(XINVLTLB_OFFSET); 2097 #endif /* APIC_IO */ 2098 } 2099 2100 void 2101 invlpg(u_int addr) 2102 { 2103 __asm __volatile("invlpg (%0)"::"r"(addr):"memory"); 2104 2105 /* send a message to the other CPUs */ 2106 smp_invltlb(); 2107 } 2108 2109 void 2110 invltlb(void) 2111 { 2112 u_long temp; 2113 2114 /* 2115 * This should be implemented as load_cr3(rcr3()) when load_cr3() is 2116 * inlined. 2117 */ 2118 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory"); 2119 2120 /* send a message to the other CPUs */ 2121 smp_invltlb(); 2122 } 2123 2124 2125 /* 2126 * When called the executing CPU will send an IPI to all other CPUs 2127 * requesting that they halt execution. 2128 * 2129 * Usually (but not necessarily) called with 'other_cpus' as its arg. 2130 * 2131 * - Signals all CPUs in map to stop. 2132 * - Waits for each to stop. 2133 * 2134 * Returns: 2135 * -1: error 2136 * 0: NA 2137 * 1: ok 2138 * 2139 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 2140 * from executing at same time. 
2141  */
2142 int
2143 stop_cpus(u_int map)
2144 {
2145 	if (!smp_started)
2146 		return 0;
2147 
2148 	/* send the Xcpustop IPI to all CPUs in map */
2149 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2150 
2151 	while ((stopped_cpus & map) != map)
2152 		/* spin */ ;
2153 
2154 	return 1;
2155 }
2156 
2157 
2158 /*
2159  * Called by a CPU to restart stopped CPUs.
2160  *
2161  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2162  *
2163  * - Signals all CPUs in map to restart.
2164  * - Waits for each to restart.
2165  *
2166  * Returns:
2167  *   -1: error
2168  *    0: NA
2169  *    1: ok
2170  */
2171 int
2172 restart_cpus(u_int map)
2173 {
2174 	if (!smp_started)
2175 		return 0;
2176 
2177 	started_cpus = map;		/* signal other cpus to restart */
2178 
2179 	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2180 		/* spin */ ;
2181 
2182 	return 1;
2183 }
2184 
2185 int smp_active = 0;	/* are the APs allowed to run? */
2186 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2187 
2188 /* XXX maybe should be hw.ncpu */
2189 static int smp_cpus = 1;	/* how many cpus are running */
2190 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2191 
2192 int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2193 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2194 
2195 /* Warning: Do not staticize.  Used from swtch.s */
2196 int do_page_zero_idle = 1;	/* bzero pages for fun and profit in idleloop */
2197 SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2198 	   &do_page_zero_idle, 0, "");
2199 
2200 /* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2201 int forward_irq_enabled = 1;
2202 SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2203 	   &forward_irq_enabled, 0, "");
2204 
2205 /* Enable forwarding of a signal to a process running on a different CPU */
2206 static int forward_signal_enabled = 1;
2207 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2208 	   &forward_signal_enabled, 0, "");
2209 
2210 /* Enable forwarding of roundrobin to all other cpus */
2211 static int forward_roundrobin_enabled = 1;
2212 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2213 	   &forward_roundrobin_enabled, 0, "");
2214 
2215 /*
2216  * This is called once the rest of the system is up and running and we're
2217  * ready to let the APs out of the pen.
2218  */
2219 void ap_init(void);
2220 
2221 void
2222 ap_init()
2223 {
2224 	u_int   apic_id;
2225 
2226 	/* BSP may have changed PTD while we're waiting for the lock */
2227 	cpu_invltlb();
2228 
2229 	smp_cpus++;
2230 
2231 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2232 	lidt(&r_idt);
2233 #endif
2234 
2235 	/* Build our map of 'other' CPUs. */
2236 	other_cpus = all_cpus & ~(1 << cpuid);
2237 
2238 	printf("SMP: AP CPU #%d Launched!\n", cpuid);
2239 
2240 	/* XXX FIXME: i386 specific, and redundant: Set up the FPU. */
2241 	load_cr0((rcr0() & ~CR0_EM) | CR0_MP | CR0_NE | CR0_TS);
2242 
2243 	/* set up FPU state on the AP */
2244 	npxinit(__INITIAL_NPXCW__);
2245 
2246 	/* A quick check from sanity claus */
2247 	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2248 	if (cpuid != apic_id) {
2249 		printf("SMP: cpuid = %d\n", cpuid);
2250 		printf("SMP: apic_id = %d\n", apic_id);
2251 		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2252 		panic("cpuid mismatch!
boom!!"); 2253 } 2254 2255 /* Init local apic for irq's */ 2256 apic_initialize(); 2257 2258 /* Set memory range attributes for this CPU to match the BSP */ 2259 mem_range_AP_init(); 2260 2261 /* 2262 * Activate smp_invltlb, although strictly speaking, this isn't 2263 * quite correct yet. We should have a bitfield for cpus willing 2264 * to accept TLB flush IPI's or something and sync them. 2265 */ 2266 if (smp_cpus == mp_ncpus) { 2267 invltlb_ok = 1; 2268 smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */ 2269 smp_active = 1; /* historic */ 2270 } 2271 } 2272 2273 #ifdef BETTER_CLOCK 2274 2275 #define CHECKSTATE_USER 0 2276 #define CHECKSTATE_SYS 1 2277 #define CHECKSTATE_INTR 2 2278 2279 /* Do not staticize. Used from apic_vector.s */ 2280 struct proc* checkstate_curproc[NCPU]; 2281 int checkstate_cpustate[NCPU]; 2282 u_long checkstate_pc[NCPU]; 2283 2284 extern long cp_time[CPUSTATES]; 2285 2286 #define PC_TO_INDEX(pc, prof) \ 2287 ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \ 2288 (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) 2289 2290 static void 2291 addupc_intr_forwarded(struct proc *p, int id, int *astmap) 2292 { 2293 int i; 2294 struct uprof *prof; 2295 u_long pc; 2296 2297 pc = checkstate_pc[id]; 2298 prof = &p->p_stats->p_prof; 2299 if (pc >= prof->pr_off && 2300 (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) { 2301 if ((p->p_flag & P_OWEUPC) == 0) { 2302 prof->pr_addr = pc; 2303 prof->pr_ticks = 1; 2304 p->p_flag |= P_OWEUPC; 2305 } 2306 *astmap |= (1 << id); 2307 } 2308 } 2309 2310 static void 2311 forwarded_statclock(int id, int pscnt, int *astmap) 2312 { 2313 struct pstats *pstats; 2314 long rss; 2315 struct rusage *ru; 2316 struct vmspace *vm; 2317 int cpustate; 2318 struct proc *p; 2319 #ifdef GPROF 2320 register struct gmonparam *g; 2321 int i; 2322 #endif 2323 2324 p = checkstate_curproc[id]; 2325 cpustate = checkstate_cpustate[id]; 2326 2327 switch (cpustate) { 2328 case CHECKSTATE_USER: 2329 if (p->p_flag & P_PROFIL) 2330 addupc_intr_forwarded(p, id, astmap); 2331 if (pscnt > 1) 2332 return; 2333 p->p_uticks++; 2334 if (p->p_nice > NZERO) 2335 cp_time[CP_NICE]++; 2336 else 2337 cp_time[CP_USER]++; 2338 break; 2339 case CHECKSTATE_SYS: 2340 #ifdef GPROF 2341 /* 2342 * Kernel statistics are just like addupc_intr, only easier. 2343 */ 2344 g = &_gmonparam; 2345 if (g->state == GMON_PROF_ON) { 2346 i = checkstate_pc[id] - g->lowpc; 2347 if (i < g->textsize) { 2348 i /= HISTFRACTION * sizeof(*g->kcount); 2349 g->kcount[i]++; 2350 } 2351 } 2352 #endif 2353 if (pscnt > 1) 2354 return; 2355 2356 if (!p) 2357 cp_time[CP_IDLE]++; 2358 else { 2359 p->p_sticks++; 2360 cp_time[CP_SYS]++; 2361 } 2362 break; 2363 case CHECKSTATE_INTR: 2364 default: 2365 #ifdef GPROF 2366 /* 2367 * Kernel statistics are just like addupc_intr, only easier. 2368 */ 2369 g = &_gmonparam; 2370 if (g->state == GMON_PROF_ON) { 2371 i = checkstate_pc[id] - g->lowpc; 2372 if (i < g->textsize) { 2373 i /= HISTFRACTION * sizeof(*g->kcount); 2374 g->kcount[i]++; 2375 } 2376 } 2377 #endif 2378 if (pscnt > 1) 2379 return; 2380 if (p) 2381 p->p_iticks++; 2382 cp_time[CP_INTR]++; 2383 } 2384 if (p != NULL) { 2385 schedclock(p); 2386 2387 /* Update resource usage integrals and maximums. 
	 */
2388 		if ((pstats = p->p_stats) != NULL &&
2389 		    (ru = &pstats->p_ru) != NULL &&
2390 		    (vm = p->p_vmspace) != NULL) {
2391 			ru->ru_ixrss += pgtok(vm->vm_tsize);
2392 			ru->ru_idrss += pgtok(vm->vm_dsize);
2393 			ru->ru_isrss += pgtok(vm->vm_ssize);
2394 			rss = pgtok(vmspace_resident_count(vm));
2395 			if (ru->ru_maxrss < rss)
2396 				ru->ru_maxrss = rss;
2397 		}
2398 	}
2399 }
2400 
2401 void
2402 forward_statclock(int pscnt)
2403 {
2404 	int map;
2405 	int id;
2406 	int i;
2407 
2408 	/* Kludge. We don't yet have separate locks for the interrupts
2409 	 * and the kernel.  This means that we cannot let the other
2410 	 * processors handle complex interrupts while inhibiting them from
2411 	 * entering the kernel in a non-interrupt context.
2412 	 *
2413 	 * What we can do, without changing the locking mechanisms yet,
2414 	 * is let the other processors handle a very simple interrupt
2415 	 * (which determines the processor states), and do the main
2416 	 * work ourselves.
2417 	 */
2418 
2419 	if (!smp_started || !invltlb_ok || cold || panicstr)
2420 		return;
2421 
2422 	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2423 
2424 	map = other_cpus & ~stopped_cpus;
2425 	checkstate_probed_cpus = 0;
2426 	if (map != 0)
2427 		selected_apic_ipi(map,
2428 			XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2429 
2430 	i = 0;
2431 	while (checkstate_probed_cpus != map) {
2432 		/* spin */
2433 		i++;
2434 		if (i == 100000) {
2435 #ifdef BETTER_CLOCK_DIAGNOSTIC
2436 			printf("forward_statclock: checkstate %x\n",
2437 			       checkstate_probed_cpus);
2438 #endif
2439 			break;
2440 		}
2441 	}
2442 
2443 	/*
2444 	 * Step 2: walk through the other processors' processes, update
2445 	 * ticks and profiling info.
2446 	 */
2447 
2448 	map = 0;
2449 	for (id = 0; id < mp_ncpus; id++) {
2450 		if (id == cpuid)
2451 			continue;
2452 		if (((1 << id) & checkstate_probed_cpus) == 0)
2453 			continue;
2454 		forwarded_statclock(id, pscnt, &map);
2455 	}
2456 	if (map != 0) {
2457 		checkstate_need_ast |= map;
2458 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2459 		i = 0;
2460 		while ((checkstate_need_ast & map) != 0) {
2461 			/* spin */
2462 			i++;
2463 			if (i > 100000) {
2464 #ifdef BETTER_CLOCK_DIAGNOSTIC
2465 				printf("forward_statclock: dropped ast 0x%x\n",
2466 				       checkstate_need_ast & map);
2467 #endif
2468 				break;
2469 			}
2470 		}
2471 	}
2472 }
2473 
2474 void
2475 forward_hardclock(int pscnt)
2476 {
2477 	int map;
2478 	int id;
2479 	struct proc *p;
2480 	struct pstats *pstats;
2481 	int i;
2482 
2483 	/* Kludge. We don't yet have separate locks for the interrupts
2484 	 * and the kernel.  This means that we cannot let the other
2485 	 * processors handle complex interrupts while inhibiting them from
2486 	 * entering the kernel in a non-interrupt context.
2487 	 *
2488 	 * What we can do, without changing the locking mechanisms yet,
2489 	 * is let the other processors handle a very simple interrupt
2490 	 * (which determines the processor states), and do the main
2491 	 * work ourselves.
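	 *
	 * (The "very simple interrupt" is the XCPUCHECKSTATE IPI, whose
	 *  handler in apic_vector.s records each target CPU's current proc,
	 *  state and pc in the checkstate_* arrays above and marks the CPU
	 *  in checkstate_probed_cpus; the XCPUAST IPI sent afterwards just
	 *  asks the target CPU to take an AST.)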
2492 	 */
2493 
2494 	if (!smp_started || !invltlb_ok || cold || panicstr)
2495 		return;
2496 
2497 	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */
2498 
2499 	map = other_cpus & ~stopped_cpus;
2500 	checkstate_probed_cpus = 0;
2501 	if (map != 0)
2502 		selected_apic_ipi(map,
2503 			XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2504 
2505 	i = 0;
2506 	while (checkstate_probed_cpus != map) {
2507 		/* spin */
2508 		i++;
2509 		if (i == 100000) {
2510 #ifdef BETTER_CLOCK_DIAGNOSTIC
2511 			printf("forward_hardclock: checkstate %x\n",
2512 			       checkstate_probed_cpus);
2513 #endif
2514 			break;
2515 		}
2516 	}
2517 
2518 	/*
2519 	 * Step 2: walk through the other processors' processes, update the
2520 	 * virtual timer and profiling timer.  If stathz == 0, also update
2521 	 * ticks and profiling info.
2522 	 */
2523 
2524 	map = 0;
2525 	for (id = 0; id < mp_ncpus; id++) {
2526 		if (id == cpuid)
2527 			continue;
2528 		if (((1 << id) & checkstate_probed_cpus) == 0)
2529 			continue;
2530 		p = checkstate_curproc[id];
2531 		if (p) {
2532 			pstats = p->p_stats;
2533 			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2534 			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2535 			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2536 				psignal(p, SIGVTALRM);
2537 				map |= (1 << id);
2538 			}
2539 			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2540 			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2541 				psignal(p, SIGPROF);
2542 				map |= (1 << id);
2543 			}
2544 		}
2545 		if (stathz == 0) {
2546 			forwarded_statclock(id, pscnt, &map);
2547 		}
2548 	}
2549 	if (map != 0) {
2550 		checkstate_need_ast |= map;
2551 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2552 		i = 0;
2553 		while ((checkstate_need_ast & map) != 0) {
2554 			/* spin */
2555 			i++;
2556 			if (i > 100000) {
2557 #ifdef BETTER_CLOCK_DIAGNOSTIC
2558 				printf("forward_hardclock: dropped ast 0x%x\n",
2559 				       checkstate_need_ast & map);
2560 #endif
2561 				break;
2562 			}
2563 		}
2564 	}
2565 }
2566 
2567 #endif /* BETTER_CLOCK */
2568 
2569 void
2570 forward_signal(struct proc *p)
2571 {
2572 	int map;
2573 	int id;
2574 	int i;
2575 
2576 	/* Kludge. We don't yet have separate locks for the interrupts
2577 	 * and the kernel.  This means that we cannot let the other
2578 	 * processors handle complex interrupts while inhibiting them from
2579 	 * entering the kernel in a non-interrupt context.
2580 	 *
2581 	 * What we can do, without changing the locking mechanisms yet,
2582 	 * is let the other processors handle a very simple interrupt
2583 	 * (which determines the processor states), and do the main
2584 	 * work ourselves.
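	 *
	 * (Here the AST IPI itself does the whole job: if the process is
	 *  currently running on another CPU, poking that CPU with XCPUAST
	 *  makes it notice the freshly posted signal promptly rather than at
	 *  its next natural kernel entry.  The loop below re-checks p_oncpu
	 *  because the process may have migrated to a different CPU while we
	 *  were waiting.)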
2585 */ 2586 2587 if (!smp_started || !invltlb_ok || cold || panicstr) 2588 return; 2589 if (!forward_signal_enabled) 2590 return; 2591 while (1) { 2592 if (p->p_stat != SRUN) 2593 return; 2594 id = p->p_oncpu; 2595 if (id == 0xff) 2596 return; 2597 map = (1<<id); 2598 checkstate_need_ast |= map; 2599 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2600 i = 0; 2601 while ((checkstate_need_ast & map) != 0) { 2602 /* spin */ 2603 i++; 2604 if (i > 100000) { 2605 #if 0 2606 printf("forward_signal: dropped ast 0x%x\n", 2607 checkstate_need_ast & map); 2608 #endif 2609 break; 2610 } 2611 } 2612 if (id == p->p_oncpu) 2613 return; 2614 } 2615 } 2616 2617 void 2618 forward_roundrobin(void) 2619 { 2620 u_int map; 2621 int i; 2622 2623 if (!smp_started || !invltlb_ok || cold || panicstr) 2624 return; 2625 if (!forward_roundrobin_enabled) 2626 return; 2627 resched_cpus |= other_cpus; 2628 map = other_cpus & ~stopped_cpus ; 2629 #if 1 2630 selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED); 2631 #else 2632 (void) all_but_self_ipi(XCPUAST_OFFSET); 2633 #endif 2634 i = 0; 2635 while ((checkstate_need_ast & map) != 0) { 2636 /* spin */ 2637 i++; 2638 if (i > 100000) { 2639 #if 0 2640 printf("forward_roundrobin: dropped ast 0x%x\n", 2641 checkstate_need_ast & map); 2642 #endif 2643 break; 2644 } 2645 } 2646 } 2647 2648 2649 #ifdef APIC_INTR_REORDER 2650 /* 2651 * Maintain mapping from softintr vector to isr bit in local apic. 2652 */ 2653 void 2654 set_lapic_isrloc(int intr, int vector) 2655 { 2656 if (intr < 0 || intr > 32) 2657 panic("set_apic_isrloc: bad intr argument: %d",intr); 2658 if (vector < ICU_OFFSET || vector > 255) 2659 panic("set_apic_isrloc: bad vector argument: %d",vector); 2660 apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2); 2661 apic_isrbit_location[intr].bit = (1<<(vector & 31)); 2662 } 2663 #endif 2664 2665 /* 2666 * All-CPU rendezvous. CPUs are signalled, all execute the setup function 2667 * (if specified), rendezvous, execute the action function (if specified), 2668 * rendezvous again, execute the teardown function (if specified), and then 2669 * resume. 2670 * 2671 * Note that the supplied external functions _must_ be reentrant and aware 2672 * that they are running in parallel and in an unknown lock context. 2673 */ 2674 static void (*smp_rv_setup_func)(void *arg); 2675 static void (*smp_rv_action_func)(void *arg); 2676 static void (*smp_rv_teardown_func)(void *arg); 2677 static void *smp_rv_func_arg; 2678 static volatile int smp_rv_waiters[2]; 2679 2680 void 2681 smp_rendezvous_action(void) 2682 { 2683 /* setup function */ 2684 if (smp_rv_setup_func != NULL) 2685 smp_rv_setup_func(smp_rv_func_arg); 2686 /* spin on entry rendezvous */ 2687 atomic_add_int(&smp_rv_waiters[0], 1); 2688 while (smp_rv_waiters[0] < mp_ncpus) 2689 ; 2690 /* action function */ 2691 if (smp_rv_action_func != NULL) 2692 smp_rv_action_func(smp_rv_func_arg); 2693 /* spin on exit rendezvous */ 2694 atomic_add_int(&smp_rv_waiters[1], 1); 2695 while (smp_rv_waiters[1] < mp_ncpus) 2696 ; 2697 /* teardown function */ 2698 if (smp_rv_teardown_func != NULL) 2699 smp_rv_teardown_func(smp_rv_func_arg); 2700 } 2701 2702 void 2703 smp_rendezvous(void (* setup_func)(void *), 2704 void (* action_func)(void *), 2705 void (* teardown_func)(void *), 2706 void *arg) 2707 { 2708 u_int efl; 2709 2710 /* obtain rendezvous lock */ 2711 s_lock(&smp_rv_lock); /* XXX sleep here? NOWAIT flag? 
*/ 2712 2713 /* set static function pointers */ 2714 smp_rv_setup_func = setup_func; 2715 smp_rv_action_func = action_func; 2716 smp_rv_teardown_func = teardown_func; 2717 smp_rv_func_arg = arg; 2718 smp_rv_waiters[0] = 0; 2719 smp_rv_waiters[1] = 0; 2720 2721 /* disable interrupts on this CPU, save interrupt status */ 2722 efl = read_eflags(); 2723 write_eflags(efl & ~PSL_I); 2724 2725 /* signal other processors, which will enter the IPI with interrupts off */ 2726 all_but_self_ipi(XRENDEZVOUS_OFFSET); 2727 2728 /* call executor function */ 2729 smp_rendezvous_action(); 2730 2731 /* restore interrupt flag */ 2732 write_eflags(efl); 2733 2734 /* release lock */ 2735 s_unlock(&smp_rv_lock); 2736 } 2737
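
/*
 * Example (illustrative sketch only, not used anywhere in this file): how a
 * caller might use smp_rendezvous() above to run a short action on every
 * CPU, with all CPUs synchronized at entry and exit.  The names
 * smp_rv_example_action and smp_rv_example are hypothetical.
 */
#if 0
static void
smp_rv_example_action(void *arg)
{
	/* runs once on every CPU, held at the entry rendezvous first */
	printf("CPU%d: rendezvous action, arg=%p\n", cpuid, arg);
}

static void
smp_rv_example(void)
{
	/* NULL setup/teardown: only the action function is executed */
	smp_rendezvous(NULL, smp_rv_example_action, NULL, NULL);
}
#endif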