1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * PSMI 1.1 extensions are supported only in 2.6 and later versions. 31 * PSMI 1.2 extensions are supported only in 2.7 and later versions. 32 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10. 33 * PSMI 1.5 extensions are supported in Solaris Nevada. 34 */ 35 #define PSMI_1_5 36 37 #include <sys/processor.h> 38 #include <sys/time.h> 39 #include <sys/psm.h> 40 #include <sys/smp_impldefs.h> 41 #include <sys/cram.h> 42 #include <sys/acpi/acpi.h> 43 #include <sys/acpica.h> 44 #include <sys/psm_common.h> 45 #include "apic.h" 46 #include <sys/pit.h> 47 #include <sys/ddi.h> 48 #include <sys/sunddi.h> 49 #include <sys/ddi_impldefs.h> 50 #include <sys/pci.h> 51 #include <sys/promif.h> 52 #include <sys/x86_archext.h> 53 #include <sys/cpc_impl.h> 54 #include <sys/uadmin.h> 55 #include <sys/panic.h> 56 #include <sys/debug.h> 57 #include <sys/archsystm.h> 58 #include <sys/trap.h> 59 #include <sys/machsystm.h> 60 #include <sys/cpuvar.h> 61 #include <sys/rm_platter.h> 62 #include <sys/privregs.h> 63 #include <sys/cyclic.h> 64 #include <sys/note.h> 65 #include <sys/pci_intr_lib.h> 66 67 /* 68 * Local Function Prototypes 69 */ 70 static void apic_init_intr(); 71 static void apic_ret(); 72 static int apic_handle_defconf(); 73 static int apic_parse_mpct(caddr_t mpct, int bypass); 74 static struct apic_mpfps_hdr *apic_find_fps_sig(caddr_t fptr, int size); 75 static int apic_checksum(caddr_t bptr, int len); 76 static int get_apic_cmd1(); 77 static int get_apic_pri(); 78 static int apic_find_bus_type(char *bus); 79 static int apic_find_bus(int busid); 80 static int apic_find_bus_id(int bustype); 81 static struct apic_io_intr *apic_find_io_intr(int irqno); 82 int apic_allocate_irq(int irq); 83 static int apic_find_free_irq(int start, int end); 84 static uchar_t apic_allocate_vector(int ipl, int irq, int pri); 85 static void apic_modify_vector(uchar_t vector, int irq); 86 static void apic_mark_vector(uchar_t oldvector, uchar_t newvector); 87 static uchar_t apic_xlate_vector(uchar_t oldvector); 88 static void apic_xlate_vector_free_timeout_handler(void *arg); 89 static void apic_free_vector(uchar_t vector); 90 static void apic_reprogram_timeout_handler(void *arg); 91 static int apic_check_stuck_interrupt(apic_irq_t *irq_ptr, int old_bind_cpu, 92 int new_bind_cpu, volatile int32_t *ioapic, int intin_no, int which_irq); 93 static int apic_setup_io_intr(apic_irq_t *irqptr, int irq); 94 static int apic_setup_io_intr_deferred(apic_irq_t *irqptr, int irq); 95 static void apic_record_rdt_entry(apic_irq_t *irqptr, int irq); 96 static struct apic_io_intr *apic_find_io_intr_w_busid(int irqno, int busid); 97 static int apic_find_intin(uchar_t ioapic, uchar_t intin); 98 static int apic_handle_pci_pci_bridge(dev_info_t *idip, int child_devno, 99 int child_ipin, struct apic_io_intr **intrp); 100 static int apic_setup_irq_table(dev_info_t *dip, int irqno, 101 struct apic_io_intr *intrp, struct intrspec *ispec, iflag_t *intr_flagp, 102 int type); 103 static int apic_setup_sci_irq_table(int irqno, uchar_t ipl, 104 iflag_t *intr_flagp); 105 static void apic_nmi_intr(caddr_t arg); 106 uchar_t apic_bind_intr(dev_info_t *dip, int irq, uchar_t ioapicid, 107 uchar_t intin); 108 static int apic_rebind(apic_irq_t *irq_ptr, int bind_cpu, int acquire_lock, 109 int when); 110 int apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe); 111 static void apic_intr_redistribute(); 112 static void apic_cleanup_busy(); 113 static void apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr *hdrp); 114 int apic_introp_xlate(dev_info_t *dip, struct intrspec *ispec, int type); 115 116 /* ACPI support routines */ 117 static int acpi_probe(void); 118 static int apic_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip, 119 int *pci_irqp, iflag_t *intr_flagp); 120 121 static int apic_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid, 122 int ipin, int *pci_irqp, iflag_t *intr_flagp); 123 static uchar_t acpi_find_ioapic(int irq); 124 static int acpi_intr_compatible(iflag_t iflag1, iflag_t iflag2); 125 126 /* 127 * standard MP entries 128 */ 129 static int apic_probe(); 130 static int apic_clkinit(); 131 static int apic_getclkirq(int ipl); 132 static uint_t apic_calibrate(volatile uint32_t *addr, 133 uint16_t *pit_ticks_adj); 134 static hrtime_t apic_gettime(); 135 static hrtime_t apic_gethrtime(); 136 static void apic_init(); 137 static void apic_picinit(void); 138 static void apic_cpu_start(processorid_t cpun, caddr_t rm_code); 139 static int apic_post_cpu_start(void); 140 static void apic_send_ipi(int cpun, int ipl); 141 static void apic_set_softintr(int softintr); 142 static void apic_set_idlecpu(processorid_t cpun); 143 static void apic_unset_idlecpu(processorid_t cpun); 144 static int apic_softlvl_to_irq(int ipl); 145 static int apic_intr_enter(int ipl, int *vect); 146 static void apic_intr_exit(int ipl, int vect); 147 static void apic_setspl(int ipl); 148 static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl); 149 static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl); 150 static void apic_shutdown(int cmd, int fcn); 151 static void apic_preshutdown(int cmd, int fcn); 152 static int apic_disable_intr(processorid_t cpun); 153 static void apic_enable_intr(processorid_t cpun); 154 static processorid_t apic_get_next_processorid(processorid_t cpun); 155 static int apic_get_ipivect(int ipl, int type); 156 static void apic_timer_reprogram(hrtime_t time); 157 static void apic_timer_enable(void); 158 static void apic_timer_disable(void); 159 static void apic_post_cyclic_setup(void *arg); 160 extern int apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *, 161 psm_intr_op_t, int *); 162 163 static int apic_oneshot = 0; 164 int apic_oneshot_enable = 1; /* to allow disabling one-shot capability */ 165 166 /* 167 * These variables are frequently accessed in apic_intr_enter(), 168 * apic_intr_exit and apic_setspl, so group them together 169 */ 170 volatile uint32_t *apicadr = NULL; /* virtual addr of local APIC */ 171 int apic_setspl_delay = 1; /* apic_setspl - delay enable */ 172 int apic_clkvect; 173 174 /* ACPI SCI interrupt configuration; -1 if SCI not used */ 175 int apic_sci_vect = -1; 176 iflag_t apic_sci_flags; 177 178 /* vector at which error interrupts come in */ 179 int apic_errvect; 180 int apic_enable_error_intr = 1; 181 int apic_error_display_delay = 100; 182 183 /* vector at which performance counter overflow interrupts come in */ 184 int apic_cpcovf_vect; 185 int apic_enable_cpcovf_intr = 1; 186 187 /* Max wait time (in microsecs) for flags to clear in an RDT entry. */ 188 static int apic_max_usecs_clear_pending = 1000; 189 190 /* Amt of usecs to wait before checking if RDT flags have reset. */ 191 #define APIC_USECS_PER_WAIT_INTERVAL 100 192 193 /* Maximum number of times to retry reprogramming via the timeout */ 194 #define APIC_REPROGRAM_MAX_TIMEOUTS 10 195 196 /* timeout delay for IOAPIC delayed reprogramming */ 197 #define APIC_REPROGRAM_TIMEOUT_DELAY 5 /* microseconds */ 198 199 /* Parameter to apic_rebind(): Should reprogramming be done now or later? */ 200 #define DEFERRED 1 201 #define IMMEDIATE 0 202 203 /* 204 * number of bits per byte, from <sys/param.h> 205 */ 206 #define UCHAR_MAX ((1 << NBBY) - 1) 207 208 uchar_t apic_reserved_irqlist[MAX_ISA_IRQ]; 209 210 /* 211 * The following vector assignments influence the value of ipltopri and 212 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program 213 * idle to 0 and IPL 0 to 0x10 to differentiate idle in case 214 * we care to do so in future. Note some IPLs which are rarely used 215 * will share the vector ranges and heavily used IPLs (5 and 6) have 216 * a wide range. 217 * IPL Vector range. as passed to intr_enter 218 * 0 none. 219 * 1,2,3 0x20-0x2f 0x0-0xf 220 * 4 0x30-0x3f 0x10-0x1f 221 * 5 0x40-0x5f 0x20-0x3f 222 * 6 0x60-0x7f 0x40-0x5f 223 * 7,8,9 0x80-0x8f 0x60-0x6f 224 * 10 0x90-0x9f 0x70-0x7f 225 * 11 0xa0-0xaf 0x80-0x8f 226 * ... ... 227 * 16 0xf0-0xff 0xd0-0xdf 228 */ 229 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = { 230 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 16 231 }; 232 /* 233 * The ipl of an ISR at vector X is apic_vectortoipl[X<<4] 234 * NOTE that this is vector as passed into intr_enter which is 235 * programmed vector - 0x20 (APIC_BASE_VECT) 236 */ 237 238 uchar_t apic_ipltopri[MAXIPL + 1]; /* unix ipl to apic pri */ 239 /* The taskpri to be programmed into apic to mask given ipl */ 240 241 #if defined(__amd64) 242 uchar_t apic_cr8pri[MAXIPL + 1]; /* unix ipl to cr8 pri */ 243 #endif 244 245 /* 246 * Patchable global variables. 247 */ 248 int apic_forceload = 0; 249 250 #define INTR_ROUND_ROBIN_WITH_AFFINITY 0 251 #define INTR_ROUND_ROBIN 1 252 #define INTR_LOWEST_PRIORITY 2 253 254 int apic_intr_policy = INTR_ROUND_ROBIN_WITH_AFFINITY; 255 256 static int apic_next_bind_cpu = 2; /* For round robin assignment */ 257 /* start with cpu 1 */ 258 259 int apic_coarse_hrtime = 1; /* 0 - use accurate slow gethrtime() */ 260 /* 1 - use gettime() for performance */ 261 int apic_flat_model = 0; /* 0 - clustered. 1 - flat */ 262 int apic_enable_hwsoftint = 0; /* 0 - disable, 1 - enable */ 263 int apic_enable_bind_log = 1; /* 1 - display interrupt binding log */ 264 int apic_panic_on_nmi = 0; 265 int apic_panic_on_apic_error = 0; 266 267 int apic_verbose = 0; 268 269 /* Flag definitions for apic_verbose */ 270 #define APIC_VERBOSE_IOAPIC_FLAG 0x00000001 271 #define APIC_VERBOSE_IRQ_FLAG 0x00000002 272 #define APIC_VERBOSE_POWEROFF_FLAG 0x00000004 273 #define APIC_VERBOSE_POWEROFF_PAUSE_FLAG 0x00000008 274 275 276 #define APIC_VERBOSE_IOAPIC(fmt) \ 277 if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) \ 278 cmn_err fmt; 279 280 #define APIC_VERBOSE_IRQ(fmt) \ 281 if (apic_verbose & APIC_VERBOSE_IRQ_FLAG) \ 282 cmn_err fmt; 283 284 #define APIC_VERBOSE_POWEROFF(fmt) \ 285 if (apic_verbose & APIC_VERBOSE_POWEROFF_FLAG) \ 286 prom_printf fmt; 287 288 289 /* Now the ones for Dynamic Interrupt distribution */ 290 int apic_enable_dynamic_migration = 0; 291 292 /* 293 * If enabled, the distribution works as follows: 294 * On every interrupt entry, the current ipl for the CPU is set in cpu_info 295 * and the irq corresponding to the ipl is also set in the aci_current array. 296 * interrupt exit and setspl (due to soft interrupts) will cause the current 297 * ipl to be be changed. This is cache friendly as these frequently used 298 * paths write into a per cpu structure. 299 * 300 * Sampling is done by checking the structures for all CPUs and incrementing 301 * the busy field of the irq (if any) executing on each CPU and the busy field 302 * of the corresponding CPU. 303 * In periodic mode this is done on every clock interrupt. 304 * In one-shot mode, this is done thru a cyclic with an interval of 305 * apic_redistribute_sample_interval (default 10 milli sec). 306 * 307 * Every apic_sample_factor_redistribution times we sample, we do computations 308 * to decide which interrupt needs to be migrated (see comments 309 * before apic_intr_redistribute(). 310 */ 311 312 /* 313 * Following 3 variables start as % and can be patched or set using an 314 * API to be defined in future. They will be scaled to 315 * sample_factor_redistribution which is in turn set to hertz+1 (in periodic 316 * mode), or 101 in one-shot mode to stagger it away from one sec processing 317 */ 318 319 int apic_int_busy_mark = 60; 320 int apic_int_free_mark = 20; 321 int apic_diff_for_redistribution = 10; 322 323 /* sampling interval for interrupt redistribution for dynamic migration */ 324 int apic_redistribute_sample_interval = NANOSEC / 100; /* 10 millisec */ 325 326 /* 327 * number of times we sample before deciding to redistribute interrupts 328 * for dynamic migration 329 */ 330 int apic_sample_factor_redistribution = 101; 331 332 /* timeout for xlate_vector, mark_vector */ 333 int apic_revector_timeout = 16 * 10000; /* 160 millisec */ 334 335 int apic_redist_cpu_skip = 0; 336 int apic_num_imbalance = 0; 337 int apic_num_rebind = 0; 338 339 int apic_nproc = 0; 340 int apic_defconf = 0; 341 int apic_irq_translate = 0; 342 int apic_spec_rev = 0; 343 int apic_imcrp = 0; 344 345 int apic_use_acpi = 1; /* 1 = use ACPI, 0 = don't use ACPI */ 346 int apic_use_acpi_madt_only = 0; /* 1=ONLY use MADT from ACPI */ 347 348 /* 349 * For interrupt link devices, if apic_unconditional_srs is set, an irq resource 350 * will be assigned (via _SRS). If it is not set, use the current 351 * irq setting (via _CRS), but only if that irq is in the set of possible 352 * irqs (returned by _PRS) for the device. 353 */ 354 int apic_unconditional_srs = 1; 355 356 /* 357 * For interrupt link devices, if apic_prefer_crs is set when we are 358 * assigning an IRQ resource to a device, prefer the current IRQ setting 359 * over other possible irq settings under same conditions. 360 */ 361 362 int apic_prefer_crs = 1; 363 364 365 /* minimum number of timer ticks to program to */ 366 int apic_min_timer_ticks = 1; 367 /* 368 * Local static data 369 */ 370 static struct psm_ops apic_ops = { 371 apic_probe, 372 373 apic_init, 374 apic_picinit, 375 apic_intr_enter, 376 apic_intr_exit, 377 apic_setspl, 378 apic_addspl, 379 apic_delspl, 380 apic_disable_intr, 381 apic_enable_intr, 382 apic_softlvl_to_irq, 383 apic_set_softintr, 384 385 apic_set_idlecpu, 386 apic_unset_idlecpu, 387 388 apic_clkinit, 389 apic_getclkirq, 390 (void (*)(void))NULL, /* psm_hrtimeinit */ 391 apic_gethrtime, 392 393 apic_get_next_processorid, 394 apic_cpu_start, 395 apic_post_cpu_start, 396 apic_shutdown, 397 apic_get_ipivect, 398 apic_send_ipi, 399 400 (int (*)(dev_info_t *, int))NULL, /* psm_translate_irq */ 401 (int (*)(todinfo_t *))NULL, /* psm_tod_get */ 402 (int (*)(todinfo_t *))NULL, /* psm_tod_set */ 403 (void (*)(int, char *))NULL, /* psm_notify_error */ 404 (void (*)(int))NULL, /* psm_notify_func */ 405 apic_timer_reprogram, 406 apic_timer_enable, 407 apic_timer_disable, 408 apic_post_cyclic_setup, 409 apic_preshutdown, 410 apic_intr_ops /* Advanced DDI Interrupt framework */ 411 }; 412 413 414 static struct psm_info apic_psm_info = { 415 PSM_INFO_VER01_5, /* version */ 416 PSM_OWN_EXCLUSIVE, /* ownership */ 417 (struct psm_ops *)&apic_ops, /* operation */ 418 "pcplusmp", /* machine name */ 419 "pcplusmp v1.4 compatible %I%", 420 }; 421 422 static void *apic_hdlp; 423 424 #ifdef DEBUG 425 #define DENT 0x0001 426 int apic_debug = 0; 427 /* 428 * set apic_restrict_vector to the # of vectors we want to allow per range 429 * useful in testing shared interrupt logic by setting it to 2 or 3 430 */ 431 int apic_restrict_vector = 0; 432 433 #define APIC_DEBUG_MSGBUFSIZE 2048 434 int apic_debug_msgbuf[APIC_DEBUG_MSGBUFSIZE]; 435 int apic_debug_msgbufindex = 0; 436 437 /* 438 * Put "int" info into debug buffer. No MP consistency, but light weight. 439 * Good enough for most debugging. 440 */ 441 #define APIC_DEBUG_BUF_PUT(x) \ 442 apic_debug_msgbuf[apic_debug_msgbufindex++] = x; \ 443 if (apic_debug_msgbufindex >= (APIC_DEBUG_MSGBUFSIZE - NCPU)) \ 444 apic_debug_msgbufindex = 0; 445 446 #endif /* DEBUG */ 447 448 apic_cpus_info_t *apic_cpus; 449 450 static uint_t apic_cpumask = 0; 451 static uint_t apic_flag; 452 453 /* Flag to indicate that we need to shut down all processors */ 454 static uint_t apic_shutdown_processors; 455 456 uint_t apic_nsec_per_intr = 0; 457 458 /* 459 * apic_let_idle_redistribute can have the following values: 460 * 0 - If clock decremented it from 1 to 0, clock has to call redistribute. 461 * apic_redistribute_lock prevents multiple idle cpus from redistributing 462 */ 463 int apic_num_idle_redistributions = 0; 464 static int apic_let_idle_redistribute = 0; 465 static uint_t apic_nticks = 0; 466 static uint_t apic_skipped_redistribute = 0; 467 468 /* to gather intr data and redistribute */ 469 static void apic_redistribute_compute(void); 470 471 static uint_t last_count_read = 0; 472 static lock_t apic_gethrtime_lock; 473 volatile int apic_hrtime_stamp = 0; 474 volatile hrtime_t apic_nsec_since_boot = 0; 475 static uint_t apic_hertz_count, apic_nsec_per_tick; 476 static hrtime_t apic_nsec_max; 477 478 static hrtime_t apic_last_hrtime = 0; 479 int apic_hrtime_error = 0; 480 int apic_remote_hrterr = 0; 481 int apic_num_nmis = 0; 482 int apic_apic_error = 0; 483 int apic_num_apic_errors = 0; 484 int apic_num_cksum_errors = 0; 485 486 static uchar_t apic_io_id[MAX_IO_APIC]; 487 static uchar_t apic_io_ver[MAX_IO_APIC]; 488 static uchar_t apic_io_vectbase[MAX_IO_APIC]; 489 static uchar_t apic_io_vectend[MAX_IO_APIC]; 490 volatile int32_t *apicioadr[MAX_IO_APIC]; 491 492 /* 493 * First available slot to be used as IRQ index into the apic_irq_table 494 * for those interrupts (like MSI/X) that don't have a physical IRQ. 495 */ 496 int apic_first_avail_irq = APIC_FIRST_FREE_IRQ; 497 498 /* 499 * apic_ioapic_lock protects the ioapics (reg select), the status, temp_bound 500 * and bound elements of cpus_info and the temp_cpu element of irq_struct 501 */ 502 lock_t apic_ioapic_lock; 503 504 /* 505 * apic_ioapic_reprogram_lock prevents a CPU from exiting 506 * apic_intr_exit before IOAPIC reprogramming information 507 * is collected. 508 */ 509 static lock_t apic_ioapic_reprogram_lock; 510 static int apic_io_max = 0; /* no. of i/o apics enabled */ 511 512 static struct apic_io_intr *apic_io_intrp = 0; 513 static struct apic_bus *apic_busp; 514 515 uchar_t apic_vector_to_irq[APIC_MAX_VECTOR+1]; 516 static uchar_t apic_resv_vector[MAXIPL+1]; 517 518 static char apic_level_intr[APIC_MAX_VECTOR+1]; 519 static int apic_error = 0; 520 /* values which apic_error can take. Not catastrophic, but may help debug */ 521 #define APIC_ERR_BOOT_EOI 0x1 522 #define APIC_ERR_GET_IPIVECT_FAIL 0x2 523 #define APIC_ERR_INVALID_INDEX 0x4 524 #define APIC_ERR_MARK_VECTOR_FAIL 0x8 525 #define APIC_ERR_APIC_ERROR 0x40000000 526 #define APIC_ERR_NMI 0x80000000 527 528 static int apic_cmos_ssb_set = 0; 529 530 static uint32_t eisa_level_intr_mask = 0; 531 /* At least MSB will be set if EISA bus */ 532 533 static int apic_pci_bus_total = 0; 534 static uchar_t apic_single_pci_busid = 0; 535 536 537 /* 538 * airq_mutex protects additions to the apic_irq_table - the first 539 * pointer and any airq_nexts off of that one. It also protects 540 * apic_max_device_irq & apic_min_device_irq. It also guarantees 541 * that share_id is unique as new ids are generated only when new 542 * irq_t structs are linked in. Once linked in the structs are never 543 * deleted. temp_cpu & mps_intr_index field indicate if it is programmed 544 * or allocated. Note that there is a slight gap between allocating in 545 * apic_introp_xlate and programming in addspl. 546 */ 547 kmutex_t airq_mutex; 548 apic_irq_t *apic_irq_table[APIC_MAX_VECTOR+1]; 549 int apic_max_device_irq = 0; 550 int apic_min_device_irq = APIC_MAX_VECTOR; 551 552 /* use to make sure only one cpu handles the nmi */ 553 static lock_t apic_nmi_lock; 554 /* use to make sure only one cpu handles the error interrupt */ 555 static lock_t apic_error_lock; 556 557 /* 558 * Following declarations are for revectoring; used when ISRs at different 559 * IPLs share an irq. 560 */ 561 static lock_t apic_revector_lock; 562 static int apic_revector_pending = 0; 563 static uchar_t *apic_oldvec_to_newvec; 564 static uchar_t *apic_newvec_to_oldvec; 565 566 /* Ensures that the IOAPIC-reprogramming timeout is not reentrant */ 567 static kmutex_t apic_reprogram_timeout_mutex; 568 569 static struct ioapic_reprogram_data { 570 int valid; /* This entry is valid */ 571 int bindcpu; /* The CPU to which the int will be bound */ 572 unsigned timeouts; /* # times the reprogram timeout was called */ 573 } apic_reprogram_info[APIC_MAX_VECTOR+1]; 574 /* 575 * APIC_MAX_VECTOR + 1 is the maximum # of IRQs as well. apic_reprogram_info 576 * is indexed by IRQ number, NOT by vector number. 577 */ 578 579 580 /* 581 * The following added to identify a software poweroff method if available. 582 */ 583 584 static struct { 585 int poweroff_method; 586 char oem_id[APIC_MPS_OEM_ID_LEN + 1]; /* MAX + 1 for NULL */ 587 char prod_id[APIC_MPS_PROD_ID_LEN + 1]; /* MAX + 1 for NULL */ 588 } apic_mps_ids[] = { 589 { APIC_POWEROFF_VIA_RTC, "INTEL", "ALDER" }, /* 4300 */ 590 { APIC_POWEROFF_VIA_RTC, "NCR", "AMC" }, /* 4300 */ 591 { APIC_POWEROFF_VIA_ASPEN_BMC, "INTEL", "A450NX" }, /* 4400? */ 592 { APIC_POWEROFF_VIA_ASPEN_BMC, "INTEL", "AD450NX" }, /* 4400 */ 593 { APIC_POWEROFF_VIA_ASPEN_BMC, "INTEL", "AC450NX" }, /* 4400R */ 594 { APIC_POWEROFF_VIA_SITKA_BMC, "INTEL", "S450NX" }, /* S50 */ 595 { APIC_POWEROFF_VIA_SITKA_BMC, "INTEL", "SC450NX" } /* S50? */ 596 }; 597 598 int apic_poweroff_method = APIC_POWEROFF_NONE; 599 600 static struct { 601 uchar_t cntl; 602 uchar_t data; 603 } aspen_bmc[] = { 604 { CC_SMS_WR_START, 0x18 }, /* NetFn/LUN */ 605 { CC_SMS_WR_NEXT, 0x24 }, /* Cmd SET_WATCHDOG_TIMER */ 606 { CC_SMS_WR_NEXT, 0x84 }, /* DataByte 1: SMS/OS no log */ 607 { CC_SMS_WR_NEXT, 0x2 }, /* DataByte 2: Power Down */ 608 { CC_SMS_WR_NEXT, 0x0 }, /* DataByte 3: no pre-timeout */ 609 { CC_SMS_WR_NEXT, 0x0 }, /* DataByte 4: timer expir. */ 610 { CC_SMS_WR_NEXT, 0xa }, /* DataByte 5: init countdown */ 611 { CC_SMS_WR_END, 0x0 }, /* DataByte 6: init countdown */ 612 613 { CC_SMS_WR_START, 0x18 }, /* NetFn/LUN */ 614 { CC_SMS_WR_END, 0x22 } /* Cmd RESET_WATCHDOG_TIMER */ 615 }; 616 617 static struct { 618 int port; 619 uchar_t data; 620 } sitka_bmc[] = { 621 { SMS_COMMAND_REGISTER, SMS_WRITE_START }, 622 { SMS_DATA_REGISTER, 0x18 }, /* NetFn/LUN */ 623 { SMS_DATA_REGISTER, 0x24 }, /* Cmd SET_WATCHDOG_TIMER */ 624 { SMS_DATA_REGISTER, 0x84 }, /* DataByte 1: SMS/OS no log */ 625 { SMS_DATA_REGISTER, 0x2 }, /* DataByte 2: Power Down */ 626 { SMS_DATA_REGISTER, 0x0 }, /* DataByte 3: no pre-timeout */ 627 { SMS_DATA_REGISTER, 0x0 }, /* DataByte 4: timer expir. */ 628 { SMS_DATA_REGISTER, 0xa }, /* DataByte 5: init countdown */ 629 { SMS_COMMAND_REGISTER, SMS_WRITE_END }, 630 { SMS_DATA_REGISTER, 0x0 }, /* DataByte 6: init countdown */ 631 632 { SMS_COMMAND_REGISTER, SMS_WRITE_START }, 633 { SMS_DATA_REGISTER, 0x18 }, /* NetFn/LUN */ 634 { SMS_COMMAND_REGISTER, SMS_WRITE_END }, 635 { SMS_DATA_REGISTER, 0x22 } /* Cmd RESET_WATCHDOG_TIMER */ 636 }; 637 638 639 /* Patchable global variables. */ 640 int apic_kmdb_on_nmi = 0; /* 0 - no, 1 - yes enter kmdb */ 641 int apic_debug_mps_id = 0; /* 1 - print MPS ID strings */ 642 643 /* 644 * ACPI definitions 645 */ 646 /* _PIC method arguments */ 647 #define ACPI_PIC_MODE 0 648 #define ACPI_APIC_MODE 1 649 650 /* APIC error flags we care about */ 651 #define APIC_SEND_CS_ERROR 0x01 652 #define APIC_RECV_CS_ERROR 0x02 653 #define APIC_CS_ERRORS (APIC_SEND_CS_ERROR|APIC_RECV_CS_ERROR) 654 655 /* 656 * ACPI variables 657 */ 658 /* 1 = acpi is enabled & working, 0 = acpi is not enabled or not there */ 659 static int apic_enable_acpi = 0; 660 661 /* ACPI Multiple APIC Description Table ptr */ 662 static MULTIPLE_APIC_TABLE *acpi_mapic_dtp = NULL; 663 664 /* ACPI Interrupt Source Override Structure ptr */ 665 static MADT_INTERRUPT_OVERRIDE *acpi_isop = NULL; 666 static int acpi_iso_cnt = 0; 667 668 /* ACPI Non-maskable Interrupt Sources ptr */ 669 static MADT_NMI_SOURCE *acpi_nmi_sp = NULL; 670 static int acpi_nmi_scnt = 0; 671 static MADT_LOCAL_APIC_NMI *acpi_nmi_cp = NULL; 672 static int acpi_nmi_ccnt = 0; 673 674 /* 675 * extern declarations 676 */ 677 extern int intr_clear(void); 678 extern void intr_restore(uint_t); 679 #if defined(__amd64) 680 extern int intpri_use_cr8; 681 #endif /* __amd64 */ 682 683 extern int apic_pci_msi_enable_vector(dev_info_t *, int, int, 684 int, int, int); 685 extern apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int); 686 687 /* 688 * This is the loadable module wrapper 689 */ 690 691 int 692 _init(void) 693 { 694 if (apic_coarse_hrtime) 695 apic_ops.psm_gethrtime = &apic_gettime; 696 return (psm_mod_init(&apic_hdlp, &apic_psm_info)); 697 } 698 699 int 700 _fini(void) 701 { 702 return (psm_mod_fini(&apic_hdlp, &apic_psm_info)); 703 } 704 705 int 706 _info(struct modinfo *modinfop) 707 { 708 return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop)); 709 } 710 711 /* 712 * Auto-configuration routines 713 */ 714 715 /* 716 * Look at MPSpec 1.4 (Intel Order # 242016-005) for details of what we do here 717 * May work with 1.1 - but not guaranteed. 718 * According to the MP Spec, the MP floating pointer structure 719 * will be searched in the order described below: 720 * 1. In the first kilobyte of Extended BIOS Data Area (EBDA) 721 * 2. Within the last kilobyte of system base memory 722 * 3. In the BIOS ROM address space between 0F0000h and 0FFFFh 723 * Once we find the right signature with proper checksum, we call 724 * either handle_defconf or parse_mpct to get all info necessary for 725 * subsequent operations. 726 */ 727 static int 728 apic_probe() 729 { 730 uint32_t mpct_addr, ebda_start = 0, base_mem_end; 731 caddr_t biosdatap; 732 caddr_t mpct; 733 caddr_t fptr; 734 int i, mpct_size, mapsize, retval = PSM_FAILURE; 735 ushort_t ebda_seg, base_mem_size; 736 struct apic_mpfps_hdr *fpsp; 737 struct apic_mp_cnf_hdr *hdrp; 738 int bypass_cpu_and_ioapics_in_mptables; 739 int acpi_user_options; 740 741 if (apic_forceload < 0) 742 return (retval); 743 744 /* Allow override for MADT-only mode */ 745 acpi_user_options = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(), 0, 746 "acpi-user-options", 0); 747 apic_use_acpi_madt_only = ((acpi_user_options & ACPI_OUSER_MADT) != 0); 748 749 /* Allow apic_use_acpi to override MADT-only mode */ 750 if (!apic_use_acpi) 751 apic_use_acpi_madt_only = 0; 752 753 retval = acpi_probe(); 754 755 /* 756 * mapin the bios data area 40:0 757 * 40:13h - two-byte location reports the base memory size 758 * 40:0Eh - two-byte location for the exact starting address of 759 * the EBDA segment for EISA 760 */ 761 biosdatap = psm_map_phys(0x400, 0x20, PROT_READ); 762 if (!biosdatap) 763 return (retval); 764 fpsp = (struct apic_mpfps_hdr *)NULL; 765 mapsize = MPFPS_RAM_WIN_LEN; 766 /*LINTED: pointer cast may result in improper alignment */ 767 ebda_seg = *((ushort_t *)(biosdatap+0xe)); 768 /* check the 1k of EBDA */ 769 if (ebda_seg) { 770 ebda_start = ((uint32_t)ebda_seg) << 4; 771 fptr = psm_map_phys(ebda_start, MPFPS_RAM_WIN_LEN, PROT_READ); 772 if (fptr) { 773 if (!(fpsp = 774 apic_find_fps_sig(fptr, MPFPS_RAM_WIN_LEN))) 775 psm_unmap_phys(fptr, MPFPS_RAM_WIN_LEN); 776 } 777 } 778 /* If not in EBDA, check the last k of system base memory */ 779 if (!fpsp) { 780 /*LINTED: pointer cast may result in improper alignment */ 781 base_mem_size = *((ushort_t *)(biosdatap + 0x13)); 782 783 if (base_mem_size > 512) 784 base_mem_end = 639 * 1024; 785 else 786 base_mem_end = 511 * 1024; 787 /* if ebda == last k of base mem, skip to check BIOS ROM */ 788 if (base_mem_end != ebda_start) { 789 790 fptr = psm_map_phys(base_mem_end, MPFPS_RAM_WIN_LEN, 791 PROT_READ); 792 793 if (fptr) { 794 if (!(fpsp = apic_find_fps_sig(fptr, 795 MPFPS_RAM_WIN_LEN))) 796 psm_unmap_phys(fptr, MPFPS_RAM_WIN_LEN); 797 } 798 } 799 } 800 psm_unmap_phys(biosdatap, 0x20); 801 802 /* If still cannot find it, check the BIOS ROM space */ 803 if (!fpsp) { 804 mapsize = MPFPS_ROM_WIN_LEN; 805 fptr = psm_map_phys(MPFPS_ROM_WIN_START, 806 MPFPS_ROM_WIN_LEN, PROT_READ); 807 if (fptr) { 808 if (!(fpsp = 809 apic_find_fps_sig(fptr, MPFPS_ROM_WIN_LEN))) { 810 psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN); 811 return (retval); 812 } 813 } 814 } 815 816 if (apic_checksum((caddr_t)fpsp, fpsp->mpfps_length * 16) != 0) { 817 psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN); 818 return (retval); 819 } 820 821 apic_spec_rev = fpsp->mpfps_spec_rev; 822 if ((apic_spec_rev != 04) && (apic_spec_rev != 01)) { 823 psm_unmap_phys(fptr, MPFPS_ROM_WIN_LEN); 824 return (retval); 825 } 826 827 /* check IMCR is present or not */ 828 apic_imcrp = fpsp->mpfps_featinfo2 & MPFPS_FEATINFO2_IMCRP; 829 830 /* check default configuration (dual CPUs) */ 831 if ((apic_defconf = fpsp->mpfps_featinfo1) != 0) { 832 psm_unmap_phys(fptr, mapsize); 833 return (apic_handle_defconf()); 834 } 835 836 /* MP Configuration Table */ 837 mpct_addr = (uint32_t)(fpsp->mpfps_mpct_paddr); 838 839 psm_unmap_phys(fptr, mapsize); /* unmap floating ptr struct */ 840 841 /* 842 * Map in enough memory for the MP Configuration Table Header. 843 * Use this table to read the total length of the BIOS data and 844 * map in all the info 845 */ 846 /*LINTED: pointer cast may result in improper alignment */ 847 hdrp = (struct apic_mp_cnf_hdr *)psm_map_phys(mpct_addr, 848 sizeof (struct apic_mp_cnf_hdr), PROT_READ); 849 if (!hdrp) 850 return (retval); 851 852 /* check mp configuration table signature PCMP */ 853 if (hdrp->mpcnf_sig != 0x504d4350) { 854 psm_unmap_phys((caddr_t)hdrp, sizeof (struct apic_mp_cnf_hdr)); 855 return (retval); 856 } 857 mpct_size = (int)hdrp->mpcnf_tbl_length; 858 859 apic_set_pwroff_method_from_mpcnfhdr(hdrp); 860 861 psm_unmap_phys((caddr_t)hdrp, sizeof (struct apic_mp_cnf_hdr)); 862 863 if ((retval == PSM_SUCCESS) && !apic_use_acpi_madt_only) { 864 /* This is an ACPI machine No need for further checks */ 865 return (retval); 866 } 867 868 /* 869 * Map in the entries for this machine, ie. Processor 870 * Entry Tables, Bus Entry Tables, etc. 871 * They are in fixed order following one another 872 */ 873 mpct = psm_map_phys(mpct_addr, mpct_size, PROT_READ); 874 if (!mpct) 875 return (retval); 876 877 if (apic_checksum(mpct, mpct_size) != 0) 878 goto apic_fail1; 879 880 881 /*LINTED: pointer cast may result in improper alignment */ 882 hdrp = (struct apic_mp_cnf_hdr *)mpct; 883 /*LINTED: pointer cast may result in improper alignment */ 884 apicadr = (uint32_t *)psm_map_phys((uint32_t)hdrp->mpcnf_local_apic, 885 APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE); 886 if (!apicadr) 887 goto apic_fail1; 888 889 /* Parse all information in the tables */ 890 bypass_cpu_and_ioapics_in_mptables = (retval == PSM_SUCCESS); 891 if (apic_parse_mpct(mpct, bypass_cpu_and_ioapics_in_mptables) == 892 PSM_SUCCESS) 893 return (PSM_SUCCESS); 894 895 for (i = 0; i < apic_io_max; i++) 896 psm_unmap_phys((caddr_t)apicioadr[i], APIC_IO_MEMLEN); 897 if (apic_cpus) 898 kmem_free(apic_cpus, sizeof (*apic_cpus) * apic_nproc); 899 if (apicadr) 900 psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN); 901 apic_fail1: 902 psm_unmap_phys(mpct, mpct_size); 903 return (retval); 904 } 905 906 static void 907 apic_set_pwroff_method_from_mpcnfhdr(struct apic_mp_cnf_hdr *hdrp) 908 { 909 int i; 910 911 for (i = 0; i < (sizeof (apic_mps_ids) / sizeof (apic_mps_ids[0])); 912 i++) { 913 if ((strncmp(hdrp->mpcnf_oem_str, apic_mps_ids[i].oem_id, 914 strlen(apic_mps_ids[i].oem_id)) == 0) && 915 (strncmp(hdrp->mpcnf_prod_str, apic_mps_ids[i].prod_id, 916 strlen(apic_mps_ids[i].prod_id)) == 0)) { 917 918 apic_poweroff_method = apic_mps_ids[i].poweroff_method; 919 break; 920 } 921 } 922 923 if (apic_debug_mps_id != 0) { 924 cmn_err(CE_CONT, "pcplusmp: MPS OEM ID = '%c%c%c%c%c%c%c%c'" 925 "Product ID = '%c%c%c%c%c%c%c%c%c%c%c%c'\n", 926 hdrp->mpcnf_oem_str[0], 927 hdrp->mpcnf_oem_str[1], 928 hdrp->mpcnf_oem_str[2], 929 hdrp->mpcnf_oem_str[3], 930 hdrp->mpcnf_oem_str[4], 931 hdrp->mpcnf_oem_str[5], 932 hdrp->mpcnf_oem_str[6], 933 hdrp->mpcnf_oem_str[7], 934 hdrp->mpcnf_prod_str[0], 935 hdrp->mpcnf_prod_str[1], 936 hdrp->mpcnf_prod_str[2], 937 hdrp->mpcnf_prod_str[3], 938 hdrp->mpcnf_prod_str[4], 939 hdrp->mpcnf_prod_str[5], 940 hdrp->mpcnf_prod_str[6], 941 hdrp->mpcnf_prod_str[7], 942 hdrp->mpcnf_prod_str[8], 943 hdrp->mpcnf_prod_str[9], 944 hdrp->mpcnf_prod_str[10], 945 hdrp->mpcnf_prod_str[11]); 946 } 947 } 948 949 static int 950 acpi_probe(void) 951 { 952 int i, id, intmax, ver, index, rv; 953 int acpi_verboseflags = 0; 954 int madt_seen, madt_size; 955 APIC_HEADER *ap; 956 MADT_PROCESSOR_APIC *mpa; 957 MADT_IO_APIC *mia; 958 MADT_IO_SAPIC *misa; 959 MADT_INTERRUPT_OVERRIDE *mio; 960 MADT_NMI_SOURCE *mns; 961 MADT_INTERRUPT_SOURCE *mis; 962 MADT_LOCAL_APIC_NMI *mlan; 963 MADT_ADDRESS_OVERRIDE *mao; 964 ACPI_OBJECT_LIST arglist; 965 ACPI_OBJECT arg; 966 int sci; 967 iflag_t sci_flags; 968 volatile int32_t *ioapic; 969 char local_ids[NCPU]; 970 char proc_ids[NCPU]; 971 uchar_t hid; 972 973 if (!apic_use_acpi) 974 return (PSM_FAILURE); 975 976 if (AcpiGetFirmwareTable(APIC_SIG, 1, ACPI_LOGICAL_ADDRESSING, 977 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK) 978 return (PSM_FAILURE); 979 980 apicadr = (uint32_t *)psm_map_phys( 981 (uint32_t)acpi_mapic_dtp->LocalApicAddress, 982 APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE); 983 if (!apicadr) 984 return (PSM_FAILURE); 985 986 id = apicadr[APIC_LID_REG]; 987 local_ids[0] = (uchar_t)(((uint_t)id) >> 24); 988 apic_nproc = index = 1; 989 apic_io_max = 0; 990 991 ap = (APIC_HEADER *) (acpi_mapic_dtp + 1); 992 madt_size = acpi_mapic_dtp->Length; 993 madt_seen = sizeof (*acpi_mapic_dtp); 994 995 while (madt_seen < madt_size) { 996 switch (ap->Type) { 997 case APIC_PROCESSOR: 998 mpa = (MADT_PROCESSOR_APIC *) ap; 999 if (mpa->ProcessorEnabled) { 1000 if (mpa->LocalApicId == local_ids[0]) 1001 proc_ids[0] = mpa->ProcessorId; 1002 else if (apic_nproc < NCPU) { 1003 local_ids[index] = mpa->LocalApicId; 1004 proc_ids[index] = mpa->ProcessorId; 1005 index++; 1006 apic_nproc++; 1007 } else 1008 cmn_err(CE_WARN, "pcplusmp: exceeded " 1009 "maximum no. of CPUs (= %d)", NCPU); 1010 } 1011 break; 1012 1013 case APIC_IO: 1014 mia = (MADT_IO_APIC *) ap; 1015 if (apic_io_max < MAX_IO_APIC) { 1016 apic_io_id[apic_io_max] = mia->IoApicId; 1017 apic_io_vectbase[apic_io_max] = 1018 mia->Interrupt; 1019 ioapic = apicioadr[apic_io_max] = 1020 (int32_t *)psm_map_phys( 1021 (uint32_t)mia->Address, 1022 APIC_IO_MEMLEN, PROT_READ | PROT_WRITE); 1023 if (!ioapic) 1024 goto cleanup; 1025 apic_io_max++; 1026 } 1027 break; 1028 1029 case APIC_XRUPT_OVERRIDE: 1030 mio = (MADT_INTERRUPT_OVERRIDE *) ap; 1031 if (acpi_isop == NULL) 1032 acpi_isop = mio; 1033 acpi_iso_cnt++; 1034 break; 1035 1036 case APIC_NMI: 1037 /* UNIMPLEMENTED */ 1038 mns = (MADT_NMI_SOURCE *) ap; 1039 if (acpi_nmi_sp == NULL) 1040 acpi_nmi_sp = mns; 1041 acpi_nmi_scnt++; 1042 1043 cmn_err(CE_NOTE, "!apic: nmi source: %d %d %d\n", 1044 mns->Interrupt, mns->Polarity, 1045 mns->TriggerMode); 1046 break; 1047 1048 case APIC_LOCAL_NMI: 1049 /* UNIMPLEMENTED */ 1050 mlan = (MADT_LOCAL_APIC_NMI *) ap; 1051 if (acpi_nmi_cp == NULL) 1052 acpi_nmi_cp = mlan; 1053 acpi_nmi_ccnt++; 1054 1055 cmn_err(CE_NOTE, "!apic: local nmi: %d %d %d %d\n", 1056 mlan->ProcessorId, mlan->Polarity, 1057 mlan->TriggerMode, mlan->Lint); 1058 break; 1059 1060 case APIC_ADDRESS_OVERRIDE: 1061 /* UNIMPLEMENTED */ 1062 mao = (MADT_ADDRESS_OVERRIDE *) ap; 1063 cmn_err(CE_NOTE, "!apic: address override: %lx\n", 1064 (long)mao->Address); 1065 break; 1066 1067 case APIC_IO_SAPIC: 1068 /* UNIMPLEMENTED */ 1069 misa = (MADT_IO_SAPIC *) ap; 1070 1071 cmn_err(CE_NOTE, "!apic: io sapic: %d %d %lx\n", 1072 misa->IoSapicId, misa->InterruptBase, 1073 (long)misa->Address); 1074 break; 1075 1076 case APIC_XRUPT_SOURCE: 1077 /* UNIMPLEMENTED */ 1078 mis = (MADT_INTERRUPT_SOURCE *) ap; 1079 1080 cmn_err(CE_NOTE, 1081 "!apic: irq source: %d %d %d %d %d %d %d\n", 1082 mis->ProcessorId, mis->ProcessorEid, 1083 mis->Interrupt, mis->Polarity, 1084 mis->TriggerMode, mis->InterruptType, 1085 mis->IoSapicVector); 1086 break; 1087 case APIC_RESERVED: 1088 default: 1089 goto cleanup; 1090 } 1091 1092 /* advance to next entry */ 1093 madt_seen += ap->Length; 1094 ap = (APIC_HEADER *)(((char *)ap) + ap->Length); 1095 } 1096 1097 if ((apic_cpus = kmem_zalloc(sizeof (*apic_cpus) * apic_nproc, 1098 KM_NOSLEEP)) == NULL) 1099 goto cleanup; 1100 1101 apic_cpumask = (1 << apic_nproc) - 1; 1102 1103 /* 1104 * ACPI doesn't provide the local apic ver, get it directly from the 1105 * local apic 1106 */ 1107 ver = apicadr[APIC_VERS_REG]; 1108 for (i = 0; i < apic_nproc; i++) { 1109 apic_cpus[i].aci_local_id = local_ids[i]; 1110 apic_cpus[i].aci_local_ver = (uchar_t)(ver & 0xFF); 1111 } 1112 for (i = 0; i < apic_io_max; i++) { 1113 ioapic = apicioadr[i]; 1114 1115 /* 1116 * need to check Sitka on the following acpi problem 1117 * On the Sitka, the ioapic's apic_id field isn't reporting 1118 * the actual io apic id. We have reported this problem 1119 * to Intel. Until they fix the problem, we will get the 1120 * actual id directly from the ioapic. 1121 */ 1122 ioapic[APIC_IO_REG] = APIC_ID_CMD; 1123 id = ioapic[APIC_IO_DATA]; 1124 hid = (uchar_t)(((uint_t)id) >> 24); 1125 1126 if (hid != apic_io_id[i]) { 1127 if (apic_io_id[i] == 0) 1128 apic_io_id[i] = hid; 1129 else { /* set ioapic id to whatever reported by ACPI */ 1130 id = ((int32_t)apic_io_id[i]) << 24; 1131 ioapic[APIC_IO_REG] = APIC_ID_CMD; 1132 ioapic[APIC_IO_DATA] = id; 1133 } 1134 } 1135 ioapic[APIC_IO_REG] = APIC_VERS_CMD; 1136 ver = ioapic[APIC_IO_DATA]; 1137 apic_io_ver[i] = (uchar_t)(ver & 0xff); 1138 intmax = (ver >> 16) & 0xff; 1139 apic_io_vectend[i] = apic_io_vectbase[i] + intmax; 1140 if (apic_first_avail_irq <= apic_io_vectend[i]) 1141 apic_first_avail_irq = apic_io_vectend[i] + 1; 1142 } 1143 1144 1145 /* 1146 * Process SCI configuration here 1147 * An error may be returned here if 1148 * acpi-user-options specifies legacy mode 1149 * (no SCI, no ACPI mode) 1150 */ 1151 if (acpica_get_sci(&sci, &sci_flags) != AE_OK) 1152 sci = -1; 1153 1154 /* 1155 * Now call acpi_init() to generate namespaces 1156 * If this fails, we don't attempt to use ACPI 1157 * even if we were able to get a MADT above 1158 */ 1159 if (acpica_init() != AE_OK) 1160 goto cleanup; 1161 1162 /* 1163 * Squirrel away the SCI and flags for later on 1164 * in apic_picinit() when we're ready 1165 */ 1166 apic_sci_vect = sci; 1167 apic_sci_flags = sci_flags; 1168 1169 if (apic_verbose & APIC_VERBOSE_IRQ_FLAG) 1170 acpi_verboseflags |= PSM_VERBOSE_IRQ_FLAG; 1171 1172 if (apic_verbose & APIC_VERBOSE_POWEROFF_FLAG) 1173 acpi_verboseflags |= PSM_VERBOSE_POWEROFF_FLAG; 1174 1175 if (apic_verbose & APIC_VERBOSE_POWEROFF_PAUSE_FLAG) 1176 acpi_verboseflags |= PSM_VERBOSE_POWEROFF_PAUSE_FLAG; 1177 1178 if (acpi_psm_init(apic_psm_info.p_mach_idstring, acpi_verboseflags) == 1179 ACPI_PSM_FAILURE) 1180 goto cleanup; 1181 1182 /* Enable ACPI APIC interrupt routing */ 1183 arglist.Count = 1; 1184 arglist.Pointer = &arg; 1185 arg.Type = ACPI_TYPE_INTEGER; 1186 arg.Integer.Value = ACPI_APIC_MODE; /* 1 */ 1187 rv = AcpiEvaluateObject(NULL, "\\_PIC", &arglist, NULL); 1188 if (rv == AE_OK) { 1189 build_reserved_irqlist((uchar_t *)apic_reserved_irqlist); 1190 apic_enable_acpi = 1; 1191 if (apic_use_acpi_madt_only) { 1192 cmn_err(CE_CONT, 1193 "?Using ACPI for CPU/IOAPIC information ONLY\n"); 1194 } 1195 return (PSM_SUCCESS); 1196 } 1197 /* if setting APIC mode failed above, we fall through to cleanup */ 1198 1199 cleanup: 1200 if (apicadr != NULL) { 1201 psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN); 1202 apicadr = NULL; 1203 } 1204 apic_nproc = 0; 1205 for (i = 0; i < apic_io_max; i++) { 1206 psm_unmap_phys((caddr_t)apicioadr[i], APIC_IO_MEMLEN); 1207 apicioadr[i] = NULL; 1208 } 1209 apic_io_max = 0; 1210 acpi_isop = NULL; 1211 acpi_iso_cnt = 0; 1212 acpi_nmi_sp = NULL; 1213 acpi_nmi_scnt = 0; 1214 acpi_nmi_cp = NULL; 1215 acpi_nmi_ccnt = 0; 1216 return (PSM_FAILURE); 1217 } 1218 1219 /* 1220 * Handle default configuration. Fill in reqd global variables & tables 1221 * Fill all details as MP table does not give any more info 1222 */ 1223 static int 1224 apic_handle_defconf() 1225 { 1226 uint_t lid; 1227 1228 /*LINTED: pointer cast may result in improper alignment */ 1229 apicioadr[0] = (int32_t *)psm_map_phys(APIC_IO_ADDR, 1230 APIC_IO_MEMLEN, PROT_READ | PROT_WRITE); 1231 /*LINTED: pointer cast may result in improper alignment */ 1232 apicadr = (uint32_t *)psm_map_phys(APIC_LOCAL_ADDR, 1233 APIC_LOCAL_MEMLEN, PROT_READ | PROT_WRITE); 1234 apic_cpus = (apic_cpus_info_t *) 1235 kmem_zalloc(sizeof (*apic_cpus) * 2, KM_NOSLEEP); 1236 if ((!apicadr) || (!apicioadr[0]) || (!apic_cpus)) 1237 goto apic_handle_defconf_fail; 1238 apic_cpumask = 3; 1239 apic_nproc = 2; 1240 lid = apicadr[APIC_LID_REG]; 1241 apic_cpus[0].aci_local_id = (uchar_t)(lid >> APIC_ID_BIT_OFFSET); 1242 /* 1243 * According to the PC+MP spec 1.1, the local ids 1244 * for the default configuration has to be 0 or 1 1245 */ 1246 if (apic_cpus[0].aci_local_id == 1) 1247 apic_cpus[1].aci_local_id = 0; 1248 else if (apic_cpus[0].aci_local_id == 0) 1249 apic_cpus[1].aci_local_id = 1; 1250 else 1251 goto apic_handle_defconf_fail; 1252 1253 apic_io_id[0] = 2; 1254 apic_io_max = 1; 1255 if (apic_defconf >= 5) { 1256 apic_cpus[0].aci_local_ver = APIC_INTEGRATED_VERS; 1257 apic_cpus[1].aci_local_ver = APIC_INTEGRATED_VERS; 1258 apic_io_ver[0] = APIC_INTEGRATED_VERS; 1259 } else { 1260 apic_cpus[0].aci_local_ver = 0; /* 82489 DX */ 1261 apic_cpus[1].aci_local_ver = 0; 1262 apic_io_ver[0] = 0; 1263 } 1264 if (apic_defconf == 2 || apic_defconf == 3 || apic_defconf == 6) 1265 eisa_level_intr_mask = (inb(EISA_LEVEL_CNTL + 1) << 8) | 1266 inb(EISA_LEVEL_CNTL) | ((uint_t)INT32_MAX + 1); 1267 return (PSM_SUCCESS); 1268 1269 apic_handle_defconf_fail: 1270 if (apic_cpus) 1271 kmem_free(apic_cpus, sizeof (*apic_cpus) * 2); 1272 if (apicadr) 1273 psm_unmap_phys((caddr_t)apicadr, APIC_LOCAL_MEMLEN); 1274 if (apicioadr[0]) 1275 psm_unmap_phys((caddr_t)apicioadr[0], APIC_IO_MEMLEN); 1276 return (PSM_FAILURE); 1277 } 1278 1279 /* Parse the entries in MP configuration table and collect info that we need */ 1280 static int 1281 apic_parse_mpct(caddr_t mpct, int bypass_cpus_and_ioapics) 1282 { 1283 struct apic_procent *procp; 1284 struct apic_bus *busp; 1285 struct apic_io_entry *ioapicp; 1286 struct apic_io_intr *intrp; 1287 volatile int32_t *ioapic; 1288 uint_t lid; 1289 int id; 1290 uchar_t hid; 1291 1292 /*LINTED: pointer cast may result in improper alignment */ 1293 procp = (struct apic_procent *)(mpct + sizeof (struct apic_mp_cnf_hdr)); 1294 1295 /* No need to count cpu entries if we won't use them */ 1296 if (!bypass_cpus_and_ioapics) { 1297 1298 /* Find max # of CPUS and allocate structure accordingly */ 1299 apic_nproc = 0; 1300 while (procp->proc_entry == APIC_CPU_ENTRY) { 1301 if (procp->proc_cpuflags & CPUFLAGS_EN) { 1302 apic_nproc++; 1303 } 1304 procp++; 1305 } 1306 if (apic_nproc > NCPU) 1307 cmn_err(CE_WARN, "pcplusmp: exceeded " 1308 "maximum no. of CPUs (= %d)", NCPU); 1309 if (!apic_nproc || !(apic_cpus = (apic_cpus_info_t *) 1310 kmem_zalloc(sizeof (*apic_cpus)*apic_nproc, KM_NOSLEEP))) 1311 return (PSM_FAILURE); 1312 } 1313 1314 /*LINTED: pointer cast may result in improper alignment */ 1315 procp = (struct apic_procent *)(mpct + sizeof (struct apic_mp_cnf_hdr)); 1316 1317 /* 1318 * start with index 1 as 0 needs to be filled in with Boot CPU, but 1319 * if we're bypassing this information, it has already been filled 1320 * in by acpi_probe(), so don't overwrite it. 1321 */ 1322 if (!bypass_cpus_and_ioapics) 1323 apic_nproc = 1; 1324 1325 while (procp->proc_entry == APIC_CPU_ENTRY) { 1326 /* check whether the cpu exists or not */ 1327 if (!bypass_cpus_and_ioapics && 1328 procp->proc_cpuflags & CPUFLAGS_EN) { 1329 if (procp->proc_cpuflags & CPUFLAGS_BP) { /* Boot CPU */ 1330 lid = apicadr[APIC_LID_REG]; 1331 apic_cpus[0].aci_local_id = procp->proc_apicid; 1332 if (apic_cpus[0].aci_local_id != 1333 (uchar_t)(lid >> APIC_ID_BIT_OFFSET)) { 1334 return (PSM_FAILURE); 1335 } 1336 apic_cpus[0].aci_local_ver = 1337 procp->proc_version; 1338 } else { 1339 1340 apic_cpus[apic_nproc].aci_local_id = 1341 procp->proc_apicid; 1342 apic_cpus[apic_nproc].aci_local_ver = 1343 procp->proc_version; 1344 apic_nproc++; 1345 1346 } 1347 } 1348 procp++; 1349 } 1350 1351 if (!bypass_cpus_and_ioapics) { 1352 /* convert the number of processors into a cpumask */ 1353 apic_cpumask = (1 << apic_nproc) - 1; 1354 } 1355 1356 /* 1357 * Save start of bus entries for later use. 1358 * Get EISA level cntrl if EISA bus is present. 1359 * Also get the CPI bus id for single CPI bus case 1360 */ 1361 apic_busp = busp = (struct apic_bus *)procp; 1362 while (busp->bus_entry == APIC_BUS_ENTRY) { 1363 lid = apic_find_bus_type((char *)&busp->bus_str1); 1364 if (lid == BUS_EISA) { 1365 eisa_level_intr_mask = (inb(EISA_LEVEL_CNTL + 1) << 8) | 1366 inb(EISA_LEVEL_CNTL) | ((uint_t)INT32_MAX + 1); 1367 } else if (lid == BUS_PCI) { 1368 /* 1369 * apic_single_pci_busid will be used only if 1370 * apic_pic_bus_total is equal to 1 1371 */ 1372 apic_pci_bus_total++; 1373 apic_single_pci_busid = busp->bus_id; 1374 } 1375 busp++; 1376 } 1377 1378 ioapicp = (struct apic_io_entry *)busp; 1379 1380 if (!bypass_cpus_and_ioapics) 1381 apic_io_max = 0; 1382 do { 1383 if (!bypass_cpus_and_ioapics && apic_io_max < MAX_IO_APIC) { 1384 if (ioapicp->io_flags & IOAPIC_FLAGS_EN) { 1385 apic_io_id[apic_io_max] = ioapicp->io_apicid; 1386 apic_io_ver[apic_io_max] = ioapicp->io_version; 1387 /*LINTED: pointer cast may result in improper alignment */ 1388 apicioadr[apic_io_max] = 1389 (int32_t *)psm_map_phys( 1390 (uint32_t)ioapicp->io_apic_addr, 1391 APIC_IO_MEMLEN, PROT_READ | PROT_WRITE); 1392 1393 if (!apicioadr[apic_io_max]) 1394 return (PSM_FAILURE); 1395 1396 ioapic = apicioadr[apic_io_max]; 1397 ioapic[APIC_IO_REG] = APIC_ID_CMD; 1398 id = ioapic[APIC_IO_DATA]; 1399 hid = (uchar_t)(((uint_t)id) >> 24); 1400 1401 if (hid != apic_io_id[apic_io_max]) { 1402 if (apic_io_id[apic_io_max] == 0) 1403 apic_io_id[apic_io_max] = hid; 1404 else { 1405 /* 1406 * set ioapic id to whatever 1407 * reported by MPS 1408 * 1409 * may not need to set index 1410 * again ??? 1411 * take it out and try 1412 */ 1413 1414 id = ((int32_t) 1415 apic_io_id[apic_io_max]) << 1416 24; 1417 1418 ioapic[APIC_IO_REG] = 1419 APIC_ID_CMD; 1420 1421 ioapic[APIC_IO_DATA] = id; 1422 1423 } 1424 } 1425 apic_io_max++; 1426 } 1427 } 1428 ioapicp++; 1429 } while (ioapicp->io_entry == APIC_IO_ENTRY); 1430 1431 apic_io_intrp = (struct apic_io_intr *)ioapicp; 1432 1433 intrp = apic_io_intrp; 1434 while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 1435 if ((intrp->intr_irq > APIC_MAX_ISA_IRQ) || 1436 (apic_find_bus(intrp->intr_busid) == BUS_PCI)) { 1437 apic_irq_translate = 1; 1438 break; 1439 } 1440 intrp++; 1441 } 1442 1443 return (PSM_SUCCESS); 1444 } 1445 1446 boolean_t 1447 apic_cpu_in_range(int cpu) 1448 { 1449 return ((cpu & ~IRQ_USER_BOUND) < apic_nproc); 1450 } 1451 1452 static struct apic_mpfps_hdr * 1453 apic_find_fps_sig(caddr_t cptr, int len) 1454 { 1455 int i; 1456 1457 /* Look for the pattern "_MP_" */ 1458 for (i = 0; i < len; i += 16) { 1459 if ((*(cptr+i) == '_') && 1460 (*(cptr+i+1) == 'M') && 1461 (*(cptr+i+2) == 'P') && 1462 (*(cptr+i+3) == '_')) 1463 /*LINTED: pointer cast may result in improper alignment */ 1464 return ((struct apic_mpfps_hdr *)(cptr + i)); 1465 } 1466 return (NULL); 1467 } 1468 1469 static int 1470 apic_checksum(caddr_t bptr, int len) 1471 { 1472 int i; 1473 uchar_t cksum; 1474 1475 cksum = 0; 1476 for (i = 0; i < len; i++) 1477 cksum += *bptr++; 1478 return ((int)cksum); 1479 } 1480 1481 1482 /* 1483 * Initialise vector->ipl and ipl->pri arrays. level_intr and irqtable 1484 * are also set to NULL. vector->irq is set to a value which cannot map 1485 * to a real irq to show that it is free. 1486 */ 1487 void 1488 apic_init() 1489 { 1490 int i; 1491 int *iptr; 1492 1493 int j = 1; 1494 apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */ 1495 for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) { 1496 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) && 1497 (apic_vectortoipl[i + 1] == apic_vectortoipl[i])) 1498 /* get to highest vector at the same ipl */ 1499 continue; 1500 for (; j <= apic_vectortoipl[i]; j++) { 1501 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + 1502 APIC_BASE_VECT; 1503 } 1504 } 1505 for (; j < MAXIPL + 1; j++) 1506 /* fill up any empty ipltopri slots */ 1507 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT; 1508 1509 /* cpu 0 is always up */ 1510 apic_cpus[0].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE; 1511 1512 iptr = (int *)&apic_irq_table[0]; 1513 for (i = 0; i <= APIC_MAX_VECTOR; i++) { 1514 apic_level_intr[i] = 0; 1515 *iptr++ = NULL; 1516 apic_vector_to_irq[i] = APIC_RESV_IRQ; 1517 apic_reprogram_info[i].valid = 0; 1518 apic_reprogram_info[i].bindcpu = 0; 1519 apic_reprogram_info[i].timeouts = 0; 1520 } 1521 1522 /* 1523 * Allocate a dummy irq table entry for the reserved entry. 1524 * This takes care of the race between removing an irq and 1525 * clock detecting a CPU in that irq during interrupt load 1526 * sampling. 1527 */ 1528 apic_irq_table[APIC_RESV_IRQ] = 1529 kmem_zalloc(sizeof (apic_irq_t), KM_NOSLEEP); 1530 1531 mutex_init(&airq_mutex, NULL, MUTEX_DEFAULT, NULL); 1532 mutex_init(&apic_reprogram_timeout_mutex, NULL, MUTEX_DEFAULT, NULL); 1533 #if defined(__amd64) 1534 /* 1535 * Make cpu-specific interrupt info point to cr8pri vector 1536 */ 1537 for (i = 0; i <= MAXIPL; i++) 1538 apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT; 1539 CPU->cpu_pri_data = apic_cr8pri; 1540 intpri_use_cr8 = 1; 1541 #endif /* __amd64 */ 1542 } 1543 1544 /* 1545 * handler for APIC Error interrupt. Just print a warning and continue 1546 */ 1547 static int 1548 apic_error_intr() 1549 { 1550 uint_t error0, error1, error; 1551 uint_t i; 1552 1553 /* 1554 * We need to write before read as per 7.4.17 of system prog manual. 1555 * We do both and or the results to be safe 1556 */ 1557 error0 = apicadr[APIC_ERROR_STATUS]; 1558 apicadr[APIC_ERROR_STATUS] = 0; 1559 error1 = apicadr[APIC_ERROR_STATUS]; 1560 error = error0 | error1; 1561 1562 /* 1563 * Clear the APIC error status (do this on all cpus that enter here) 1564 * (two writes are required due to the semantics of accessing the 1565 * error status register.) 1566 */ 1567 apicadr[APIC_ERROR_STATUS] = 0; 1568 apicadr[APIC_ERROR_STATUS] = 0; 1569 1570 /* 1571 * Prevent more than 1 CPU from handling error interrupt causing 1572 * double printing (interleave of characters from multiple 1573 * CPU's when using prom_printf) 1574 */ 1575 if (lock_try(&apic_error_lock) == 0) 1576 return (error ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 1577 if (error) { 1578 #if DEBUG 1579 if (apic_debug) 1580 debug_enter("pcplusmp: APIC Error interrupt received"); 1581 #endif /* DEBUG */ 1582 if (apic_panic_on_apic_error) 1583 cmn_err(CE_PANIC, 1584 "APIC Error interrupt on CPU %d. Status = %x\n", 1585 psm_get_cpu_id(), error); 1586 else { 1587 if ((error & ~APIC_CS_ERRORS) == 0) { 1588 /* cksum error only */ 1589 apic_error |= APIC_ERR_APIC_ERROR; 1590 apic_apic_error |= error; 1591 apic_num_apic_errors++; 1592 apic_num_cksum_errors++; 1593 } else { 1594 /* 1595 * prom_printf is the best shot we have of 1596 * something which is problem free from 1597 * high level/NMI type of interrupts 1598 */ 1599 prom_printf("APIC Error interrupt on CPU %d. " 1600 "Status 0 = %x, Status 1 = %x\n", 1601 psm_get_cpu_id(), error0, error1); 1602 apic_error |= APIC_ERR_APIC_ERROR; 1603 apic_apic_error |= error; 1604 apic_num_apic_errors++; 1605 for (i = 0; i < apic_error_display_delay; i++) { 1606 tenmicrosec(); 1607 } 1608 /* 1609 * provide more delay next time limited to 1610 * roughly 1 clock tick time 1611 */ 1612 if (apic_error_display_delay < 500) 1613 apic_error_display_delay *= 2; 1614 } 1615 } 1616 lock_clear(&apic_error_lock); 1617 return (DDI_INTR_CLAIMED); 1618 } else { 1619 lock_clear(&apic_error_lock); 1620 return (DDI_INTR_UNCLAIMED); 1621 } 1622 /* NOTREACHED */ 1623 } 1624 1625 /* 1626 * Turn off the mask bit in the performance counter Local Vector Table entry. 1627 */ 1628 static void 1629 apic_cpcovf_mask_clear(void) 1630 { 1631 apicadr[APIC_PCINT_VECT] &= ~APIC_LVT_MASK; 1632 } 1633 1634 static void 1635 apic_init_intr() 1636 { 1637 processorid_t cpun = psm_get_cpu_id(); 1638 1639 #if defined(__amd64) 1640 setcr8((ulong_t)(APIC_MASK_ALL >> APIC_IPL_SHIFT)); 1641 #else 1642 apicadr[APIC_TASK_REG] = APIC_MASK_ALL; 1643 #endif 1644 1645 if (apic_flat_model) 1646 apicadr[APIC_FORMAT_REG] = APIC_FLAT_MODEL; 1647 else 1648 apicadr[APIC_FORMAT_REG] = APIC_CLUSTER_MODEL; 1649 apicadr[APIC_DEST_REG] = AV_HIGH_ORDER >> cpun; 1650 1651 /* need to enable APIC before unmasking NMI */ 1652 apicadr[APIC_SPUR_INT_REG] = AV_UNIT_ENABLE | APIC_SPUR_INTR; 1653 1654 apicadr[APIC_LOCAL_TIMER] = AV_MASK; 1655 apicadr[APIC_INT_VECT0] = AV_MASK; /* local intr reg 0 */ 1656 apicadr[APIC_INT_VECT1] = AV_NMI; /* enable NMI */ 1657 1658 if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) 1659 return; 1660 1661 /* Enable performance counter overflow interrupt */ 1662 1663 if ((x86_feature & X86_MSR) != X86_MSR) 1664 apic_enable_cpcovf_intr = 0; 1665 if (apic_enable_cpcovf_intr) { 1666 if (apic_cpcovf_vect == 0) { 1667 int ipl = APIC_PCINT_IPL; 1668 int irq = apic_get_ipivect(ipl, -1); 1669 1670 ASSERT(irq != -1); 1671 apic_cpcovf_vect = apic_irq_table[irq]->airq_vector; 1672 ASSERT(apic_cpcovf_vect); 1673 (void) add_avintr(NULL, ipl, 1674 (avfunc)kcpc_hw_overflow_intr, 1675 "apic pcint", irq, NULL, NULL, NULL, NULL); 1676 kcpc_hw_overflow_intr_installed = 1; 1677 kcpc_hw_enable_cpc_intr = apic_cpcovf_mask_clear; 1678 } 1679 apicadr[APIC_PCINT_VECT] = apic_cpcovf_vect; 1680 } 1681 1682 /* Enable error interrupt */ 1683 1684 if (apic_enable_error_intr) { 1685 if (apic_errvect == 0) { 1686 int ipl = 0xf; /* get highest priority intr */ 1687 int irq = apic_get_ipivect(ipl, -1); 1688 1689 ASSERT(irq != -1); 1690 apic_errvect = apic_irq_table[irq]->airq_vector; 1691 ASSERT(apic_errvect); 1692 /* 1693 * Not PSMI compliant, but we are going to merge 1694 * with ON anyway 1695 */ 1696 (void) add_avintr((void *)NULL, ipl, 1697 (avfunc)apic_error_intr, "apic error intr", 1698 irq, NULL, NULL, NULL, NULL); 1699 } 1700 apicadr[APIC_ERR_VECT] = apic_errvect; 1701 apicadr[APIC_ERROR_STATUS] = 0; 1702 apicadr[APIC_ERROR_STATUS] = 0; 1703 } 1704 } 1705 1706 static void 1707 apic_disable_local_apic() 1708 { 1709 apicadr[APIC_TASK_REG] = APIC_MASK_ALL; 1710 apicadr[APIC_LOCAL_TIMER] = AV_MASK; 1711 apicadr[APIC_INT_VECT0] = AV_MASK; /* local intr reg 0 */ 1712 apicadr[APIC_INT_VECT1] = AV_MASK; /* disable NMI */ 1713 apicadr[APIC_ERR_VECT] = AV_MASK; /* and error interrupt */ 1714 apicadr[APIC_PCINT_VECT] = AV_MASK; /* and perf counter intr */ 1715 apicadr[APIC_SPUR_INT_REG] = APIC_SPUR_INTR; 1716 } 1717 1718 static void 1719 apic_picinit(void) 1720 { 1721 int i, j; 1722 uint_t isr; 1723 volatile int32_t *ioapic; 1724 apic_irq_t *irqptr; 1725 struct intrspec ispec; 1726 1727 /* 1728 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr 1729 * bit on without clearing it with EOI. Since softint 1730 * uses vector 0x20 to interrupt itself, so softint will 1731 * not work on this machine. In order to fix this problem 1732 * a check is made to verify all the isr bits are clear. 1733 * If not, EOIs are issued to clear the bits. 1734 */ 1735 for (i = 7; i >= 1; i--) { 1736 if ((isr = apicadr[APIC_ISR_REG + (i * 4)]) != 0) 1737 for (j = 0; ((j < 32) && (isr != 0)); j++) 1738 if (isr & (1 << j)) { 1739 apicadr[APIC_EOI_REG] = 0; 1740 isr &= ~(1 << j); 1741 apic_error |= APIC_ERR_BOOT_EOI; 1742 } 1743 } 1744 1745 /* set a flag so we know we have run apic_picinit() */ 1746 apic_flag = 1; 1747 LOCK_INIT_CLEAR(&apic_gethrtime_lock); 1748 LOCK_INIT_CLEAR(&apic_ioapic_lock); 1749 LOCK_INIT_CLEAR(&apic_revector_lock); 1750 LOCK_INIT_CLEAR(&apic_ioapic_reprogram_lock); 1751 LOCK_INIT_CLEAR(&apic_error_lock); 1752 1753 picsetup(); /* initialise the 8259 */ 1754 1755 /* add nmi handler - least priority nmi handler */ 1756 LOCK_INIT_CLEAR(&apic_nmi_lock); 1757 1758 if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr, 1759 "pcplusmp NMI handler", (caddr_t)NULL)) 1760 cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler"); 1761 1762 apic_init_intr(); 1763 1764 /* enable apic mode if imcr present */ 1765 if (apic_imcrp) { 1766 outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT); 1767 outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC); 1768 } 1769 1770 /* mask interrupt vectors */ 1771 for (j = 0; j < apic_io_max; j++) { 1772 int intin_max; 1773 ioapic = apicioadr[j]; 1774 ioapic[APIC_IO_REG] = APIC_VERS_CMD; 1775 /* Bits 23-16 define the maximum redirection entries */ 1776 intin_max = (ioapic[APIC_IO_DATA] >> 16) & 0xff; 1777 for (i = 0; i < intin_max; i++) { 1778 ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * i; 1779 ioapic[APIC_IO_DATA] = AV_MASK; 1780 } 1781 } 1782 1783 /* 1784 * Hack alert: deal with ACPI SCI interrupt chicken/egg here 1785 */ 1786 if (apic_sci_vect > 0) { 1787 /* 1788 * acpica has already done add_avintr(); we just 1789 * to finish the job by mimicing translate_irq() 1790 * 1791 * Fake up an intrspec and setup the tables 1792 */ 1793 ispec.intrspec_vec = apic_sci_vect; 1794 ispec.intrspec_pri = SCI_IPL; 1795 1796 if (apic_setup_irq_table(NULL, apic_sci_vect, NULL, 1797 &ispec, &apic_sci_flags, DDI_INTR_TYPE_FIXED) < 0) { 1798 cmn_err(CE_WARN, "!apic: SCI setup failed"); 1799 return; 1800 } 1801 irqptr = apic_irq_table[apic_sci_vect]; 1802 1803 /* Program I/O APIC */ 1804 (void) apic_setup_io_intr(irqptr, apic_sci_vect); 1805 1806 irqptr->airq_share++; 1807 } 1808 } 1809 1810 1811 static void 1812 apic_cpu_start(processorid_t cpun, caddr_t rm_code) 1813 { 1814 int loop_count; 1815 uint32_t vector; 1816 uint_t cpu_id, iflag; 1817 1818 cpu_id = apic_cpus[cpun].aci_local_id; 1819 1820 apic_cmos_ssb_set = 1; 1821 1822 /* 1823 * Interrupts on BSP cpu will be disabled during these startup 1824 * steps in order to avoid unwanted side effects from 1825 * executing interrupt handlers on a problematic BIOS. 1826 */ 1827 1828 iflag = intr_clear(); 1829 outb(CMOS_ADDR, SSB); 1830 outb(CMOS_DATA, BIOS_SHUTDOWN); 1831 1832 while (get_apic_cmd1() & AV_PENDING) 1833 apic_ret(); 1834 1835 /* for integrated - make sure there is one INIT IPI in buffer */ 1836 /* for external - it will wake up the cpu */ 1837 apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1838 apicadr[APIC_INT_CMD1] = AV_ASSERT | AV_RESET; 1839 1840 /* If only 1 CPU is installed, PENDING bit will not go low */ 1841 for (loop_count = 0x1000; loop_count; loop_count--) 1842 if (get_apic_cmd1() & AV_PENDING) 1843 apic_ret(); 1844 else 1845 break; 1846 1847 apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1848 apicadr[APIC_INT_CMD1] = AV_DEASSERT | AV_RESET; 1849 1850 drv_usecwait(20000); /* 20 milli sec */ 1851 1852 if (apic_cpus[cpun].aci_local_ver >= APIC_INTEGRATED_VERS) { 1853 /* integrated apic */ 1854 1855 rm_code = (caddr_t)(uintptr_t)rm_platter_pa; 1856 vector = (rm_platter_pa >> MMU_PAGESHIFT) & 1857 (APIC_VECTOR_MASK | APIC_IPL_MASK); 1858 1859 /* to offset the INIT IPI queue up in the buffer */ 1860 apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1861 apicadr[APIC_INT_CMD1] = vector | AV_STARTUP; 1862 1863 drv_usecwait(200); /* 20 micro sec */ 1864 1865 apicadr[APIC_INT_CMD2] = cpu_id << APIC_ICR_ID_BIT_OFFSET; 1866 apicadr[APIC_INT_CMD1] = vector | AV_STARTUP; 1867 1868 drv_usecwait(200); /* 20 micro sec */ 1869 } 1870 intr_restore(iflag); 1871 } 1872 1873 1874 #ifdef DEBUG 1875 int apic_break_on_cpu = 9; 1876 int apic_stretch_interrupts = 0; 1877 int apic_stretch_ISR = 1 << 3; /* IPL of 3 matches nothing now */ 1878 1879 void 1880 apic_break() 1881 { 1882 } 1883 #endif /* DEBUG */ 1884 1885 /* 1886 * platform_intr_enter 1887 * 1888 * Called at the beginning of the interrupt service routine to 1889 * mask all level equal to and below the interrupt priority 1890 * of the interrupting vector. An EOI should be given to 1891 * the interrupt controller to enable other HW interrupts. 1892 * 1893 * Return -1 for spurious interrupts 1894 * 1895 */ 1896 /*ARGSUSED*/ 1897 static int 1898 apic_intr_enter(int ipl, int *vectorp) 1899 { 1900 uchar_t vector; 1901 int nipl; 1902 int irq, iflag; 1903 apic_cpus_info_t *cpu_infop; 1904 1905 /* 1906 * The real vector programmed in APIC is *vectorp + 0x20 1907 * But, cmnint code subtracts 0x20 before pushing it. 1908 * Hence APIC_BASE_VECT is 0x20. 1909 */ 1910 1911 vector = (uchar_t)*vectorp; 1912 1913 /* if interrupted by the clock, increment apic_nsec_since_boot */ 1914 if (vector == apic_clkvect) { 1915 if (!apic_oneshot) { 1916 /* NOTE: this is not MT aware */ 1917 apic_hrtime_stamp++; 1918 apic_nsec_since_boot += apic_nsec_per_intr; 1919 apic_hrtime_stamp++; 1920 last_count_read = apic_hertz_count; 1921 apic_redistribute_compute(); 1922 } 1923 1924 /* We will avoid all the book keeping overhead for clock */ 1925 nipl = apic_vectortoipl[vector >> APIC_IPL_SHIFT]; 1926 #if defined(__amd64) 1927 setcr8((ulong_t)apic_cr8pri[nipl]); 1928 #else 1929 apicadr[APIC_TASK_REG] = apic_ipltopri[nipl]; 1930 #endif 1931 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT]; 1932 apicadr[APIC_EOI_REG] = 0; 1933 return (nipl); 1934 } 1935 1936 cpu_infop = &apic_cpus[psm_get_cpu_id()]; 1937 1938 if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) { 1939 cpu_infop->aci_spur_cnt++; 1940 return (APIC_INT_SPURIOUS); 1941 } 1942 1943 /* Check if the vector we got is really what we need */ 1944 if (apic_revector_pending) { 1945 /* 1946 * Disable interrupts for the duration of 1947 * the vector translation to prevent a self-race for 1948 * the apic_revector_lock. This cannot be done 1949 * in apic_xlate_vector because it is recursive and 1950 * we want the vector translation to be atomic with 1951 * respect to other (higher-priority) interrupts. 1952 */ 1953 iflag = intr_clear(); 1954 vector = apic_xlate_vector(vector + APIC_BASE_VECT) - 1955 APIC_BASE_VECT; 1956 intr_restore(iflag); 1957 } 1958 1959 nipl = apic_vectortoipl[vector >> APIC_IPL_SHIFT]; 1960 *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT]; 1961 1962 #if defined(__amd64) 1963 setcr8((ulong_t)apic_cr8pri[nipl]); 1964 #else 1965 apicadr[APIC_TASK_REG] = apic_ipltopri[nipl]; 1966 #endif 1967 1968 cpu_infop->aci_current[nipl] = (uchar_t)irq; 1969 cpu_infop->aci_curipl = (uchar_t)nipl; 1970 cpu_infop->aci_ISR_in_progress |= 1 << nipl; 1971 1972 /* 1973 * apic_level_intr could have been assimilated into the irq struct. 1974 * but, having it as a character array is more efficient in terms of 1975 * cache usage. So, we leave it as is. 1976 */ 1977 if (!apic_level_intr[irq]) 1978 apicadr[APIC_EOI_REG] = 0; 1979 1980 #ifdef DEBUG 1981 APIC_DEBUG_BUF_PUT(vector); 1982 APIC_DEBUG_BUF_PUT(irq); 1983 APIC_DEBUG_BUF_PUT(nipl); 1984 APIC_DEBUG_BUF_PUT(psm_get_cpu_id()); 1985 if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl))) 1986 drv_usecwait(apic_stretch_interrupts); 1987 1988 if (apic_break_on_cpu == psm_get_cpu_id()) 1989 apic_break(); 1990 #endif /* DEBUG */ 1991 return (nipl); 1992 } 1993 1994 static void 1995 apic_intr_exit(int prev_ipl, int irq) 1996 { 1997 apic_cpus_info_t *cpu_infop; 1998 1999 #if defined(__amd64) 2000 setcr8((ulong_t)apic_cr8pri[prev_ipl]); 2001 #else 2002 apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl]; 2003 #endif 2004 2005 cpu_infop = &apic_cpus[psm_get_cpu_id()]; 2006 if (apic_level_intr[irq]) 2007 apicadr[APIC_EOI_REG] = 0; 2008 2009 cpu_infop->aci_curipl = (uchar_t)prev_ipl; 2010 /* ISR above current pri could not be in progress */ 2011 cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; 2012 } 2013 2014 /* 2015 * Mask all interrupts below or equal to the given IPL 2016 */ 2017 static void 2018 apic_setspl(int ipl) 2019 { 2020 2021 #if defined(__amd64) 2022 setcr8((ulong_t)apic_cr8pri[ipl]); 2023 #else 2024 apicadr[APIC_TASK_REG] = apic_ipltopri[ipl]; 2025 #endif 2026 2027 /* interrupts at ipl above this cannot be in progress */ 2028 apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1; 2029 /* 2030 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts 2031 * have enough time to come in before the priority is raised again 2032 * during the idle() loop. 2033 */ 2034 if (apic_setspl_delay) 2035 (void) get_apic_pri(); 2036 } 2037 2038 /* 2039 * trigger a software interrupt at the given IPL 2040 */ 2041 static void 2042 apic_set_softintr(int ipl) 2043 { 2044 int vector; 2045 uint_t flag; 2046 2047 vector = apic_resv_vector[ipl]; 2048 2049 flag = intr_clear(); 2050 2051 while (get_apic_cmd1() & AV_PENDING) 2052 apic_ret(); 2053 2054 /* generate interrupt at vector on itself only */ 2055 apicadr[APIC_INT_CMD1] = AV_SH_SELF | vector; 2056 2057 intr_restore(flag); 2058 } 2059 2060 /* 2061 * generates an interprocessor interrupt to another CPU 2062 */ 2063 static void 2064 apic_send_ipi(int cpun, int ipl) 2065 { 2066 int vector; 2067 uint_t flag; 2068 2069 vector = apic_resv_vector[ipl]; 2070 2071 flag = intr_clear(); 2072 2073 while (get_apic_cmd1() & AV_PENDING) 2074 apic_ret(); 2075 2076 apicadr[APIC_INT_CMD2] = 2077 apic_cpus[cpun].aci_local_id << APIC_ICR_ID_BIT_OFFSET; 2078 apicadr[APIC_INT_CMD1] = vector; 2079 2080 intr_restore(flag); 2081 } 2082 2083 2084 /*ARGSUSED*/ 2085 static void 2086 apic_set_idlecpu(processorid_t cpun) 2087 { 2088 } 2089 2090 /*ARGSUSED*/ 2091 static void 2092 apic_unset_idlecpu(processorid_t cpun) 2093 { 2094 } 2095 2096 2097 static void 2098 apic_ret() 2099 { 2100 } 2101 2102 static int 2103 get_apic_cmd1() 2104 { 2105 return (apicadr[APIC_INT_CMD1]); 2106 } 2107 2108 static int 2109 get_apic_pri() 2110 { 2111 #if defined(__amd64) 2112 return ((int)getcr8()); 2113 #else 2114 return (apicadr[APIC_TASK_REG]); 2115 #endif 2116 } 2117 2118 /* 2119 * If apic_coarse_time == 1, then apic_gettime() is used instead of 2120 * apic_gethrtime(). This is used for performance instead of accuracy. 2121 */ 2122 2123 static hrtime_t 2124 apic_gettime() 2125 { 2126 int old_hrtime_stamp; 2127 hrtime_t temp; 2128 2129 /* 2130 * In one-shot mode, we do not keep time, so if anyone 2131 * calls psm_gettime() directly, we vector over to 2132 * gethrtime(). 2133 * one-shot mode MUST NOT be enabled if this psm is the source of 2134 * hrtime. 2135 */ 2136 2137 if (apic_oneshot) 2138 return (gethrtime()); 2139 2140 2141 gettime_again: 2142 while ((old_hrtime_stamp = apic_hrtime_stamp) & 1) 2143 apic_ret(); 2144 2145 temp = apic_nsec_since_boot; 2146 2147 if (apic_hrtime_stamp != old_hrtime_stamp) { /* got an interrupt */ 2148 goto gettime_again; 2149 } 2150 return (temp); 2151 } 2152 2153 /* 2154 * Here we return the number of nanoseconds since booting. Note every 2155 * clock interrupt increments apic_nsec_since_boot by the appropriate 2156 * amount. 2157 */ 2158 static hrtime_t 2159 apic_gethrtime() 2160 { 2161 int curr_timeval, countval, elapsed_ticks, oflags; 2162 int old_hrtime_stamp, status; 2163 hrtime_t temp; 2164 uchar_t cpun; 2165 2166 2167 /* 2168 * In one-shot mode, we do not keep time, so if anyone 2169 * calls psm_gethrtime() directly, we vector over to 2170 * gethrtime(). 2171 * one-shot mode MUST NOT be enabled if this psm is the source of 2172 * hrtime. 2173 */ 2174 2175 if (apic_oneshot) 2176 return (gethrtime()); 2177 2178 oflags = intr_clear(); /* prevent migration */ 2179 2180 cpun = (uchar_t)((uint_t)apicadr[APIC_LID_REG] >> APIC_ID_BIT_OFFSET); 2181 2182 lock_set(&apic_gethrtime_lock); 2183 2184 gethrtime_again: 2185 while ((old_hrtime_stamp = apic_hrtime_stamp) & 1) 2186 apic_ret(); 2187 2188 /* 2189 * Check to see which CPU we are on. Note the time is kept on 2190 * the local APIC of CPU 0. If on CPU 0, simply read the current 2191 * counter. If on another CPU, issue a remote read command to CPU 0. 2192 */ 2193 if (cpun == apic_cpus[0].aci_local_id) { 2194 countval = apicadr[APIC_CURR_COUNT]; 2195 } else { 2196 while (get_apic_cmd1() & AV_PENDING) 2197 apic_ret(); 2198 2199 apicadr[APIC_INT_CMD2] = 2200 apic_cpus[0].aci_local_id << APIC_ICR_ID_BIT_OFFSET; 2201 apicadr[APIC_INT_CMD1] = APIC_CURR_ADD|AV_REMOTE; 2202 2203 while ((status = get_apic_cmd1()) & AV_READ_PENDING) 2204 apic_ret(); 2205 2206 if (status & AV_REMOTE_STATUS) /* 1 = valid */ 2207 countval = apicadr[APIC_REMOTE_READ]; 2208 else { /* 0 = invalid */ 2209 apic_remote_hrterr++; 2210 /* 2211 * return last hrtime right now, will need more 2212 * testing if change to retry 2213 */ 2214 temp = apic_last_hrtime; 2215 2216 lock_clear(&apic_gethrtime_lock); 2217 2218 intr_restore(oflags); 2219 2220 return (temp); 2221 } 2222 } 2223 if (countval > last_count_read) 2224 countval = 0; 2225 else 2226 last_count_read = countval; 2227 2228 elapsed_ticks = apic_hertz_count - countval; 2229 2230 curr_timeval = elapsed_ticks * apic_nsec_per_tick; 2231 temp = apic_nsec_since_boot + curr_timeval; 2232 2233 if (apic_hrtime_stamp != old_hrtime_stamp) { /* got an interrupt */ 2234 /* we might have clobbered last_count_read. Restore it */ 2235 last_count_read = apic_hertz_count; 2236 goto gethrtime_again; 2237 } 2238 2239 if (temp < apic_last_hrtime) { 2240 /* return last hrtime if error occurs */ 2241 apic_hrtime_error++; 2242 temp = apic_last_hrtime; 2243 } 2244 else 2245 apic_last_hrtime = temp; 2246 2247 lock_clear(&apic_gethrtime_lock); 2248 intr_restore(oflags); 2249 2250 return (temp); 2251 } 2252 2253 /* apic NMI handler */ 2254 /*ARGSUSED*/ 2255 static void 2256 apic_nmi_intr(caddr_t arg) 2257 { 2258 if (apic_shutdown_processors) { 2259 apic_disable_local_apic(); 2260 return; 2261 } 2262 2263 if (lock_try(&apic_nmi_lock)) { 2264 if (apic_kmdb_on_nmi) { 2265 if (psm_debugger() == 0) { 2266 cmn_err(CE_PANIC, 2267 "NMI detected, kmdb is not available."); 2268 } else { 2269 debug_enter("\nNMI detected, entering kmdb.\n"); 2270 } 2271 } else { 2272 if (apic_panic_on_nmi) { 2273 /* Keep panic from entering kmdb. */ 2274 nopanicdebug = 1; 2275 cmn_err(CE_PANIC, "pcplusmp: NMI received"); 2276 } else { 2277 /* 2278 * prom_printf is the best shot we have 2279 * of something which is problem free from 2280 * high level/NMI type of interrupts 2281 */ 2282 prom_printf("pcplusmp: NMI received\n"); 2283 apic_error |= APIC_ERR_NMI; 2284 apic_num_nmis++; 2285 } 2286 } 2287 lock_clear(&apic_nmi_lock); 2288 } 2289 } 2290 2291 /* 2292 * Add mask bits to disable interrupt vector from happening 2293 * at or above IPL. In addition, it should remove mask bits 2294 * to enable interrupt vectors below the given IPL. 2295 * 2296 * Both add and delspl are complicated by the fact that different interrupts 2297 * may share IRQs. This can happen in two ways. 2298 * 1. The same H/W line is shared by more than 1 device 2299 * 1a. with interrupts at different IPLs 2300 * 1b. with interrupts at same IPL 2301 * 2. We ran out of vectors at a given IPL and started sharing vectors. 2302 * 1b and 2 should be handled gracefully, except for the fact some ISRs 2303 * will get called often when no interrupt is pending for the device. 2304 * For 1a, we just hope that the machine blows up with the person who 2305 * set it up that way!. In the meantime, we handle it at the higher IPL. 2306 */ 2307 /*ARGSUSED*/ 2308 static int 2309 apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl) 2310 { 2311 uchar_t vector; 2312 int iflag; 2313 apic_irq_t *irqptr, *irqheadptr; 2314 int irqindex; 2315 2316 ASSERT(max_ipl <= UCHAR_MAX); 2317 irqindex = IRQINDEX(irqno); 2318 2319 if ((irqindex == -1) || (!apic_irq_table[irqindex])) 2320 return (PSM_FAILURE); 2321 2322 irqptr = irqheadptr = apic_irq_table[irqindex]; 2323 2324 DDI_INTR_IMPLDBG((CE_CONT, "apic_addspl: dip=0x%p type=%d irqno=0x%x " 2325 "vector=0x%x\n", (void *)irqptr->airq_dip, 2326 irqptr->airq_mps_intr_index, irqno, irqptr->airq_vector)); 2327 2328 while (irqptr) { 2329 if (VIRTIRQ(irqindex, irqptr->airq_share_id) == irqno) 2330 break; 2331 irqptr = irqptr->airq_next; 2332 } 2333 irqptr->airq_share++; 2334 2335 /* return if it is not hardware interrupt */ 2336 if (irqptr->airq_mps_intr_index == RESERVE_INDEX) 2337 return (PSM_SUCCESS); 2338 2339 /* Or if there are more interupts at a higher IPL */ 2340 if (ipl != max_ipl) 2341 return (PSM_SUCCESS); 2342 2343 /* 2344 * if apic_picinit() has not been called yet, just return. 2345 * At the end of apic_picinit(), we will call setup_io_intr(). 2346 */ 2347 2348 if (!apic_flag) 2349 return (PSM_SUCCESS); 2350 2351 iflag = intr_clear(); 2352 2353 /* 2354 * Upgrade vector if max_ipl is not earlier ipl. If we cannot allocate, 2355 * return failure. Not very elegant, but then we hope the 2356 * machine will blow up with ... 2357 */ 2358 if (irqptr->airq_ipl != max_ipl) { 2359 vector = apic_allocate_vector(max_ipl, irqindex, 1); 2360 if (vector == 0) { 2361 intr_restore(iflag); 2362 irqptr->airq_share--; 2363 return (PSM_FAILURE); 2364 } 2365 irqptr = irqheadptr; 2366 apic_mark_vector(irqptr->airq_vector, vector); 2367 while (irqptr) { 2368 irqptr->airq_vector = vector; 2369 irqptr->airq_ipl = (uchar_t)max_ipl; 2370 /* 2371 * reprogram irq being added and every one else 2372 * who is not in the UNINIT state 2373 */ 2374 if ((VIRTIRQ(irqindex, irqptr->airq_share_id) == 2375 irqno) || (irqptr->airq_temp_cpu != IRQ_UNINIT)) { 2376 apic_record_rdt_entry(irqptr, irqindex); 2377 (void) apic_setup_io_intr(irqptr, irqindex); 2378 } 2379 irqptr = irqptr->airq_next; 2380 } 2381 intr_restore(iflag); 2382 return (PSM_SUCCESS); 2383 } 2384 2385 ASSERT(irqptr); 2386 (void) apic_setup_io_intr(irqptr, irqindex); 2387 intr_restore(iflag); 2388 return (PSM_SUCCESS); 2389 } 2390 2391 /* 2392 * Recompute mask bits for the given interrupt vector. 2393 * If there is no interrupt servicing routine for this 2394 * vector, this function should disable interrupt vector 2395 * from happening at all IPLs. If there are still 2396 * handlers using the given vector, this function should 2397 * disable the given vector from happening below the lowest 2398 * IPL of the remaining hadlers. 2399 */ 2400 /*ARGSUSED*/ 2401 static int 2402 apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl) 2403 { 2404 uchar_t vector, bind_cpu; 2405 int iflag, intin, irqindex; 2406 volatile int32_t *ioapic; 2407 apic_irq_t *irqptr, *irqheadptr; 2408 2409 irqindex = IRQINDEX(irqno); 2410 irqptr = irqheadptr = apic_irq_table[irqindex]; 2411 2412 DDI_INTR_IMPLDBG((CE_CONT, "apic_delspl: dip=0x%p type=%d irqno=0x%x " 2413 "vector=0x%x\n", (void *)irqptr->airq_dip, 2414 irqptr->airq_mps_intr_index, irqno, irqptr->airq_vector)); 2415 2416 while (irqptr) { 2417 if (VIRTIRQ(irqindex, irqptr->airq_share_id) == irqno) 2418 break; 2419 irqptr = irqptr->airq_next; 2420 } 2421 ASSERT(irqptr); 2422 2423 irqptr->airq_share--; 2424 2425 if (ipl < max_ipl) 2426 return (PSM_SUCCESS); 2427 2428 /* return if it is not hardware interrupt */ 2429 if (irqptr->airq_mps_intr_index == RESERVE_INDEX) 2430 return (PSM_SUCCESS); 2431 2432 if (!apic_flag) { 2433 /* 2434 * Clear irq_struct. If two devices shared an intpt 2435 * line & 1 unloaded before picinit, we are hosed. But, then 2436 * we hope the machine will ... 2437 */ 2438 irqptr->airq_mps_intr_index = FREE_INDEX; 2439 irqptr->airq_temp_cpu = IRQ_UNINIT; 2440 apic_free_vector(irqptr->airq_vector); 2441 return (PSM_SUCCESS); 2442 } 2443 /* 2444 * Downgrade vector to new max_ipl if needed.If we cannot allocate, 2445 * use old IPL. Not very elegant, but then we hope ... 2446 */ 2447 if ((irqptr->airq_ipl != max_ipl) && (max_ipl != PSM_INVALID_IPL)) { 2448 apic_irq_t *irqp; 2449 if (vector = apic_allocate_vector(max_ipl, irqno, 1)) { 2450 apic_mark_vector(irqheadptr->airq_vector, vector); 2451 irqp = irqheadptr; 2452 while (irqp) { 2453 irqp->airq_vector = vector; 2454 irqp->airq_ipl = (uchar_t)max_ipl; 2455 if (irqp->airq_temp_cpu != IRQ_UNINIT) { 2456 apic_record_rdt_entry(irqp, irqindex); 2457 (void) apic_setup_io_intr(irqp, 2458 irqindex); 2459 } 2460 irqp = irqp->airq_next; 2461 } 2462 } 2463 } 2464 2465 if (irqptr->airq_share) 2466 return (PSM_SUCCESS); 2467 2468 ioapic = apicioadr[irqptr->airq_ioapicindex]; 2469 intin = irqptr->airq_intin_no; 2470 iflag = intr_clear(); 2471 lock_set(&apic_ioapic_lock); 2472 ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * intin; 2473 ioapic[APIC_IO_DATA] = AV_MASK; 2474 2475 /* Disable the MSI/X vector */ 2476 if (APIC_IS_MSI_OR_MSIX_INDEX(irqptr->airq_mps_intr_index)) { 2477 int type = (irqptr->airq_mps_intr_index == MSI_INDEX) ? 2478 DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX; 2479 2480 /* 2481 * Make sure we only disable on the last 2482 * of the multi-MSI support 2483 */ 2484 if (i_ddi_intr_get_current_nintrs(irqptr->airq_dip) == 1) { 2485 (void) pci_msi_unconfigure(irqptr->airq_dip, type, 2486 irqptr->airq_ioapicindex); 2487 2488 (void) pci_msi_disable_mode(irqptr->airq_dip, type, 2489 irqptr->airq_ioapicindex); 2490 } 2491 } 2492 2493 if (max_ipl == PSM_INVALID_IPL) { 2494 ASSERT(irqheadptr == irqptr); 2495 bind_cpu = irqptr->airq_temp_cpu; 2496 if (((uchar_t)bind_cpu != IRQ_UNBOUND) && 2497 ((uchar_t)bind_cpu != IRQ_UNINIT)) { 2498 ASSERT((bind_cpu & ~IRQ_USER_BOUND) < apic_nproc); 2499 if (bind_cpu & IRQ_USER_BOUND) { 2500 /* If hardbound, temp_cpu == cpu */ 2501 bind_cpu &= ~IRQ_USER_BOUND; 2502 apic_cpus[bind_cpu].aci_bound--; 2503 } else 2504 apic_cpus[bind_cpu].aci_temp_bound--; 2505 } 2506 lock_clear(&apic_ioapic_lock); 2507 intr_restore(iflag); 2508 irqptr->airq_temp_cpu = IRQ_UNINIT; 2509 irqptr->airq_mps_intr_index = FREE_INDEX; 2510 apic_free_vector(irqptr->airq_vector); 2511 return (PSM_SUCCESS); 2512 } 2513 lock_clear(&apic_ioapic_lock); 2514 intr_restore(iflag); 2515 2516 mutex_enter(&airq_mutex); 2517 if ((irqptr == apic_irq_table[irqindex])) { 2518 apic_irq_t *oldirqptr; 2519 /* Move valid irq entry to the head */ 2520 irqheadptr = oldirqptr = irqptr; 2521 irqptr = irqptr->airq_next; 2522 ASSERT(irqptr); 2523 while (irqptr) { 2524 if (irqptr->airq_mps_intr_index != FREE_INDEX) 2525 break; 2526 oldirqptr = irqptr; 2527 irqptr = irqptr->airq_next; 2528 } 2529 /* remove all invalid ones from the beginning */ 2530 apic_irq_table[irqindex] = irqptr; 2531 /* 2532 * and link them back after the head. The invalid ones 2533 * begin with irqheadptr and end at oldirqptr 2534 */ 2535 oldirqptr->airq_next = irqptr->airq_next; 2536 irqptr->airq_next = irqheadptr; 2537 } 2538 mutex_exit(&airq_mutex); 2539 2540 irqptr->airq_temp_cpu = IRQ_UNINIT; 2541 irqptr->airq_mps_intr_index = FREE_INDEX; 2542 return (PSM_SUCCESS); 2543 } 2544 2545 /* 2546 * Return HW interrupt number corresponding to the given IPL 2547 */ 2548 /*ARGSUSED*/ 2549 static int 2550 apic_softlvl_to_irq(int ipl) 2551 { 2552 /* 2553 * Do not use apic to trigger soft interrupt. 2554 * It will cause the system to hang when 2 hardware interrupts 2555 * at the same priority with the softint are already accepted 2556 * by the apic. Cause the AV_PENDING bit will not be cleared 2557 * until one of the hardware interrupt is eoi'ed. If we need 2558 * to send an ipi at this time, we will end up looping forever 2559 * to wait for the AV_PENDING bit to clear. 2560 */ 2561 return (PSM_SV_SOFTWARE); 2562 } 2563 2564 static int 2565 apic_post_cpu_start() 2566 { 2567 int i, cpun; 2568 apic_irq_t *irq_ptr; 2569 2570 apic_init_intr(); 2571 2572 /* 2573 * since some systems don't enable the internal cache on the non-boot 2574 * cpus, so we have to enable them here 2575 */ 2576 setcr0(getcr0() & ~(0x60000000)); 2577 2578 while (get_apic_cmd1() & AV_PENDING) 2579 apic_ret(); 2580 2581 cpun = psm_get_cpu_id(); 2582 apic_cpus[cpun].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE; 2583 2584 for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 2585 irq_ptr = apic_irq_table[i]; 2586 if ((irq_ptr == NULL) || 2587 ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) != cpun)) 2588 continue; 2589 2590 while (irq_ptr) { 2591 if (irq_ptr->airq_temp_cpu != IRQ_UNINIT) 2592 (void) apic_rebind(irq_ptr, cpun, 1, IMMEDIATE); 2593 irq_ptr = irq_ptr->airq_next; 2594 } 2595 } 2596 2597 return (PSM_SUCCESS); 2598 } 2599 2600 processorid_t 2601 apic_get_next_processorid(processorid_t cpu_id) 2602 { 2603 2604 int i; 2605 2606 if (cpu_id == -1) 2607 return ((processorid_t)0); 2608 2609 for (i = cpu_id + 1; i < NCPU; i++) { 2610 if (apic_cpumask & (1 << i)) 2611 return (i); 2612 } 2613 2614 return ((processorid_t)-1); 2615 } 2616 2617 2618 /* 2619 * type == -1 indicates it is an internal request. Do not change 2620 * resv_vector for these requests 2621 */ 2622 static int 2623 apic_get_ipivect(int ipl, int type) 2624 { 2625 uchar_t vector; 2626 int irq; 2627 2628 if (irq = apic_allocate_irq(APIC_VECTOR(ipl))) { 2629 if (vector = apic_allocate_vector(ipl, irq, 1)) { 2630 apic_irq_table[irq]->airq_mps_intr_index = 2631 RESERVE_INDEX; 2632 apic_irq_table[irq]->airq_vector = vector; 2633 if (type != -1) { 2634 apic_resv_vector[ipl] = vector; 2635 } 2636 return (irq); 2637 } 2638 } 2639 apic_error |= APIC_ERR_GET_IPIVECT_FAIL; 2640 return (-1); /* shouldn't happen */ 2641 } 2642 2643 static int 2644 apic_getclkirq(int ipl) 2645 { 2646 int irq; 2647 2648 if ((irq = apic_get_ipivect(ipl, -1)) == -1) 2649 return (-1); 2650 /* 2651 * Note the vector in apic_clkvect for per clock handling. 2652 */ 2653 apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT; 2654 APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n", 2655 apic_clkvect)); 2656 return (irq); 2657 } 2658 2659 /* 2660 * Return the number of APIC clock ticks elapsed for 8245 to decrement 2661 * (APIC_TIME_COUNT + pit_ticks_adj) ticks. 2662 */ 2663 static uint_t 2664 apic_calibrate(volatile uint32_t *addr, uint16_t *pit_ticks_adj) 2665 { 2666 uint8_t pit_tick_lo; 2667 uint16_t pit_tick, target_pit_tick; 2668 uint32_t start_apic_tick, end_apic_tick; 2669 int iflag; 2670 2671 addr += APIC_CURR_COUNT; 2672 2673 iflag = intr_clear(); 2674 2675 do { 2676 pit_tick_lo = inb(PITCTR0_PORT); 2677 pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2678 } while (pit_tick < APIC_TIME_MIN || 2679 pit_tick_lo <= APIC_LB_MIN || pit_tick_lo >= APIC_LB_MAX); 2680 2681 /* 2682 * Wait for the 8254 to decrement by 5 ticks to ensure 2683 * we didn't start in the middle of a tick. 2684 * Compare with 0x10 for the wrap around case. 2685 */ 2686 target_pit_tick = pit_tick - 5; 2687 do { 2688 pit_tick_lo = inb(PITCTR0_PORT); 2689 pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2690 } while (pit_tick > target_pit_tick || pit_tick_lo < 0x10); 2691 2692 start_apic_tick = *addr; 2693 2694 /* 2695 * Wait for the 8254 to decrement by 2696 * (APIC_TIME_COUNT + pit_ticks_adj) ticks 2697 */ 2698 target_pit_tick = pit_tick - APIC_TIME_COUNT; 2699 do { 2700 pit_tick_lo = inb(PITCTR0_PORT); 2701 pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo; 2702 } while (pit_tick > target_pit_tick || pit_tick_lo < 0x10); 2703 2704 end_apic_tick = *addr; 2705 2706 *pit_ticks_adj = target_pit_tick - pit_tick; 2707 2708 intr_restore(iflag); 2709 2710 return (start_apic_tick - end_apic_tick); 2711 } 2712 2713 /* 2714 * Initialise the APIC timer on the local APIC of CPU 0 to the desired 2715 * frequency. Note at this stage in the boot sequence, the boot processor 2716 * is the only active processor. 2717 * hertz value of 0 indicates a one-shot mode request. In this case 2718 * the function returns the resolution (in nanoseconds) for the hardware 2719 * timer interrupt. If one-shot mode capability is not available, 2720 * the return value will be 0. apic_enable_oneshot is a global switch 2721 * for disabling the functionality. 2722 * A non-zero positive value for hertz indicates a periodic mode request. 2723 * In this case the hardware will be programmed to generate clock interrupts 2724 * at hertz frequency and returns the resolution of interrupts in 2725 * nanosecond. 2726 */ 2727 2728 static int 2729 apic_clkinit(int hertz) 2730 { 2731 2732 uint_t apic_ticks = 0; 2733 uint_t pit_time; 2734 int ret; 2735 uint16_t pit_ticks_adj; 2736 static int firsttime = 1; 2737 2738 if (firsttime) { 2739 /* first time calibrate */ 2740 2741 apicadr[APIC_DIVIDE_REG] = 0x0; 2742 apicadr[APIC_INIT_COUNT] = APIC_MAXVAL; 2743 2744 /* set periodic interrupt based on CLKIN */ 2745 apicadr[APIC_LOCAL_TIMER] = 2746 (apic_clkvect + APIC_BASE_VECT) | AV_TIME; 2747 tenmicrosec(); 2748 2749 apic_ticks = apic_calibrate(apicadr, &pit_ticks_adj); 2750 2751 apicadr[APIC_LOCAL_TIMER] = 2752 (apic_clkvect + APIC_BASE_VECT) | AV_MASK; 2753 /* 2754 * pit time is the amount of real time (in nanoseconds ) it took 2755 * the 8254 to decrement (APIC_TIME_COUNT + pit_ticks_adj) ticks 2756 */ 2757 pit_time = ((longlong_t)(APIC_TIME_COUNT + 2758 pit_ticks_adj) * NANOSEC) / PIT_HZ; 2759 2760 /* 2761 * Determine the number of nanoseconds per APIC clock tick 2762 * and then determine how many APIC ticks to interrupt at the 2763 * desired frequency 2764 */ 2765 apic_nsec_per_tick = pit_time / apic_ticks; 2766 if (apic_nsec_per_tick == 0) 2767 apic_nsec_per_tick = 1; 2768 2769 /* the interval timer initial count is 32 bit max */ 2770 apic_nsec_max = (hrtime_t)apic_nsec_per_tick * APIC_MAXVAL; 2771 firsttime = 0; 2772 } 2773 2774 if (hertz != 0) { 2775 /* periodic */ 2776 apic_nsec_per_intr = NANOSEC / hertz; 2777 apic_hertz_count = (longlong_t)apic_nsec_per_intr / 2778 apic_nsec_per_tick; 2779 apic_sample_factor_redistribution = hertz + 1; 2780 } 2781 2782 apic_int_busy_mark = (apic_int_busy_mark * 2783 apic_sample_factor_redistribution) / 100; 2784 apic_int_free_mark = (apic_int_free_mark * 2785 apic_sample_factor_redistribution) / 100; 2786 apic_diff_for_redistribution = (apic_diff_for_redistribution * 2787 apic_sample_factor_redistribution) / 100; 2788 2789 if (hertz == 0) { 2790 /* requested one_shot */ 2791 if (!apic_oneshot_enable) 2792 return (0); 2793 apic_oneshot = 1; 2794 ret = (int)apic_nsec_per_tick; 2795 } else { 2796 /* program the local APIC to interrupt at the given frequency */ 2797 apicadr[APIC_INIT_COUNT] = apic_hertz_count; 2798 apicadr[APIC_LOCAL_TIMER] = 2799 (apic_clkvect + APIC_BASE_VECT) | AV_TIME; 2800 apic_oneshot = 0; 2801 ret = NANOSEC / hertz; 2802 } 2803 2804 return (ret); 2805 2806 } 2807 2808 /* 2809 * apic_preshutdown: 2810 * Called early in shutdown whilst we can still access filesystems to do 2811 * things like loading modules which will be required to complete shutdown 2812 * after filesystems are all unmounted. 2813 */ 2814 static void 2815 apic_preshutdown(int cmd, int fcn) 2816 { 2817 APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n", 2818 cmd, fcn, apic_poweroff_method, apic_enable_acpi)); 2819 2820 if ((cmd != A_SHUTDOWN) || (fcn != AD_POWEROFF)) { 2821 return; 2822 } 2823 } 2824 2825 static void 2826 apic_shutdown(int cmd, int fcn) 2827 { 2828 int iflag, restarts, attempts; 2829 int i, j; 2830 volatile int32_t *ioapic; 2831 uchar_t byte; 2832 2833 /* Send NMI to all CPUs except self to do per processor shutdown */ 2834 iflag = intr_clear(); 2835 while (get_apic_cmd1() & AV_PENDING) 2836 apic_ret(); 2837 apic_shutdown_processors = 1; 2838 apicadr[APIC_INT_CMD1] = AV_NMI | AV_LEVEL | AV_SH_ALL_EXCSELF; 2839 2840 /* restore cmos shutdown byte before reboot */ 2841 if (apic_cmos_ssb_set) { 2842 outb(CMOS_ADDR, SSB); 2843 outb(CMOS_DATA, 0); 2844 } 2845 /* Disable the I/O APIC redirection entries */ 2846 for (j = 0; j < apic_io_max; j++) { 2847 int intin_max; 2848 ioapic = apicioadr[j]; 2849 ioapic[APIC_IO_REG] = APIC_VERS_CMD; 2850 /* Bits 23-16 define the maximum redirection entries */ 2851 intin_max = (ioapic[APIC_IO_DATA] >> 16) & 0xff; 2852 for (i = 0; i < intin_max; i++) { 2853 ioapic[APIC_IO_REG] = APIC_RDT_CMD + 2 * i; 2854 ioapic[APIC_IO_DATA] = AV_MASK; 2855 } 2856 } 2857 2858 /* disable apic mode if imcr present */ 2859 if (apic_imcrp) { 2860 outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT); 2861 outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_PIC); 2862 } 2863 2864 apic_disable_local_apic(); 2865 2866 intr_restore(iflag); 2867 2868 if ((cmd != A_SHUTDOWN) || (fcn != AD_POWEROFF)) { 2869 return; 2870 } 2871 2872 switch (apic_poweroff_method) { 2873 case APIC_POWEROFF_VIA_RTC: 2874 2875 /* select the extended NVRAM bank in the RTC */ 2876 outb(CMOS_ADDR, RTC_REGA); 2877 byte = inb(CMOS_DATA); 2878 outb(CMOS_DATA, (byte | EXT_BANK)); 2879 2880 outb(CMOS_ADDR, PFR_REG); 2881 2882 /* for Predator must toggle the PAB bit */ 2883 byte = inb(CMOS_DATA); 2884 2885 /* 2886 * clear power active bar, wakeup alarm and 2887 * kickstart 2888 */ 2889 byte &= ~(PAB_CBIT | WF_FLAG | KS_FLAG); 2890 outb(CMOS_DATA, byte); 2891 2892 /* delay before next write */ 2893 drv_usecwait(1000); 2894 2895 /* for S40 the following would suffice */ 2896 byte = inb(CMOS_DATA); 2897 2898 /* power active bar control bit */ 2899 byte |= PAB_CBIT; 2900 outb(CMOS_DATA, byte); 2901 2902 break; 2903 2904 case APIC_POWEROFF_VIA_ASPEN_BMC: 2905 restarts = 0; 2906 restart_aspen_bmc: 2907 if (++restarts == 3) 2908 break; 2909 attempts = 0; 2910 do { 2911 byte = inb(MISMIC_FLAG_REGISTER); 2912 byte &= MISMIC_BUSY_MASK; 2913 if (byte != 0) { 2914 drv_usecwait(1000); 2915 if (attempts >= 3) 2916 goto restart_aspen_bmc; 2917 ++attempts; 2918 } 2919 } while (byte != 0); 2920 outb(MISMIC_CNTL_REGISTER, CC_SMS_GET_STATUS); 2921 byte = inb(MISMIC_FLAG_REGISTER); 2922 byte |= 0x1; 2923 outb(MISMIC_FLAG_REGISTER, byte); 2924 i = 0; 2925 for (; i < (sizeof (aspen_bmc)/sizeof (aspen_bmc[0])); 2926 i++) { 2927 attempts = 0; 2928 do { 2929 byte = inb(MISMIC_FLAG_REGISTER); 2930 byte &= MISMIC_BUSY_MASK; 2931 if (byte != 0) { 2932 drv_usecwait(1000); 2933 if (attempts >= 3) 2934 goto restart_aspen_bmc; 2935 ++attempts; 2936 } 2937 } while (byte != 0); 2938 outb(MISMIC_CNTL_REGISTER, aspen_bmc[i].cntl); 2939 outb(MISMIC_DATA_REGISTER, aspen_bmc[i].data); 2940 byte = inb(MISMIC_FLAG_REGISTER); 2941 byte |= 0x1; 2942 outb(MISMIC_FLAG_REGISTER, byte); 2943 } 2944 break; 2945 2946 case APIC_POWEROFF_VIA_SITKA_BMC: 2947 restarts = 0; 2948 restart_sitka_bmc: 2949 if (++restarts == 3) 2950 break; 2951 attempts = 0; 2952 do { 2953 byte = inb(SMS_STATUS_REGISTER); 2954 byte &= SMS_STATE_MASK; 2955 if ((byte == SMS_READ_STATE) || 2956 (byte == SMS_WRITE_STATE)) { 2957 drv_usecwait(1000); 2958 if (attempts >= 3) 2959 goto restart_sitka_bmc; 2960 ++attempts; 2961 } 2962 } while ((byte == SMS_READ_STATE) || 2963 (byte == SMS_WRITE_STATE)); 2964 outb(SMS_COMMAND_REGISTER, SMS_GET_STATUS); 2965 i = 0; 2966 for (; i < (sizeof (sitka_bmc)/sizeof (sitka_bmc[0])); 2967 i++) { 2968 attempts = 0; 2969 do { 2970 byte = inb(SMS_STATUS_REGISTER); 2971 byte &= SMS_IBF_MASK; 2972 if (byte != 0) { 2973 drv_usecwait(1000); 2974 if (attempts >= 3) 2975 goto restart_sitka_bmc; 2976 ++attempts; 2977 } 2978 } while (byte != 0); 2979 outb(sitka_bmc[i].port, sitka_bmc[i].data); 2980 } 2981 break; 2982 2983 case APIC_POWEROFF_NONE: 2984 2985 /* If no APIC direct method, we will try using ACPI */ 2986 if (apic_enable_acpi) { 2987 if (acpi_poweroff() == 1) 2988 return; 2989 } else 2990 return; 2991 2992 break; 2993 } 2994 /* 2995 * Wait a limited time here for power to go off. 2996 * If the power does not go off, then there was a 2997 * problem and we should continue to the halt which 2998 * prints a message for the user to press a key to 2999 * reboot. 3000 */ 3001 drv_usecwait(7000000); /* wait seven seconds */ 3002 3003 } 3004 3005 /* 3006 * Try and disable all interrupts. We just assign interrupts to other 3007 * processors based on policy. If any were bound by user request, we 3008 * let them continue and return failure. We do not bother to check 3009 * for cache affinity while rebinding. 3010 */ 3011 3012 static int 3013 apic_disable_intr(processorid_t cpun) 3014 { 3015 int bind_cpu = 0, i, hardbound = 0, iflag; 3016 apic_irq_t *irq_ptr; 3017 3018 iflag = intr_clear(); 3019 lock_set(&apic_ioapic_lock); 3020 apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE; 3021 lock_clear(&apic_ioapic_lock); 3022 intr_restore(iflag); 3023 apic_cpus[cpun].aci_curipl = 0; 3024 i = apic_min_device_irq; 3025 for (; i <= apic_max_device_irq; i++) { 3026 /* 3027 * If there are bound interrupts on this cpu, then 3028 * rebind them to other processors. 3029 */ 3030 if ((irq_ptr = apic_irq_table[i]) != NULL) { 3031 ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) || 3032 (irq_ptr->airq_temp_cpu == IRQ_UNINIT) || 3033 ((irq_ptr->airq_temp_cpu & ~IRQ_USER_BOUND) < 3034 apic_nproc)); 3035 3036 if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) { 3037 hardbound = 1; 3038 continue; 3039 } 3040 3041 if (irq_ptr->airq_temp_cpu == cpun) { 3042 do { 3043 apic_next_bind_cpu += 2; 3044 bind_cpu = apic_next_bind_cpu / 2; 3045 if (bind_cpu >= apic_nproc) { 3046 apic_next_bind_cpu = 1; 3047 bind_cpu = 0; 3048 3049 } 3050 } while (apic_rebind_all(irq_ptr, bind_cpu, 1)); 3051 } 3052 } 3053 } 3054 if (hardbound) { 3055 cmn_err(CE_WARN, "Could not disable interrupts on %d" 3056 "due to user bound interrupts", cpun); 3057 return (PSM_FAILURE); 3058 } 3059 else 3060 return (PSM_SUCCESS); 3061 } 3062 3063 static void 3064 apic_enable_intr(processorid_t cpun) 3065 { 3066 int i, iflag; 3067 apic_irq_t *irq_ptr; 3068 3069 iflag = intr_clear(); 3070 lock_set(&apic_ioapic_lock); 3071 apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE; 3072 lock_clear(&apic_ioapic_lock); 3073 intr_restore(iflag); 3074 3075 i = apic_min_device_irq; 3076 for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 3077 if ((irq_ptr = apic_irq_table[i]) != NULL) { 3078 if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) { 3079 (void) apic_rebind_all(irq_ptr, 3080 irq_ptr->airq_cpu, 1); 3081 } 3082 } 3083 } 3084 } 3085 3086 /* 3087 * apic_introp_xlate() replaces apic_translate_irq() and is 3088 * called only from apic_intr_ops(). With the new ADII framework, 3089 * the priority can no longer be retrived through i_ddi_get_intrspec(). 3090 * It has to be passed in from the caller. 3091 */ 3092 int 3093 apic_introp_xlate(dev_info_t *dip, struct intrspec *ispec, int type) 3094 { 3095 char dev_type[16]; 3096 int dev_len, pci_irq, newirq, bustype, devid, busid, i; 3097 int irqno = ispec->intrspec_vec; 3098 ddi_acc_handle_t cfg_handle; 3099 uchar_t ipin; 3100 struct apic_io_intr *intrp; 3101 iflag_t intr_flag; 3102 APIC_HEADER *hp; 3103 MADT_INTERRUPT_OVERRIDE *isop; 3104 apic_irq_t *airqp; 3105 3106 DDI_INTR_IMPLDBG((CE_CONT, "apic_introp_xlate: dip=0x%p name=%s " 3107 "type=%d irqno=0x%x\n", (void *)dip, ddi_get_name(dip), type, 3108 irqno)); 3109 3110 if (DDI_INTR_IS_MSI_OR_MSIX(type)) { 3111 if ((airqp = apic_find_irq(dip, ispec, type)) != NULL) 3112 return (apic_vector_to_irq[airqp->airq_vector]); 3113 return (apic_setup_irq_table(dip, irqno, NULL, ispec, 3114 NULL, type)); 3115 } 3116 3117 bustype = 0; 3118 3119 /* check if we have already translated this irq */ 3120 mutex_enter(&airq_mutex); 3121 newirq = apic_min_device_irq; 3122 for (; newirq <= apic_max_device_irq; newirq++) { 3123 airqp = apic_irq_table[newirq]; 3124 while (airqp) { 3125 if ((airqp->airq_dip == dip) && 3126 (airqp->airq_origirq == irqno) && 3127 (airqp->airq_mps_intr_index != FREE_INDEX)) { 3128 3129 mutex_exit(&airq_mutex); 3130 return (VIRTIRQ(newirq, airqp->airq_share_id)); 3131 } 3132 airqp = airqp->airq_next; 3133 } 3134 } 3135 mutex_exit(&airq_mutex); 3136 3137 if (apic_defconf) 3138 goto defconf; 3139 3140 if ((dip == NULL) || (!apic_irq_translate && !apic_enable_acpi)) 3141 goto nonpci; 3142 3143 dev_len = sizeof (dev_type); 3144 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ddi_get_parent(dip), 3145 DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type, 3146 &dev_len) != DDI_PROP_SUCCESS) { 3147 goto nonpci; 3148 } 3149 3150 if ((strcmp(dev_type, "pci") == 0) || 3151 (strcmp(dev_type, "pciex") == 0)) { 3152 /* pci device */ 3153 if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0) 3154 goto nonpci; 3155 if (busid == 0 && apic_pci_bus_total == 1) 3156 busid = (int)apic_single_pci_busid; 3157 3158 if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS) 3159 goto nonpci; 3160 ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA; 3161 pci_config_teardown(&cfg_handle); 3162 if (apic_enable_acpi && !apic_use_acpi_madt_only) { 3163 if (apic_acpi_translate_pci_irq(dip, busid, devid, 3164 ipin, &pci_irq, &intr_flag) != ACPI_PSM_SUCCESS) 3165 goto nonpci; 3166 3167 intr_flag.bustype = BUS_PCI; 3168 if ((newirq = apic_setup_irq_table(dip, pci_irq, NULL, 3169 ispec, &intr_flag, type)) == -1) 3170 goto nonpci; 3171 return (newirq); 3172 } else { 3173 pci_irq = ((devid & 0x1f) << 2) | (ipin & 0x3); 3174 if ((intrp = apic_find_io_intr_w_busid(pci_irq, busid)) 3175 == NULL) { 3176 if ((pci_irq = apic_handle_pci_pci_bridge(dip, 3177 devid, ipin, &intrp)) == -1) 3178 goto nonpci; 3179 } 3180 if ((newirq = apic_setup_irq_table(dip, pci_irq, intrp, 3181 ispec, NULL, type)) == -1) 3182 goto nonpci; 3183 return (newirq); 3184 } 3185 } else if (strcmp(dev_type, "isa") == 0) 3186 bustype = BUS_ISA; 3187 else if (strcmp(dev_type, "eisa") == 0) 3188 bustype = BUS_EISA; 3189 3190 nonpci: 3191 if (apic_enable_acpi && !apic_use_acpi_madt_only) { 3192 /* search iso entries first */ 3193 if (acpi_iso_cnt != 0) { 3194 hp = (APIC_HEADER *)acpi_isop; 3195 i = 0; 3196 while (i < acpi_iso_cnt) { 3197 if (hp->Type == APIC_XRUPT_OVERRIDE) { 3198 isop = (MADT_INTERRUPT_OVERRIDE *)hp; 3199 if (isop->Bus == 0 && 3200 isop->Source == irqno) { 3201 newirq = isop->Interrupt; 3202 intr_flag.intr_po = 3203 isop->Polarity; 3204 intr_flag.intr_el = 3205 isop->TriggerMode; 3206 intr_flag.bustype = BUS_ISA; 3207 3208 return (apic_setup_irq_table( 3209 dip, newirq, NULL, ispec, 3210 &intr_flag, type)); 3211 3212 } 3213 i++; 3214 } 3215 hp = (APIC_HEADER *)(((char *)hp) + 3216 hp->Length); 3217 } 3218 } 3219 intr_flag.intr_po = INTR_PO_ACTIVE_HIGH; 3220 intr_flag.intr_el = INTR_EL_EDGE; 3221 intr_flag.bustype = BUS_ISA; 3222 return (apic_setup_irq_table(dip, irqno, NULL, ispec, 3223 &intr_flag, type)); 3224 } else { 3225 if (bustype == 0) 3226 bustype = eisa_level_intr_mask ? BUS_EISA : BUS_ISA; 3227 for (i = 0; i < 2; i++) { 3228 if (((busid = apic_find_bus_id(bustype)) != -1) && 3229 ((intrp = apic_find_io_intr_w_busid(irqno, busid)) 3230 != NULL)) { 3231 if ((newirq = apic_setup_irq_table(dip, irqno, 3232 intrp, ispec, NULL, type)) != -1) { 3233 return (newirq); 3234 } 3235 goto defconf; 3236 } 3237 bustype = (bustype == BUS_EISA) ? BUS_ISA : BUS_EISA; 3238 } 3239 } 3240 3241 /* MPS default configuration */ 3242 defconf: 3243 newirq = apic_setup_irq_table(dip, irqno, NULL, ispec, NULL, type); 3244 if (newirq == -1) 3245 return (newirq); 3246 ASSERT(IRQINDEX(newirq) == irqno); 3247 ASSERT(apic_irq_table[irqno]); 3248 return (newirq); 3249 } 3250 3251 3252 3253 3254 3255 3256 /* 3257 * On machines with PCI-PCI bridges, a device behind a PCI-PCI bridge 3258 * needs special handling. We may need to chase up the device tree, 3259 * using the PCI-PCI Bridge specification's "rotating IPIN assumptions", 3260 * to find the IPIN at the root bus that relates to the IPIN on the 3261 * subsidiary bus (for ACPI or MP). We may, however, have an entry 3262 * in the MP table or the ACPI namespace for this device itself. 3263 * We handle both cases in the search below. 3264 */ 3265 /* this is the non-acpi version */ 3266 static int 3267 apic_handle_pci_pci_bridge(dev_info_t *idip, int child_devno, int child_ipin, 3268 struct apic_io_intr **intrp) 3269 { 3270 dev_info_t *dipp, *dip; 3271 int pci_irq; 3272 ddi_acc_handle_t cfg_handle; 3273 int bridge_devno, bridge_bus; 3274 int ipin; 3275 3276 dip = idip; 3277 3278 /*CONSTCOND*/ 3279 while (1) { 3280 if ((dipp = ddi_get_parent(dip)) == (dev_info_t *)NULL) 3281 return (-1); 3282 if ((pci_config_setup(dipp, &cfg_handle) == DDI_SUCCESS) && 3283 (pci_config_get8(cfg_handle, PCI_CONF_BASCLASS) == 3284 PCI_CLASS_BRIDGE) && (pci_config_get8(cfg_handle, 3285 PCI_CONF_SUBCLASS) == PCI_BRIDGE_PCI)) { 3286 pci_config_teardown(&cfg_handle); 3287 if (acpica_get_bdf(dipp, &bridge_bus, &bridge_devno, 3288 NULL) != 0) 3289 return (-1); 3290 /* 3291 * This is the rotating scheme that Compaq is using 3292 * and documented in the pci to pci spec. Also, if 3293 * the pci to pci bridge is behind another pci to 3294 * pci bridge, then it need to keep transversing 3295 * up until an interrupt entry is found or reach 3296 * the top of the tree 3297 */ 3298 ipin = (child_devno + child_ipin) % PCI_INTD; 3299 if (bridge_bus == 0 && apic_pci_bus_total == 1) 3300 bridge_bus = (int)apic_single_pci_busid; 3301 pci_irq = ((bridge_devno & 0x1f) << 2) | 3302 (ipin & 0x3); 3303 if ((*intrp = apic_find_io_intr_w_busid(pci_irq, 3304 bridge_bus)) != NULL) { 3305 return (pci_irq); 3306 } 3307 dip = dipp; 3308 child_devno = bridge_devno; 3309 child_ipin = ipin; 3310 } else 3311 return (-1); 3312 } 3313 /*LINTED: function will not fall off the bottom */ 3314 } 3315 3316 3317 3318 3319 static uchar_t 3320 acpi_find_ioapic(int irq) 3321 { 3322 int i; 3323 3324 for (i = 0; i < apic_io_max; i++) { 3325 if (irq >= apic_io_vectbase[i] && irq <= apic_io_vectend[i]) 3326 return (i); 3327 } 3328 return (0xFF); /* shouldn't happen */ 3329 } 3330 3331 /* 3332 * See if two irqs are compatible for sharing a vector. 3333 * Currently we only support sharing of PCI devices. 3334 */ 3335 static int 3336 acpi_intr_compatible(iflag_t iflag1, iflag_t iflag2) 3337 { 3338 uint_t level1, po1; 3339 uint_t level2, po2; 3340 3341 /* Assume active high by default */ 3342 po1 = 0; 3343 po2 = 0; 3344 3345 if (iflag1.bustype != iflag2.bustype || iflag1.bustype != BUS_PCI) 3346 return (0); 3347 3348 if (iflag1.intr_el == INTR_EL_CONFORM) 3349 level1 = AV_LEVEL; 3350 else 3351 level1 = (iflag1.intr_el == INTR_EL_LEVEL) ? AV_LEVEL : 0; 3352 3353 if (level1 && ((iflag1.intr_po == INTR_PO_ACTIVE_LOW) || 3354 (iflag1.intr_po == INTR_PO_CONFORM))) 3355 po1 = AV_ACTIVE_LOW; 3356 3357 if (iflag2.intr_el == INTR_EL_CONFORM) 3358 level2 = AV_LEVEL; 3359 else 3360 level2 = (iflag2.intr_el == INTR_EL_LEVEL) ? AV_LEVEL : 0; 3361 3362 if (level2 && ((iflag2.intr_po == INTR_PO_ACTIVE_LOW) || 3363 (iflag2.intr_po == INTR_PO_CONFORM))) 3364 po2 = AV_ACTIVE_LOW; 3365 3366 if ((level1 == level2) && (po1 == po2)) 3367 return (1); 3368 3369 return (0); 3370 } 3371 3372 /* 3373 * Attempt to share vector with someone else 3374 */ 3375 static int 3376 apic_share_vector(int irqno, iflag_t *intr_flagp, short intr_index, int ipl, 3377 uchar_t ioapicindex, uchar_t ipin, apic_irq_t **irqptrp) 3378 { 3379 #ifdef DEBUG 3380 apic_irq_t *tmpirqp = NULL; 3381 #endif /* DEBUG */ 3382 apic_irq_t *irqptr, dummyirq; 3383 int newirq, chosen_irq = -1, share = 127; 3384 int lowest, highest, i; 3385 uchar_t share_id; 3386 3387 DDI_INTR_IMPLDBG((CE_CONT, "apic_share_vector: irqno=0x%x " 3388 "intr_index=0x%x ipl=0x%x\n", irqno, intr_index, ipl)); 3389 3390 highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK; 3391 lowest = apic_ipltopri[ipl-1] + APIC_VECTOR_PER_IPL; 3392 3393 if (highest < lowest) /* Both ipl and ipl-1 map to same pri */ 3394 lowest -= APIC_VECTOR_PER_IPL; 3395 dummyirq.airq_mps_intr_index = intr_index; 3396 dummyirq.airq_ioapicindex = ioapicindex; 3397 dummyirq.airq_intin_no = ipin; 3398 if (intr_flagp) 3399 dummyirq.airq_iflag = *intr_flagp; 3400 apic_record_rdt_entry(&dummyirq, irqno); 3401 for (i = lowest; i <= highest; i++) { 3402 newirq = apic_vector_to_irq[i]; 3403 if (newirq == APIC_RESV_IRQ) 3404 continue; 3405 irqptr = apic_irq_table[newirq]; 3406 3407 if ((dummyirq.airq_rdt_entry & 0xFF00) != 3408 (irqptr->airq_rdt_entry & 0xFF00)) 3409 /* not compatible */ 3410 continue; 3411 3412 if (irqptr->airq_share < share) { 3413 share = irqptr->airq_share; 3414 chosen_irq = newirq; 3415 } 3416 } 3417 if (chosen_irq != -1) { 3418 /* 3419 * Assign a share id which is free or which is larger 3420 * than the largest one. 3421 */ 3422 share_id = 1; 3423 mutex_enter(&airq_mutex); 3424 irqptr = apic_irq_table[chosen_irq]; 3425 while (irqptr) { 3426 if (irqptr->airq_mps_intr_index == FREE_INDEX) { 3427 share_id = irqptr->airq_share_id; 3428 break; 3429 } 3430 if (share_id <= irqptr->airq_share_id) 3431 share_id = irqptr->airq_share_id + 1; 3432 #ifdef DEBUG 3433 tmpirqp = irqptr; 3434 #endif /* DEBUG */ 3435 irqptr = irqptr->airq_next; 3436 } 3437 if (!irqptr) { 3438 irqptr = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP); 3439 irqptr->airq_temp_cpu = IRQ_UNINIT; 3440 irqptr->airq_next = 3441 apic_irq_table[chosen_irq]->airq_next; 3442 apic_irq_table[chosen_irq]->airq_next = irqptr; 3443 #ifdef DEBUG 3444 tmpirqp = apic_irq_table[chosen_irq]; 3445 #endif /* DEBUG */ 3446 } 3447 irqptr->airq_mps_intr_index = intr_index; 3448 irqptr->airq_ioapicindex = ioapicindex; 3449 irqptr->airq_intin_no = ipin; 3450 if (intr_flagp) 3451 irqptr->airq_iflag = *intr_flagp; 3452 irqptr->airq_vector = apic_irq_table[chosen_irq]->airq_vector; 3453 irqptr->airq_share_id = share_id; 3454 apic_record_rdt_entry(irqptr, irqno); 3455 *irqptrp = irqptr; 3456 #ifdef DEBUG 3457 /* shuffle the pointers to test apic_delspl path */ 3458 if (tmpirqp) { 3459 tmpirqp->airq_next = irqptr->airq_next; 3460 irqptr->airq_next = apic_irq_table[chosen_irq]; 3461 apic_irq_table[chosen_irq] = irqptr; 3462 } 3463 #endif /* DEBUG */ 3464 mutex_exit(&airq_mutex); 3465 return (VIRTIRQ(chosen_irq, share_id)); 3466 } 3467 return (-1); 3468 } 3469 3470 /* 3471 * 3472 */ 3473 static int 3474 apic_setup_irq_table(dev_info_t *dip, int irqno, struct apic_io_intr *intrp, 3475 struct intrspec *ispec, iflag_t *intr_flagp, int type) 3476 { 3477 int origirq = ispec->intrspec_vec; 3478 uchar_t ipl = ispec->intrspec_pri; 3479 int newirq, intr_index; 3480 uchar_t ipin, ioapic, ioapicindex, vector; 3481 apic_irq_t *irqptr; 3482 major_t major; 3483 dev_info_t *sdip; 3484 3485 DDI_INTR_IMPLDBG((CE_CONT, "apic_setup_irq_table: dip=0x%p type=%d " 3486 "irqno=0x%x origirq=0x%x\n", (void *)dip, type, irqno, origirq)); 3487 3488 ASSERT(ispec != NULL); 3489 3490 major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0; 3491 3492 if (DDI_INTR_IS_MSI_OR_MSIX(type)) { 3493 /* MSI/X doesn't need to setup ioapic stuffs */ 3494 ioapicindex = 0xff; 3495 ioapic = 0xff; 3496 ipin = (uchar_t)0xff; 3497 intr_index = (type == DDI_INTR_TYPE_MSI) ? MSI_INDEX : 3498 MSIX_INDEX; 3499 mutex_enter(&airq_mutex); 3500 if ((irqno = apic_allocate_irq(apic_first_avail_irq)) == -1) { 3501 mutex_exit(&airq_mutex); 3502 /* need an irq for MSI/X to index into autovect[] */ 3503 cmn_err(CE_WARN, "No interrupt irq: %s instance %d", 3504 ddi_get_name(dip), ddi_get_instance(dip)); 3505 return (-1); 3506 } 3507 mutex_exit(&airq_mutex); 3508 3509 } else if (intrp != NULL) { 3510 intr_index = (int)(intrp - apic_io_intrp); 3511 ioapic = intrp->intr_destid; 3512 ipin = intrp->intr_destintin; 3513 /* Find ioapicindex. If destid was ALL, we will exit with 0. */ 3514 for (ioapicindex = apic_io_max - 1; ioapicindex; ioapicindex--) 3515 if (apic_io_id[ioapicindex] == ioapic) 3516 break; 3517 ASSERT((ioapic == apic_io_id[ioapicindex]) || 3518 (ioapic == INTR_ALL_APIC)); 3519 3520 /* check whether this intin# has been used by another irqno */ 3521 if ((newirq = apic_find_intin(ioapicindex, ipin)) != -1) { 3522 return (newirq); 3523 } 3524 3525 } else if (intr_flagp != NULL) { 3526 /* ACPI case */ 3527 intr_index = ACPI_INDEX; 3528 ioapicindex = acpi_find_ioapic(irqno); 3529 ASSERT(ioapicindex != 0xFF); 3530 ioapic = apic_io_id[ioapicindex]; 3531 ipin = irqno - apic_io_vectbase[ioapicindex]; 3532 if (apic_irq_table[irqno] && 3533 apic_irq_table[irqno]->airq_mps_intr_index == ACPI_INDEX) { 3534 ASSERT(apic_irq_table[irqno]->airq_intin_no == ipin && 3535 apic_irq_table[irqno]->airq_ioapicindex == 3536 ioapicindex); 3537 return (irqno); 3538 } 3539 3540 } else { 3541 /* default configuration */ 3542 ioapicindex = 0; 3543 ioapic = apic_io_id[ioapicindex]; 3544 ipin = (uchar_t)irqno; 3545 intr_index = DEFAULT_INDEX; 3546 } 3547 3548 if (ispec == NULL) { 3549 APIC_VERBOSE_IOAPIC((CE_WARN, "No intrspec for irqno = %x\n", 3550 irqno)); 3551 } else if ((vector = apic_allocate_vector(ipl, irqno, 0)) == 0) { 3552 if ((newirq = apic_share_vector(irqno, intr_flagp, intr_index, 3553 ipl, ioapicindex, ipin, &irqptr)) != -1) { 3554 irqptr->airq_ipl = ipl; 3555 irqptr->airq_origirq = (uchar_t)origirq; 3556 irqptr->airq_dip = dip; 3557 irqptr->airq_major = major; 3558 sdip = apic_irq_table[IRQINDEX(newirq)]->airq_dip; 3559 /* This is OK to do really */ 3560 if (sdip == NULL) { 3561 cmn_err(CE_WARN, "Sharing vectors: %s" 3562 " instance %d and SCI", 3563 ddi_get_name(dip), ddi_get_instance(dip)); 3564 } else { 3565 cmn_err(CE_WARN, "Sharing vectors: %s" 3566 " instance %d and %s instance %d", 3567 ddi_get_name(sdip), ddi_get_instance(sdip), 3568 ddi_get_name(dip), ddi_get_instance(dip)); 3569 } 3570 return (newirq); 3571 } 3572 /* try high priority allocation now that share has failed */ 3573 if ((vector = apic_allocate_vector(ipl, irqno, 1)) == 0) { 3574 cmn_err(CE_WARN, "No interrupt vector: %s instance %d", 3575 ddi_get_name(dip), ddi_get_instance(dip)); 3576 return (-1); 3577 } 3578 } 3579 3580 mutex_enter(&airq_mutex); 3581 if (apic_irq_table[irqno] == NULL) { 3582 irqptr = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP); 3583 irqptr->airq_temp_cpu = IRQ_UNINIT; 3584 apic_irq_table[irqno] = irqptr; 3585 } else { 3586 irqptr = apic_irq_table[irqno]; 3587 if (irqptr->airq_mps_intr_index != FREE_INDEX) { 3588 /* 3589 * The slot is used by another irqno, so allocate 3590 * a free irqno for this interrupt 3591 */ 3592 newirq = apic_allocate_irq(apic_first_avail_irq); 3593 if (newirq == -1) { 3594 mutex_exit(&airq_mutex); 3595 return (-1); 3596 } 3597 irqno = newirq; 3598 irqptr = apic_irq_table[irqno]; 3599 if (irqptr == NULL) { 3600 irqptr = kmem_zalloc(sizeof (apic_irq_t), 3601 KM_SLEEP); 3602 irqptr->airq_temp_cpu = IRQ_UNINIT; 3603 apic_irq_table[irqno] = irqptr; 3604 } 3605 apic_modify_vector(vector, newirq); 3606 } 3607 } 3608 apic_max_device_irq = max(irqno, apic_max_device_irq); 3609 apic_min_device_irq = min(irqno, apic_min_device_irq); 3610 mutex_exit(&airq_mutex); 3611 irqptr->airq_ioapicindex = ioapicindex; 3612 irqptr->airq_intin_no = ipin; 3613 irqptr->airq_ipl = ipl; 3614 irqptr->airq_vector = vector; 3615 irqptr->airq_origirq = (uchar_t)origirq; 3616 irqptr->airq_share_id = 0; 3617 irqptr->airq_mps_intr_index = (short)intr_index; 3618 irqptr->airq_dip = dip; 3619 irqptr->airq_major = major; 3620 irqptr->airq_cpu = apic_bind_intr(dip, irqno, ioapic, ipin); 3621 if (intr_flagp) 3622 irqptr->airq_iflag = *intr_flagp; 3623 3624 if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { 3625 /* setup I/O APIC entry for non-MSI/X interrupts */ 3626 apic_record_rdt_entry(irqptr, irqno); 3627 } 3628 return (irqno); 3629 } 3630 3631 /* 3632 * return the cpu to which this intr should be bound. 3633 * Check properties or any other mechanism to see if user wants it 3634 * bound to a specific CPU. If so, return the cpu id with high bit set. 3635 * If not, use the policy to choose a cpu and return the id. 3636 */ 3637 uchar_t 3638 apic_bind_intr(dev_info_t *dip, int irq, uchar_t ioapicid, uchar_t intin) 3639 { 3640 int instance, instno, prop_len, bind_cpu, count; 3641 uint_t i, rc; 3642 uchar_t cpu; 3643 major_t major; 3644 char *name, *drv_name, *prop_val, *cptr; 3645 char prop_name[32]; 3646 3647 3648 if (apic_intr_policy == INTR_LOWEST_PRIORITY) 3649 return (IRQ_UNBOUND); 3650 3651 drv_name = NULL; 3652 rc = DDI_PROP_NOT_FOUND; 3653 major = (major_t)-1; 3654 if (dip != NULL) { 3655 name = ddi_get_name(dip); 3656 major = ddi_name_to_major(name); 3657 drv_name = ddi_major_to_name(major); 3658 instance = ddi_get_instance(dip); 3659 if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) { 3660 i = apic_min_device_irq; 3661 for (; i <= apic_max_device_irq; i++) { 3662 3663 if ((i == irq) || (apic_irq_table[i] == NULL) || 3664 (apic_irq_table[i]->airq_mps_intr_index 3665 == FREE_INDEX)) 3666 continue; 3667 3668 if ((apic_irq_table[i]->airq_major == major) && 3669 (!(apic_irq_table[i]->airq_cpu & 3670 IRQ_USER_BOUND))) { 3671 3672 cpu = apic_irq_table[i]->airq_cpu; 3673 3674 cmn_err(CE_CONT, 3675 "!pcplusmp: %s (%s) instance #%d " 3676 "vector 0x%x ioapic 0x%x " 3677 "intin 0x%x is bound to cpu %d\n", 3678 name, drv_name, instance, irq, 3679 ioapicid, intin, cpu); 3680 return (cpu); 3681 } 3682 } 3683 } 3684 /* 3685 * search for "drvname"_intpt_bind_cpus property first, the 3686 * syntax of the property should be "a[,b,c,...]" where 3687 * instance 0 binds to cpu a, instance 1 binds to cpu b, 3688 * instance 3 binds to cpu c... 3689 * ddi_getlongprop() will search /option first, then / 3690 * if "drvname"_intpt_bind_cpus doesn't exist, then find 3691 * intpt_bind_cpus property. The syntax is the same, and 3692 * it applies to all the devices if its "drvname" specific 3693 * property doesn't exist 3694 */ 3695 (void) strcpy(prop_name, drv_name); 3696 (void) strcat(prop_name, "_intpt_bind_cpus"); 3697 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, prop_name, 3698 (caddr_t)&prop_val, &prop_len); 3699 if (rc != DDI_PROP_SUCCESS) { 3700 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, 3701 "intpt_bind_cpus", (caddr_t)&prop_val, &prop_len); 3702 } 3703 } 3704 if (rc == DDI_PROP_SUCCESS) { 3705 for (i = count = 0; i < (prop_len - 1); i++) 3706 if (prop_val[i] == ',') 3707 count++; 3708 if (prop_val[i-1] != ',') 3709 count++; 3710 /* 3711 * if somehow the binding instances defined in the 3712 * property are not enough for this instno., then 3713 * reuse the pattern for the next instance until 3714 * it reaches the requested instno 3715 */ 3716 instno = instance % count; 3717 i = 0; 3718 cptr = prop_val; 3719 while (i < instno) 3720 if (*cptr++ == ',') 3721 i++; 3722 bind_cpu = stoi(&cptr); 3723 kmem_free(prop_val, prop_len); 3724 /* if specific cpu is bogus, then default to cpu 0 */ 3725 if (bind_cpu >= apic_nproc) { 3726 cmn_err(CE_WARN, "pcplusmp: %s=%s: CPU %d not present", 3727 prop_name, prop_val, bind_cpu); 3728 bind_cpu = 0; 3729 } else { 3730 /* indicate that we are bound at user request */ 3731 bind_cpu |= IRQ_USER_BOUND; 3732 } 3733 /* 3734 * no need to check apic_cpus[].aci_status, if specific cpu is 3735 * not up, then post_cpu_start will handle it. 3736 */ 3737 } else { 3738 /* 3739 * We change bind_cpu only for every two calls 3740 * as most drivers still do 2 add_intrs for every 3741 * interrupt 3742 */ 3743 bind_cpu = (apic_next_bind_cpu++) / 2; 3744 if (bind_cpu >= apic_nproc) { 3745 apic_next_bind_cpu = 1; 3746 bind_cpu = 0; 3747 } 3748 } 3749 if (drv_name != NULL) 3750 cmn_err(CE_CONT, "!pcplusmp: %s (%s) instance %d " 3751 "vector 0x%x ioapic 0x%x intin 0x%x is bound to cpu %d\n", 3752 name, drv_name, instance, 3753 irq, ioapicid, intin, bind_cpu & ~IRQ_USER_BOUND); 3754 else 3755 cmn_err(CE_CONT, "!pcplusmp: " 3756 "vector 0x%x ioapic 0x%x intin 0x%x is bound to cpu %d\n", 3757 irq, ioapicid, intin, bind_cpu & ~IRQ_USER_BOUND); 3758 3759 return ((uchar_t)bind_cpu); 3760 } 3761 3762 static struct apic_io_intr * 3763 apic_find_io_intr_w_busid(int irqno, int busid) 3764 { 3765 struct apic_io_intr *intrp; 3766 3767 /* 3768 * It can have more than 1 entry with same source bus IRQ, 3769 * but unique with the source bus id 3770 */ 3771 intrp = apic_io_intrp; 3772 if (intrp != NULL) { 3773 while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 3774 if (intrp->intr_irq == irqno && 3775 intrp->intr_busid == busid && 3776 intrp->intr_type == IO_INTR_INT) 3777 return (intrp); 3778 intrp++; 3779 } 3780 } 3781 APIC_VERBOSE_IOAPIC((CE_NOTE, "Did not find io intr for irqno:" 3782 "busid %x:%x\n", irqno, busid)); 3783 return ((struct apic_io_intr *)NULL); 3784 } 3785 3786 3787 struct mps_bus_info { 3788 char *bus_name; 3789 int bus_id; 3790 } bus_info_array[] = { 3791 "ISA ", BUS_ISA, 3792 "PCI ", BUS_PCI, 3793 "EISA ", BUS_EISA, 3794 "XPRESS", BUS_XPRESS, 3795 "PCMCIA", BUS_PCMCIA, 3796 "VL ", BUS_VL, 3797 "CBUS ", BUS_CBUS, 3798 "CBUSII", BUS_CBUSII, 3799 "FUTURE", BUS_FUTURE, 3800 "INTERN", BUS_INTERN, 3801 "MBI ", BUS_MBI, 3802 "MBII ", BUS_MBII, 3803 "MPI ", BUS_MPI, 3804 "MPSA ", BUS_MPSA, 3805 "NUBUS ", BUS_NUBUS, 3806 "TC ", BUS_TC, 3807 "VME ", BUS_VME 3808 }; 3809 3810 static int 3811 apic_find_bus_type(char *bus) 3812 { 3813 int i = 0; 3814 3815 for (; i < sizeof (bus_info_array)/sizeof (struct mps_bus_info); i++) 3816 if (strncmp(bus, bus_info_array[i].bus_name, 3817 strlen(bus_info_array[i].bus_name)) == 0) 3818 return (bus_info_array[i].bus_id); 3819 APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus type for bus %s", bus)); 3820 return (0); 3821 } 3822 3823 static int 3824 apic_find_bus(int busid) 3825 { 3826 struct apic_bus *busp; 3827 3828 busp = apic_busp; 3829 while (busp->bus_entry == APIC_BUS_ENTRY) { 3830 if (busp->bus_id == busid) 3831 return (apic_find_bus_type((char *)&busp->bus_str1)); 3832 busp++; 3833 } 3834 APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus for bus id %x", busid)); 3835 return (0); 3836 } 3837 3838 static int 3839 apic_find_bus_id(int bustype) 3840 { 3841 struct apic_bus *busp; 3842 3843 busp = apic_busp; 3844 while (busp->bus_entry == APIC_BUS_ENTRY) { 3845 if (apic_find_bus_type((char *)&busp->bus_str1) == bustype) 3846 return (busp->bus_id); 3847 busp++; 3848 } 3849 APIC_VERBOSE_IOAPIC((CE_WARN, "Did not find bus id for bustype %x", 3850 bustype)); 3851 return (-1); 3852 } 3853 3854 /* 3855 * Check if a particular irq need to be reserved for any io_intr 3856 */ 3857 static struct apic_io_intr * 3858 apic_find_io_intr(int irqno) 3859 { 3860 struct apic_io_intr *intrp; 3861 3862 intrp = apic_io_intrp; 3863 if (intrp != NULL) { 3864 while (intrp->intr_entry == APIC_IO_INTR_ENTRY) { 3865 if (intrp->intr_irq == irqno && 3866 intrp->intr_type == IO_INTR_INT) 3867 return (intrp); 3868 intrp++; 3869 } 3870 } 3871 return ((struct apic_io_intr *)NULL); 3872 } 3873 3874 /* 3875 * Check if the given ioapicindex intin combination has already been assigned 3876 * an irq. If so return irqno. Else -1 3877 */ 3878 static int 3879 apic_find_intin(uchar_t ioapic, uchar_t intin) 3880 { 3881 apic_irq_t *irqptr; 3882 int i; 3883 3884 /* find ioapic and intin in the apic_irq_table[] and return the index */ 3885 for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) { 3886 irqptr = apic_irq_table[i]; 3887 while (irqptr) { 3888 if ((irqptr->airq_mps_intr_index >= 0) && 3889 (irqptr->airq_intin_no == intin) && 3890 (irqptr->airq_ioapicindex == ioapic)) { 3891 APIC_VERBOSE_IOAPIC((CE_NOTE, "!Found irq " 3892 "entry for ioapic:intin %x:%x " 3893 "shared interrupts ?", ioapic, intin)); 3894 return (i); 3895 } 3896 irqptr = irqptr->airq_next; 3897 } 3898 } 3899 return (-1); 3900 } 3901 3902 int 3903 apic_allocate_irq(int irq) 3904 { 3905 int freeirq, i; 3906 3907 if ((freeirq = apic_find_free_irq(irq, (APIC_RESV_IRQ - 1))) == -1) 3908 if ((freeirq = apic_find_free_irq(APIC_FIRST_FREE_IRQ, 3909 (irq - 1))) == -1) { 3910 /* 3911 * if BIOS really defines every single irq in the mps 3912 * table, then don't worry about conflicting with 3913 * them, just use any free slot in apic_irq_table 3914 */ 3915 for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) { 3916 if ((apic_irq_table[i] == NULL) || 3917 apic_irq_table[i]->airq_mps_intr_index == 3918 FREE_INDEX) { 3919 freeirq = i; 3920 break; 3921 } 3922 } 3923 if (freeirq == -1) { 3924 /* This shouldn't happen, but just in case */ 3925 cmn_err(CE_WARN, "pcplusmp: NO available IRQ"); 3926 return (-1); 3927 } 3928 } 3929 if (apic_irq_table[freeirq] == NULL) { 3930 apic_irq_table[freeirq] = 3931 kmem_zalloc(sizeof (apic_irq_t), KM_NOSLEEP); 3932 if (apic_irq_table[freeirq] == NULL) { 3933 cmn_err(CE_WARN, "pcplusmp: NO memory to allocate IRQ"); 3934 return (-1); 3935 } 3936 apic_irq_table[freeirq]->airq_mps_intr_index = FREE_INDEX; 3937 } 3938 return (freeirq); 3939 } 3940 3941 static int 3942 apic_find_free_irq(int start, int end) 3943 { 3944 int i; 3945 3946 for (i = start; i <= end; i++) 3947 /* Check if any I/O entry needs this IRQ */ 3948 if (apic_find_io_intr(i) == NULL) { 3949 /* Then see if it is free */ 3950 if ((apic_irq_table[i] == NULL) || 3951 (apic_irq_table[i]->airq_mps_intr_index == 3952 FREE_INDEX)) { 3953 return (i); 3954 } 3955 } 3956 return (-1); 3957 } 3958 3959 /* 3960 * Allocate a free vector for irq at ipl. Takes care of merging of multiple 3961 * IPLs into a single APIC level as well as stretching some IPLs onto multiple 3962 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority 3963 * requests and allocated only when pri is set. 3964 */ 3965 static uchar_t 3966 apic_allocate_vector(int ipl, int irq, int pri) 3967 { 3968 int lowest, highest, i; 3969 3970 highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK; 3971 lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL; 3972 3973 if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */ 3974 lowest -= APIC_VECTOR_PER_IPL; 3975 3976 #ifdef DEBUG 3977 if (apic_restrict_vector) /* for testing shared interrupt logic */ 3978 highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS; 3979 #endif /* DEBUG */ 3980 if (pri == 0) 3981 highest -= APIC_HI_PRI_VECTS; 3982 3983 for (i = lowest; i < highest; i++) { 3984 if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR) || 3985 (i == T_SYSCALLINT) || (i == T_DTRACE_PROBE) || 3986 (i == T_DTRACE_RET)) 3987 continue; 3988 if (apic_vector_to_irq[i] == APIC_RESV_IRQ) { 3989 apic_vector_to_irq[i] = (uchar_t)irq; 3990 return (i); 3991 } 3992 } 3993 3994 return (0); 3995 } 3996 3997 static void 3998 apic_modify_vector(uchar_t vector, int irq) 3999 { 4000 apic_vector_to_irq[vector] = (uchar_t)irq; 4001 } 4002 4003 /* 4004 * Mark vector as being in the process of being deleted. Interrupts 4005 * may still come in on some CPU. The moment an interrupt comes with 4006 * the new vector, we know we can free the old one. Called only from 4007 * addspl and delspl with interrupts disabled. Because an interrupt 4008 * can be shared, but no interrupt from either device may come in, 4009 * we also use a timeout mechanism, which we arbitrarily set to 4010 * apic_revector_timeout microseconds. 4011 */ 4012 static void 4013 apic_mark_vector(uchar_t oldvector, uchar_t newvector) 4014 { 4015 int iflag = intr_clear(); 4016 lock_set(&apic_revector_lock); 4017 if (!apic_oldvec_to_newvec) { 4018 apic_oldvec_to_newvec = 4019 kmem_zalloc(sizeof (newvector) * APIC_MAX_VECTOR * 2, 4020 KM_NOSLEEP); 4021 4022 if (!apic_oldvec_to_newvec) { 4023 /* 4024 * This failure is not catastrophic. 4025 * But, the oldvec will never be freed. 4026 */ 4027 apic_error |= APIC_ERR_MARK_VECTOR_FAIL; 4028 lock_clear(&apic_revector_lock); 4029 intr_restore(iflag); 4030 return; 4031 } 4032 apic_newvec_to_oldvec = &apic_oldvec_to_newvec[APIC_MAX_VECTOR]; 4033 } 4034 4035 /* See if we already did this for drivers which do double addintrs */ 4036 if (apic_oldvec_to_newvec[oldvector] != newvector) { 4037 apic_oldvec_to_newvec[oldvector] = newvector; 4038 apic_newvec_to_oldvec[newvector] = oldvector; 4039 apic_revector_pending++; 4040 } 4041 lock_clear(&apic_revector_lock); 4042 intr_restore(iflag); 4043 (void) timeout(apic_xlate_vector_free_timeout_handler, 4044 (void *)(uintptr_t)oldvector, drv_usectohz(apic_revector_timeout)); 4045 } 4046 4047 /* 4048 * xlate_vector is called from intr_enter if revector_pending is set. 4049 * It will xlate it if needed and mark the old vector as free. 4050 */ 4051 static uchar_t 4052 apic_xlate_vector(uchar_t vector) 4053 { 4054 uchar_t newvector, oldvector = 0; 4055 4056 lock_set(&apic_revector_lock); 4057 /* Do we really need to do this ? */ 4058 if (!apic_revector_pending) { 4059 lock_clear(&apic_revector_lock); 4060 return (vector); 4061 } 4062 if ((newvector = apic_oldvec_to_newvec[vector]) != 0) 4063 oldvector = vector; 4064 else { 4065 /* 4066 * The incoming vector is new . See if a stale entry is 4067 * remaining 4068 */ 4069 if ((oldvector = apic_newvec_to_oldvec[vector]) != 0) 4070 newvector = vector; 4071 } 4072 4073 if (oldvector) { 4074 apic_revector_pending--; 4075 apic_oldvec_to_newvec[oldvector] = 0; 4076 apic_newvec_to_oldvec[newvector] = 0; 4077 apic_free_vector(oldvector); 4078 lock_clear(&apic_revector_lock); 4079 /* There could have been more than one reprogramming! */ 4080 return (apic_xlate_vector(newvector)); 4081 } 4082 lock_clear(&apic_revector_lock); 4083 return (vector); 4084 } 4085 4086 void 4087 apic_xlate_vector_free_timeout_handler(void *arg) 4088 { 4089 int iflag; 4090 uchar_t oldvector, newvector; 4091 4092 oldvector = (uchar_t)(uintptr_t)arg; 4093 iflag = intr_clear(); 4094 lock_set(&apic_revector_lock); 4095 if ((newvector = apic_oldvec_to_newvec[oldvector]) != 0) { 4096 apic_free_vector(oldvector); 4097 apic_oldvec_to_newvec[oldvector] = 0; 4098 apic_newvec_to_oldvec[newvector] = 0; 4099 apic_revector_pending--; 4100 } 4101 4102 lock_clear(&apic_revector_lock); 4103 intr_restore(iflag); 4104 } 4105 4106 4107 /* Mark vector as not being used by any irq */ 4108 static void 4109 apic_free_vector(uchar_t vector) 4110 { 4111 apic_vector_to_irq[vector] = APIC_RESV_IRQ; 4112 } 4113 4114 /* 4115 * compute the polarity, trigger mode and vector for programming into 4116 * the I/O apic and record in airq_rdt_entry. 4117 */ 4118 static void 4119 apic_record_rdt_entry(apic_irq_t *irqptr, int irq) 4120 { 4121 int ioapicindex, bus_type, vector; 4122 short intr_index; 4123 uint_t level, po, io_po; 4124 struct apic_io_intr *iointrp; 4125 4126 intr_index = irqptr->airq_mps_intr_index; 4127 DDI_INTR_IMPLDBG((CE_CONT, "apic_record_rdt_entry: intr_index=%d " 4128 "irq = 0x%x dip = 0x%p vector = 0x%x\n", intr_index, irq, 4129 (void *)irqptr->airq_dip, irqptr->airq_vector)); 4130 4131 if (intr_index == RESERVE_INDEX) { 4132 apic_error |= APIC_ERR_INVALID_INDEX; 4133 return; 4134 } else if (APIC_IS_MSI_OR_MSIX_INDEX(intr_index)) { 4135 return; 4136 } 4137 4138 vector = irqptr->airq_vector; 4139 ioapicindex = irqptr->airq_ioapicindex; 4140 /* Assume edge triggered by default */ 4141 level = 0; 4142 /* Assume active high by default */ 4143 po = 0; 4144 4145 if (intr_index == DEFAULT_INDEX || intr_index == FREE_INDEX) { 4146 ASSERT(irq < 16); 4147 if (eisa_level_intr_mask & (1 << irq)) 4148 level = AV_LEVEL; 4149 if (intr_index == FREE_INDEX && apic_defconf == 0) 4150 apic_error |= APIC_ERR_INVALID_INDEX; 4151 } else if (intr_index == ACPI_INDEX) { 4152 bus_type = irqptr->airq_iflag.bustype; 4153 if (irqptr->airq_iflag.intr_el == INTR_EL_CONFORM) { 4154 if (bus_type == BUS_PCI) 4155 level = AV_LEVEL; 4156 } else 4157 level = (irqptr->airq_iflag.intr_el == INTR_EL_LEVEL) ? 4158 AV_LEVEL : 0; 4159 if (level && 4160 ((irqptr->airq_iflag.intr_po == INTR_PO_ACTIVE_LOW) || 4161 (irqptr->airq_iflag.intr_po == INTR_PO_CONFORM && 4162 bus_type == BUS_PCI))) 4163 po = AV_ACTIVE_LOW; 4164 } else { 4165 iointrp = apic_io_intrp + intr_index; 4166 bus_type = apic_find_bus(iointrp->intr_busid); 4167 if (iointrp->intr_el == INTR_EL_CONFORM) { 4168 if ((irq < 16) && (eisa_level_intr_mask & (1 << irq))) 4169 level = AV_LEVEL; 4170 else if (bus_type == BUS_PCI) 4171 level = AV_LEVEL; 4172 } else 4173 level = (iointrp->intr_el == INTR_EL_LEVEL) ? 4174 AV_LEVEL : 0; 4175 if (level && ((iointrp->intr_po == INTR_PO_ACTIVE_LOW) || 4176 (iointrp->intr_po == INTR_PO_CONFORM && 4177 bus_type == BUS_PCI))) 4178 po = AV_ACTIVE_LOW; 4179 } 4180 if (level) 4181 apic_level_intr[irq] = 1; 4182 /* 4183 * The 82489DX External APIC cannot do active low polarity interrupts. 4184 */ 4185 if (po && (apic_io_ver[ioapicindex] != IOAPIC_VER_82489DX)) 4186 io_po = po; 4187 else 4188 io_po = 0; 4189 4190 if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) 4191 printf("setio: ioapic=%x intin=%x level=%x po=%x vector=%x\n", 4192 ioapicindex, irqptr->airq_intin_no, level, io_po, vector); 4193 4194 irqptr->airq_rdt_entry = level|io_po|vector; 4195 } 4196 4197 /* 4198 * Call rebind to do the actual programming. 4199 */ 4200 static int 4201 apic_setup_io_intr(apic_irq_t *irqptr, int irq) 4202 { 4203 int rv; 4204 4205 if (rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, 1, 4206 IMMEDIATE)) 4207 /* CPU is not up or interrupt is disabled. Fall back to 0 */ 4208 rv = apic_rebind(irqptr, 0, 1, IMMEDIATE); 4209 4210 return (rv); 4211 } 4212 4213 /* 4214 * Deferred reprogramming: Call apic_rebind to do the real work. 4215 */ 4216 static int 4217 apic_setup_io_intr_deferred(apic_irq_t *irqptr, int irq) 4218 { 4219 int rv; 4220 4221 if (rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, 1, 4222 DEFERRED)) 4223 /* CPU is not up or interrupt is disabled. Fall back to 0 */ 4224 rv = apic_rebind(irqptr, 0, 1, DEFERRED); 4225 4226 return (rv); 4227 } 4228 4229 /* 4230 * Bind interrupt corresponding to irq_ptr to bind_cpu. acquire_lock 4231 * if false (0) means lock is already held (e.g: in rebind_all). 4232 */ 4233 static int 4234 apic_rebind(apic_irq_t *irq_ptr, int bind_cpu, int acquire_lock, int when) 4235 { 4236 int intin_no; 4237 volatile int32_t *ioapic; 4238 uchar_t airq_temp_cpu; 4239 apic_cpus_info_t *cpu_infop; 4240 int iflag; 4241 int which_irq = apic_vector_to_irq[irq_ptr->airq_vector]; 4242 4243 intin_no = irq_ptr->airq_intin_no; 4244 ioapic = apicioadr[irq_ptr->airq_ioapicindex]; 4245 airq_temp_cpu = irq_ptr->airq_temp_cpu; 4246 if (airq_temp_cpu != IRQ_UNINIT && airq_temp_cpu != IRQ_UNBOUND) { 4247 if (airq_temp_cpu & IRQ_USER_BOUND) 4248 /* Mask off high bit so it can be used as array index */ 4249 airq_temp_cpu &= ~IRQ_USER_BOUND; 4250 4251 ASSERT(airq_temp_cpu < apic_nproc); 4252 } 4253 4254 iflag = intr_clear(); 4255 4256 if (acquire_lock) 4257 lock_set(&apic_ioapic_lock); 4258 4259 /* 4260 * Can't bind to a CPU that's not online: 4261 */ 4262 cpu_infop = &apic_cpus[bind_cpu & ~IRQ_USER_BOUND]; 4263 if (!(cpu_infop->aci_status & APIC_CPU_INTR_ENABLE)) { 4264 4265 if (acquire_lock) 4266 lock_clear(&apic_ioapic_lock); 4267 4268 intr_restore(iflag); 4269 return (1); 4270 } 4271 4272 /* 4273 * If this is a deferred reprogramming attempt, ensure we have 4274 * not been passed stale data: 4275 */ 4276 if ((when == DEFERRED) && 4277 (apic_reprogram_info[which_irq].valid == 0)) { 4278 /* stale info, so just return */ 4279 if (acquire_lock) 4280 lock_clear(&apic_ioapic_lock); 4281 4282 intr_restore(iflag); 4283 return (0); 4284 } 4285 4286 /* 4287 * If this interrupt has been delivered to a CPU and that CPU 4288 * has not handled it yet, we cannot reprogram the IOAPIC now: 4289 */ 4290 if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index) && 4291 apic_check_stuck_interrupt(irq_ptr, airq_temp_cpu, bind_cpu, 4292 ioapic, intin_no, which_irq) != 0) { 4293 4294 if (acquire_lock) 4295 lock_clear(&apic_ioapic_lock); 4296 4297 intr_restore(iflag); 4298 return (0); 4299 } 4300 4301 /* 4302 * NOTE: We do not unmask the RDT here, as an interrupt MAY still 4303 * come in before we have a chance to reprogram it below. The 4304 * reprogramming below will simultaneously change and unmask the 4305 * RDT entry. 4306 */ 4307 4308 if ((uchar_t)bind_cpu == IRQ_UNBOUND) { 4309 /* Write the RDT entry -- no specific CPU binding */ 4310 WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapic, intin_no, AV_TOALL); 4311 4312 if (airq_temp_cpu != IRQ_UNINIT && airq_temp_cpu != IRQ_UNBOUND) 4313 apic_cpus[airq_temp_cpu].aci_temp_bound--; 4314 4315 /* Write the vector, trigger, and polarity portion of the RDT */ 4316 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4317 AV_LDEST | AV_LOPRI | irq_ptr->airq_rdt_entry); 4318 if (acquire_lock) 4319 lock_clear(&apic_ioapic_lock); 4320 irq_ptr->airq_temp_cpu = IRQ_UNBOUND; 4321 intr_restore(iflag); 4322 return (0); 4323 } 4324 4325 if (bind_cpu & IRQ_USER_BOUND) { 4326 cpu_infop->aci_bound++; 4327 } else { 4328 cpu_infop->aci_temp_bound++; 4329 } 4330 ASSERT((bind_cpu & ~IRQ_USER_BOUND) < apic_nproc); 4331 if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) { 4332 /* Write the RDT entry -- bind to a specific CPU: */ 4333 WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapic, intin_no, 4334 cpu_infop->aci_local_id << APIC_ID_BIT_OFFSET); 4335 } 4336 if ((airq_temp_cpu != IRQ_UNBOUND) && (airq_temp_cpu != IRQ_UNINIT)) { 4337 apic_cpus[airq_temp_cpu].aci_temp_bound--; 4338 } 4339 if (!APIC_IS_MSI_OR_MSIX_INDEX(irq_ptr->airq_mps_intr_index)) { 4340 /* Write the vector, trigger, and polarity portion of the RDT */ 4341 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4342 AV_PDEST | AV_FIXED | irq_ptr->airq_rdt_entry); 4343 } else { 4344 int type = (irq_ptr->airq_mps_intr_index == MSI_INDEX) ? 4345 DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX; 4346 (void) pci_msi_disable_mode(irq_ptr->airq_dip, type, 4347 irq_ptr->airq_ioapicindex); 4348 if (irq_ptr->airq_ioapicindex == irq_ptr->airq_origirq) { 4349 /* first one */ 4350 DDI_INTR_IMPLDBG((CE_CONT, "apic_rebind: call " 4351 "apic_pci_msi_enable_vector\n")); 4352 if (apic_pci_msi_enable_vector(irq_ptr->airq_dip, type, 4353 which_irq, irq_ptr->airq_vector, 4354 irq_ptr->airq_intin_no, 4355 cpu_infop->aci_local_id) != PSM_SUCCESS) { 4356 cmn_err(CE_WARN, "pcplusmp: " 4357 "apic_pci_msi_enable_vector " 4358 "returned PSM_FAILURE"); 4359 } 4360 } 4361 if ((irq_ptr->airq_ioapicindex + irq_ptr->airq_intin_no - 1) == 4362 irq_ptr->airq_origirq) { /* last one */ 4363 DDI_INTR_IMPLDBG((CE_CONT, "apic_rebind: call " 4364 "pci_msi_enable_mode\n")); 4365 if (pci_msi_enable_mode(irq_ptr->airq_dip, type, 4366 which_irq) != DDI_SUCCESS) { 4367 DDI_INTR_IMPLDBG((CE_CONT, "pcplusmp: " 4368 "pci_msi_enable failed\n")); 4369 (void) pci_msi_unconfigure(irq_ptr->airq_dip, 4370 (irq_ptr->airq_mps_intr_index == MSI_INDEX) ? 4371 DDI_INTR_TYPE_MSI : DDI_INTR_TYPE_MSIX, 4372 which_irq); 4373 } 4374 } 4375 } 4376 if (acquire_lock) 4377 lock_clear(&apic_ioapic_lock); 4378 irq_ptr->airq_temp_cpu = (uchar_t)bind_cpu; 4379 apic_redist_cpu_skip &= ~(1 << (bind_cpu & ~IRQ_USER_BOUND)); 4380 intr_restore(iflag); 4381 return (0); 4382 } 4383 4384 /* 4385 * Checks to see if the IOAPIC interrupt entry specified has its Remote IRR 4386 * bit set. Sets up a timeout to perform the reprogramming at a later time 4387 * if it cannot wait for the Remote IRR bit to clear (or if waiting did not 4388 * result in the bit's clearing). 4389 * 4390 * This function will mask the RDT entry if the Remote IRR bit is set. 4391 * 4392 * Returns non-zero if the caller should defer IOAPIC reprogramming. 4393 */ 4394 static int 4395 apic_check_stuck_interrupt(apic_irq_t *irq_ptr, int old_bind_cpu, 4396 int new_bind_cpu, volatile int32_t *ioapic, int intin_no, int which_irq) 4397 { 4398 int32_t rdt_entry; 4399 int waited; 4400 4401 /* Mask the RDT entry, but only if it's a level-triggered interrupt */ 4402 rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no); 4403 if ((rdt_entry & (AV_LEVEL|AV_MASK)) == AV_LEVEL) { 4404 4405 /* Mask it */ 4406 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no, 4407 AV_MASK | rdt_entry); 4408 } 4409 4410 /* 4411 * Wait for the delivery pending bit to clear. 4412 */ 4413 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4414 (AV_LEVEL|AV_PENDING)) == (AV_LEVEL|AV_PENDING)) { 4415 4416 /* 4417 * If we're still waiting on the delivery of this interrupt, 4418 * continue to wait here until it is delivered (this should be 4419 * a very small amount of time, but include a timeout just in 4420 * case). 4421 */ 4422 for (waited = 0; waited < apic_max_usecs_clear_pending; 4423 waited += APIC_USECS_PER_WAIT_INTERVAL) { 4424 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) 4425 & AV_PENDING) == 0) { 4426 break; 4427 } 4428 drv_usecwait(APIC_USECS_PER_WAIT_INTERVAL); 4429 } 4430 4431 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4432 AV_PENDING) != 0) { 4433 cmn_err(CE_WARN, "!IOAPIC %d intin %d: Could not " 4434 "deliver interrupt to local APIC within " 4435 "%d usecs.", irq_ptr->airq_ioapicindex, 4436 irq_ptr->airq_intin_no, 4437 apic_max_usecs_clear_pending); 4438 } 4439 } 4440 4441 /* 4442 * If the remote IRR bit is set, then the interrupt has been sent 4443 * to a CPU for processing. We have no choice but to wait for 4444 * that CPU to process the interrupt, at which point the remote IRR 4445 * bit will be cleared. 4446 */ 4447 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) & 4448 (AV_LEVEL|AV_REMOTE_IRR)) == (AV_LEVEL|AV_REMOTE_IRR)) { 4449 4450 /* 4451 * If the CPU that this RDT is bound to is NOT the current 4452 * CPU, wait until that CPU handles the interrupt and ACKs 4453 * it. If this interrupt is not bound to any CPU (that is, 4454 * if it's bound to the logical destination of "anyone"), it 4455 * may have been delivered to the current CPU so handle that 4456 * case by deferring the reprogramming (below). 4457 */ 4458 kpreempt_disable(); 4459 if ((old_bind_cpu != IRQ_UNBOUND) && 4460 (old_bind_cpu != IRQ_UNINIT) && 4461 (old_bind_cpu != psm_get_cpu_id())) { 4462 for (waited = 0; waited < apic_max_usecs_clear_pending; 4463 waited += APIC_USECS_PER_WAIT_INTERVAL) { 4464 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4465 intin_no) & AV_REMOTE_IRR) == 0) { 4466 4467 /* Clear the reprogramming state: */ 4468 lock_set(&apic_ioapic_reprogram_lock); 4469 4470 apic_reprogram_info[which_irq].valid 4471 = 0; 4472 apic_reprogram_info[which_irq].bindcpu 4473 = 0; 4474 apic_reprogram_info[which_irq].timeouts 4475 = 0; 4476 4477 lock_clear(&apic_ioapic_reprogram_lock); 4478 4479 /* Remote IRR has cleared! */ 4480 kpreempt_enable(); 4481 return (0); 4482 } 4483 drv_usecwait(APIC_USECS_PER_WAIT_INTERVAL); 4484 } 4485 } 4486 kpreempt_enable(); 4487 4488 /* 4489 * If we waited and the Remote IRR bit is still not cleared, 4490 * AND if we've invoked the timeout APIC_REPROGRAM_MAX_TIMEOUTS 4491 * times for this interrupt, try the last-ditch workarounds: 4492 */ 4493 if (apic_reprogram_info[which_irq].timeouts >= 4494 APIC_REPROGRAM_MAX_TIMEOUTS) { 4495 4496 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) 4497 & AV_REMOTE_IRR) != 0) { 4498 /* 4499 * Trying to clear the bit through normal 4500 * channels has failed. So as a last-ditch 4501 * effort, try to set the trigger mode to 4502 * edge, then to level. This has been 4503 * observed to work on many systems. 4504 */ 4505 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4506 intin_no, 4507 READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4508 intin_no) & ~AV_LEVEL); 4509 4510 WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4511 intin_no, 4512 READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4513 intin_no) | AV_LEVEL); 4514 4515 /* 4516 * If the bit's STILL set, declare total and 4517 * utter failure 4518 */ 4519 if ((READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, 4520 intin_no) & AV_REMOTE_IRR) != 0) { 4521 cmn_err(CE_WARN, "!IOAPIC %d intin %d: " 4522 "Remote IRR failed to reset " 4523 "within %d usecs. Interrupts to " 4524 "this pin may cease to function.", 4525 irq_ptr->airq_ioapicindex, 4526 irq_ptr->airq_intin_no, 4527 apic_max_usecs_clear_pending); 4528 } 4529 } 4530 /* Clear the reprogramming state: */ 4531 lock_set(&apic_ioapic_reprogram_lock); 4532 4533 apic_reprogram_info[which_irq].valid = 0; 4534 apic_reprogram_info[which_irq].bindcpu = 0; 4535 apic_reprogram_info[which_irq].timeouts = 0; 4536 4537 lock_clear(&apic_ioapic_reprogram_lock); 4538 } else { 4539 #ifdef DEBUG 4540 cmn_err(CE_WARN, "Deferring reprogramming of irq %d", 4541 which_irq); 4542 #endif /* DEBUG */ 4543 /* 4544 * If waiting for the Remote IRR bit (above) didn't 4545 * allow it to clear, defer the reprogramming: 4546 */ 4547 lock_set(&apic_ioapic_reprogram_lock); 4548 4549 apic_reprogram_info[which_irq].valid = 1; 4550 apic_reprogram_info[which_irq].bindcpu = new_bind_cpu; 4551 apic_reprogram_info[which_irq].timeouts++; 4552 4553 lock_clear(&apic_ioapic_reprogram_lock); 4554 4555 /* Fire up a timeout to handle this later */ 4556 (void) timeout(apic_reprogram_timeout_handler, 4557 (void *) 0, 4558 drv_usectohz(APIC_REPROGRAM_TIMEOUT_DELAY)); 4559 4560 /* Inform caller to defer IOAPIC programming: */ 4561 return (1); 4562 } 4563 } 4564 return (0); 4565 } 4566 4567 /* 4568 * Timeout handler that performs the APIC reprogramming 4569 */ 4570 /*ARGSUSED*/ 4571 static void 4572 apic_reprogram_timeout_handler(void *arg) 4573 { 4574 /*LINTED: set but not used in function*/ 4575 int i, result; 4576 4577 /* Serialize access to this function */ 4578 mutex_enter(&apic_reprogram_timeout_mutex); 4579 4580 /* 4581 * For each entry in the reprogramming state that's valid, 4582 * try the reprogramming again: 4583 */ 4584 for (i = 0; i < APIC_MAX_VECTOR; i++) { 4585 if (apic_reprogram_info[i].valid == 0) 4586 continue; 4587 /* 4588 * Though we can't really do anything about errors 4589 * at this point, keep track of them for reporting. 4590 * Note that it is very possible for apic_setup_io_intr 4591 * to re-register this very timeout if the Remote IRR bit 4592 * has not yet cleared. 4593 */ 4594 result = apic_setup_io_intr_deferred(apic_irq_table[i], i); 4595 4596 #ifdef DEBUG 4597 if (result) 4598 cmn_err(CE_WARN, "apic_reprogram_timeout: " 4599 "apic_setup_io_intr returned nonzero for " 4600 "irq=%d!", i); 4601 #endif /* DEBUG */ 4602 } 4603 4604 mutex_exit(&apic_reprogram_timeout_mutex); 4605 } 4606 4607 4608 /* 4609 * Called to migrate all interrupts at an irq to another cpu. safe 4610 * if true means we are not being called from an interrupt 4611 * context and hence it is safe to do a lock_set. If false 4612 * do only a lock_try and return failure ( non 0 ) if we cannot get it 4613 */ 4614 int 4615 apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe) 4616 { 4617 apic_irq_t *irqptr = irq_ptr; 4618 int retval = 0; 4619 int iflag; 4620 4621 iflag = intr_clear(); 4622 if (!safe) { 4623 if (lock_try(&apic_ioapic_lock) == 0) { 4624 intr_restore(iflag); 4625 return (1); 4626 } 4627 } else 4628 lock_set(&apic_ioapic_lock); 4629 4630 while (irqptr) { 4631 if (irqptr->airq_temp_cpu != IRQ_UNINIT) 4632 retval |= apic_rebind(irqptr, bind_cpu, 0, IMMEDIATE); 4633 irqptr = irqptr->airq_next; 4634 } 4635 lock_clear(&apic_ioapic_lock); 4636 intr_restore(iflag); 4637 return (retval); 4638 } 4639 4640 /* 4641 * apic_intr_redistribute does all the messy computations for identifying 4642 * which interrupt to move to which CPU. Currently we do just one interrupt 4643 * at a time. This reduces the time we spent doing all this within clock 4644 * interrupt. When it is done in idle, we could do more than 1. 4645 * First we find the most busy and the most free CPU (time in ISR only) 4646 * skipping those CPUs that has been identified as being ineligible (cpu_skip) 4647 * Then we look for IRQs which are closest to the difference between the 4648 * most busy CPU and the average ISR load. We try to find one whose load 4649 * is less than difference.If none exists, then we chose one larger than the 4650 * difference, provided it does not make the most idle CPU worse than the 4651 * most busy one. In the end, we clear all the busy fields for CPUs. For 4652 * IRQs, they are cleared as they are scanned. 4653 */ 4654 static void 4655 apic_intr_redistribute() 4656 { 4657 int busiest_cpu, most_free_cpu; 4658 int cpu_free, cpu_busy, max_busy, min_busy; 4659 int min_free, diff; 4660 int average_busy, cpus_online; 4661 int i, busy; 4662 apic_cpus_info_t *cpu_infop; 4663 apic_irq_t *min_busy_irq = NULL; 4664 apic_irq_t *max_busy_irq = NULL; 4665 4666 busiest_cpu = most_free_cpu = -1; 4667 cpu_free = cpu_busy = max_busy = average_busy = 0; 4668 min_free = apic_sample_factor_redistribution; 4669 cpus_online = 0; 4670 /* 4671 * Below we will check for CPU_INTR_ENABLE, bound, temp_bound, temp_cpu 4672 * without ioapic_lock. That is OK as we are just doing statistical 4673 * sampling anyway and any inaccuracy now will get corrected next time 4674 * The call to rebind which actually changes things will make sure 4675 * we are consistent. 4676 */ 4677 for (i = 0; i < apic_nproc; i++) { 4678 if (!(apic_redist_cpu_skip & (1 << i)) && 4679 (apic_cpus[i].aci_status & APIC_CPU_INTR_ENABLE)) { 4680 4681 cpu_infop = &apic_cpus[i]; 4682 /* 4683 * If no unbound interrupts or only 1 total on this 4684 * CPU, skip 4685 */ 4686 if (!cpu_infop->aci_temp_bound || 4687 (cpu_infop->aci_bound + cpu_infop->aci_temp_bound) 4688 == 1) { 4689 apic_redist_cpu_skip |= 1 << i; 4690 continue; 4691 } 4692 4693 busy = cpu_infop->aci_busy; 4694 average_busy += busy; 4695 cpus_online++; 4696 if (max_busy < busy) { 4697 max_busy = busy; 4698 busiest_cpu = i; 4699 } 4700 if (min_free > busy) { 4701 min_free = busy; 4702 most_free_cpu = i; 4703 } 4704 if (busy > apic_int_busy_mark) { 4705 cpu_busy |= 1 << i; 4706 } else { 4707 if (busy < apic_int_free_mark) 4708 cpu_free |= 1 << i; 4709 } 4710 } 4711 } 4712 if ((cpu_busy && cpu_free) || 4713 (max_busy >= (min_free + apic_diff_for_redistribution))) { 4714 4715 apic_num_imbalance++; 4716 #ifdef DEBUG 4717 if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) { 4718 prom_printf( 4719 "redistribute busy=%x free=%x max=%x min=%x", 4720 cpu_busy, cpu_free, max_busy, min_free); 4721 } 4722 #endif /* DEBUG */ 4723 4724 4725 average_busy /= cpus_online; 4726 4727 diff = max_busy - average_busy; 4728 min_busy = max_busy; /* start with the max possible value */ 4729 max_busy = 0; 4730 min_busy_irq = max_busy_irq = NULL; 4731 i = apic_min_device_irq; 4732 for (; i < apic_max_device_irq; i++) { 4733 apic_irq_t *irq_ptr; 4734 /* Change to linked list per CPU ? */ 4735 if ((irq_ptr = apic_irq_table[i]) == NULL) 4736 continue; 4737 /* Check for irq_busy & decide which one to move */ 4738 /* Also zero them for next round */ 4739 if ((irq_ptr->airq_temp_cpu == busiest_cpu) && 4740 irq_ptr->airq_busy) { 4741 if (irq_ptr->airq_busy < diff) { 4742 /* 4743 * Check for least busy CPU, 4744 * best fit or what ? 4745 */ 4746 if (max_busy < irq_ptr->airq_busy) { 4747 /* 4748 * Most busy within the 4749 * required differential 4750 */ 4751 max_busy = irq_ptr->airq_busy; 4752 max_busy_irq = irq_ptr; 4753 } 4754 } else { 4755 if (min_busy > irq_ptr->airq_busy) { 4756 /* 4757 * least busy, but more than 4758 * the reqd diff 4759 */ 4760 if (min_busy < 4761 (diff + average_busy - 4762 min_free)) { 4763 /* 4764 * Making sure new cpu 4765 * will not end up 4766 * worse 4767 */ 4768 min_busy = 4769 irq_ptr->airq_busy; 4770 4771 min_busy_irq = irq_ptr; 4772 } 4773 } 4774 } 4775 } 4776 irq_ptr->airq_busy = 0; 4777 } 4778 4779 if (max_busy_irq != NULL) { 4780 #ifdef DEBUG 4781 if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) { 4782 prom_printf("rebinding %x to %x", 4783 max_busy_irq->airq_vector, most_free_cpu); 4784 } 4785 #endif /* DEBUG */ 4786 if (apic_rebind_all(max_busy_irq, most_free_cpu, 0) 4787 == 0) 4788 /* Make change permenant */ 4789 max_busy_irq->airq_cpu = (uchar_t)most_free_cpu; 4790 } else if (min_busy_irq != NULL) { 4791 #ifdef DEBUG 4792 if (apic_verbose & APIC_VERBOSE_IOAPIC_FLAG) { 4793 prom_printf("rebinding %x to %x", 4794 min_busy_irq->airq_vector, most_free_cpu); 4795 } 4796 #endif /* DEBUG */ 4797 4798 if (apic_rebind_all(min_busy_irq, most_free_cpu, 0) == 4799 0) 4800 /* Make change permenant */ 4801 min_busy_irq->airq_cpu = (uchar_t)most_free_cpu; 4802 } else { 4803 if (cpu_busy != (1 << busiest_cpu)) { 4804 apic_redist_cpu_skip |= 1 << busiest_cpu; 4805 /* 4806 * We leave cpu_skip set so that next time we 4807 * can choose another cpu 4808 */ 4809 } 4810 } 4811 apic_num_rebind++; 4812 } else { 4813 /* 4814 * found nothing. Could be that we skipped over valid CPUs 4815 * or we have balanced everything. If we had a variable 4816 * ticks_for_redistribution, it could be increased here. 4817 * apic_int_busy, int_free etc would also need to be 4818 * changed. 4819 */ 4820 if (apic_redist_cpu_skip) 4821 apic_redist_cpu_skip = 0; 4822 } 4823 for (i = 0; i < apic_nproc; i++) { 4824 apic_cpus[i].aci_busy = 0; 4825 } 4826 } 4827 4828 static void 4829 apic_cleanup_busy() 4830 { 4831 int i; 4832 apic_irq_t *irq_ptr; 4833 4834 for (i = 0; i < apic_nproc; i++) { 4835 apic_cpus[i].aci_busy = 0; 4836 } 4837 4838 for (i = apic_min_device_irq; i < apic_max_device_irq; i++) { 4839 if ((irq_ptr = apic_irq_table[i]) != NULL) 4840 irq_ptr->airq_busy = 0; 4841 } 4842 apic_skipped_redistribute = 0; 4843 } 4844 4845 4846 /* 4847 * This function will reprogram the timer. 4848 * 4849 * When in oneshot mode the argument is the absolute time in future to 4850 * generate the interrupt at. 4851 * 4852 * When in periodic mode, the argument is the interval at which the 4853 * interrupts should be generated. There is no need to support the periodic 4854 * mode timer change at this time. 4855 */ 4856 static void 4857 apic_timer_reprogram(hrtime_t time) 4858 { 4859 hrtime_t now; 4860 uint_t ticks; 4861 4862 /* 4863 * We should be called from high PIL context (CBE_HIGH_PIL), 4864 * so kpreempt is disabled. 4865 */ 4866 4867 if (!apic_oneshot) { 4868 /* time is the interval for periodic mode */ 4869 ticks = (uint_t)((time) / apic_nsec_per_tick); 4870 } else { 4871 /* one shot mode */ 4872 4873 now = gethrtime(); 4874 4875 if (time <= now) { 4876 /* 4877 * requested to generate an interrupt in the past 4878 * generate an interrupt as soon as possible 4879 */ 4880 ticks = apic_min_timer_ticks; 4881 } else if ((time - now) > apic_nsec_max) { 4882 /* 4883 * requested to generate an interrupt at a time 4884 * further than what we are capable of. Set to max 4885 * the hardware can handle 4886 */ 4887 4888 ticks = APIC_MAXVAL; 4889 #ifdef DEBUG 4890 cmn_err(CE_CONT, "apic_timer_reprogram, request at" 4891 " %lld too far in future, current time" 4892 " %lld \n", time, now); 4893 #endif /* DEBUG */ 4894 } else 4895 ticks = (uint_t)((time - now) / apic_nsec_per_tick); 4896 } 4897 4898 if (ticks < apic_min_timer_ticks) 4899 ticks = apic_min_timer_ticks; 4900 4901 apicadr[APIC_INIT_COUNT] = ticks; 4902 4903 } 4904 4905 /* 4906 * This function will enable timer interrupts. 4907 */ 4908 static void 4909 apic_timer_enable(void) 4910 { 4911 /* 4912 * We should be Called from high PIL context (CBE_HIGH_PIL), 4913 * so kpreempt is disabled. 4914 */ 4915 4916 if (!apic_oneshot) 4917 apicadr[APIC_LOCAL_TIMER] = 4918 (apic_clkvect + APIC_BASE_VECT) | AV_TIME; 4919 else { 4920 /* one shot */ 4921 apicadr[APIC_LOCAL_TIMER] = (apic_clkvect + APIC_BASE_VECT); 4922 } 4923 } 4924 4925 /* 4926 * This function will disable timer interrupts. 4927 */ 4928 static void 4929 apic_timer_disable(void) 4930 { 4931 /* 4932 * We should be Called from high PIL context (CBE_HIGH_PIL), 4933 * so kpreempt is disabled. 4934 */ 4935 4936 apicadr[APIC_LOCAL_TIMER] = (apic_clkvect + APIC_BASE_VECT) | AV_MASK; 4937 } 4938 4939 4940 cyclic_id_t apic_cyclic_id; 4941 4942 /* 4943 * If this module needs to be a consumer of cyclic subsystem, they 4944 * can be added here, since at this time kernel cyclic subsystem is initialized 4945 * argument is not currently used, and is reserved for future. 4946 */ 4947 static void 4948 apic_post_cyclic_setup(void *arg) 4949 { 4950 _NOTE(ARGUNUSED(arg)) 4951 cyc_handler_t hdlr; 4952 cyc_time_t when; 4953 4954 /* cpu_lock is held */ 4955 4956 /* set up cyclics for intr redistribution */ 4957 4958 /* 4959 * In peridoc mode intr redistribution processing is done in 4960 * apic_intr_enter during clk intr processing 4961 */ 4962 if (!apic_oneshot) 4963 return; 4964 4965 hdlr.cyh_level = CY_LOW_LEVEL; 4966 hdlr.cyh_func = (cyc_func_t)apic_redistribute_compute; 4967 hdlr.cyh_arg = NULL; 4968 4969 when.cyt_when = 0; 4970 when.cyt_interval = apic_redistribute_sample_interval; 4971 apic_cyclic_id = cyclic_add(&hdlr, &when); 4972 4973 4974 } 4975 4976 static void 4977 apic_redistribute_compute(void) 4978 { 4979 int i, j, max_busy; 4980 4981 if (apic_enable_dynamic_migration) { 4982 if (++apic_nticks == apic_sample_factor_redistribution) { 4983 /* 4984 * Time to call apic_intr_redistribute(). 4985 * reset apic_nticks. This will cause max_busy 4986 * to be calculated below and if it is more than 4987 * apic_int_busy, we will do the whole thing 4988 */ 4989 apic_nticks = 0; 4990 } 4991 max_busy = 0; 4992 for (i = 0; i < apic_nproc; i++) { 4993 4994 /* 4995 * Check if curipl is non zero & if ISR is in 4996 * progress 4997 */ 4998 if (((j = apic_cpus[i].aci_curipl) != 0) && 4999 (apic_cpus[i].aci_ISR_in_progress & (1 << j))) { 5000 5001 int irq; 5002 apic_cpus[i].aci_busy++; 5003 irq = apic_cpus[i].aci_current[j]; 5004 apic_irq_table[irq]->airq_busy++; 5005 } 5006 5007 if (!apic_nticks && 5008 (apic_cpus[i].aci_busy > max_busy)) 5009 max_busy = apic_cpus[i].aci_busy; 5010 } 5011 if (!apic_nticks) { 5012 if (max_busy > apic_int_busy_mark) { 5013 /* 5014 * We could make the following check be 5015 * skipped > 1 in which case, we get a 5016 * redistribution at half the busy mark (due to 5017 * double interval). Need to be able to collect 5018 * more empirical data to decide if that is a 5019 * good strategy. Punt for now. 5020 */ 5021 if (apic_skipped_redistribute) 5022 apic_cleanup_busy(); 5023 else 5024 apic_intr_redistribute(); 5025 } else 5026 apic_skipped_redistribute++; 5027 } 5028 } 5029 } 5030 5031 5032 static int 5033 apic_acpi_translate_pci_irq(dev_info_t *dip, int busid, int devid, 5034 int ipin, int *pci_irqp, iflag_t *intr_flagp) 5035 { 5036 5037 int status; 5038 acpi_psm_lnk_t acpipsmlnk; 5039 5040 if ((status = acpi_get_irq_cache_ent(busid, devid, ipin, pci_irqp, 5041 intr_flagp)) == ACPI_PSM_SUCCESS) { 5042 APIC_VERBOSE_IRQ((CE_CONT, "!pcplusmp: Found irqno %d " 5043 "from cache for device %s, instance #%d\n", *pci_irqp, 5044 ddi_get_name(dip), ddi_get_instance(dip))); 5045 return (status); 5046 } 5047 5048 bzero(&acpipsmlnk, sizeof (acpi_psm_lnk_t)); 5049 5050 if ((status = acpi_translate_pci_irq(dip, ipin, pci_irqp, intr_flagp, 5051 &acpipsmlnk)) == ACPI_PSM_FAILURE) { 5052 APIC_VERBOSE_IRQ((CE_WARN, "pcplusmp: " 5053 " acpi_translate_pci_irq failed for device %s, instance" 5054 " #%d", ddi_get_name(dip), ddi_get_instance(dip))); 5055 return (status); 5056 } 5057 5058 if (status == ACPI_PSM_PARTIAL && acpipsmlnk.lnkobj != NULL) { 5059 status = apic_acpi_irq_configure(&acpipsmlnk, dip, pci_irqp, 5060 intr_flagp); 5061 if (status != ACPI_PSM_SUCCESS) { 5062 status = acpi_get_current_irq_resource(&acpipsmlnk, 5063 pci_irqp, intr_flagp); 5064 } 5065 } 5066 5067 if (status == ACPI_PSM_SUCCESS) { 5068 acpi_new_irq_cache_ent(busid, devid, ipin, *pci_irqp, 5069 intr_flagp, &acpipsmlnk); 5070 5071 APIC_VERBOSE_IRQ((CE_CONT, "pcplusmp: [ACPI] " 5072 "new irq %d for device %s, instance #%d\n", 5073 *pci_irqp, ddi_get_name(dip), ddi_get_instance(dip))); 5074 } 5075 5076 return (status); 5077 } 5078 5079 /* 5080 * Configures the irq for the interrupt link device identified by 5081 * acpipsmlnkp. 5082 * 5083 * Gets the current and the list of possible irq settings for the 5084 * device. If apic_unconditional_srs is not set, and the current 5085 * resource setting is in the list of possible irq settings, 5086 * current irq resource setting is passed to the caller. 5087 * 5088 * Otherwise, picks an irq number from the list of possible irq 5089 * settings, and sets the irq of the device to this value. 5090 * If prefer_crs is set, among a set of irq numbers in the list that have 5091 * the least number of devices sharing the interrupt, we pick current irq 5092 * resource setting if it is a member of this set. 5093 * 5094 * Passes the irq number in the value pointed to by pci_irqp, and 5095 * polarity and sensitivity in the structure pointed to by dipintrflagp 5096 * to the caller. 5097 * 5098 * Note that if setting the irq resource failed, but successfuly obtained 5099 * the current irq resource settings, passes the current irq resources 5100 * and considers it a success. 5101 * 5102 * Returns: 5103 * ACPI_PSM_SUCCESS on success. 5104 * 5105 * ACPI_PSM_FAILURE if an error occured during the configuration or 5106 * if a suitable irq was not found for this device, or if setting the 5107 * irq resource and obtaining the current resource fails. 5108 * 5109 */ 5110 static int 5111 apic_acpi_irq_configure(acpi_psm_lnk_t *acpipsmlnkp, dev_info_t *dip, 5112 int *pci_irqp, iflag_t *dipintr_flagp) 5113 { 5114 5115 int i, min_share, foundnow, done = 0; 5116 int32_t irq; 5117 int32_t share_irq = -1; 5118 int32_t chosen_irq = -1; 5119 int cur_irq = -1; 5120 acpi_irqlist_t *irqlistp; 5121 acpi_irqlist_t *irqlistent; 5122 5123 if ((acpi_get_possible_irq_resources(acpipsmlnkp, &irqlistp)) 5124 == ACPI_PSM_FAILURE) { 5125 APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Unable to determine " 5126 "or assign IRQ for device %s, instance #%d: The system was " 5127 "unable to get the list of potential IRQs from ACPI.", 5128 ddi_get_name(dip), ddi_get_instance(dip))); 5129 5130 return (ACPI_PSM_FAILURE); 5131 } 5132 5133 if ((acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq, 5134 dipintr_flagp) == ACPI_PSM_SUCCESS) && (!apic_unconditional_srs) && 5135 (cur_irq > 0)) { 5136 /* 5137 * If an IRQ is set in CRS and that IRQ exists in the set 5138 * returned from _PRS, return that IRQ, otherwise print 5139 * a warning 5140 */ 5141 5142 if (acpi_irqlist_find_irq(irqlistp, cur_irq, NULL) 5143 == ACPI_PSM_SUCCESS) { 5144 5145 acpi_free_irqlist(irqlistp); 5146 ASSERT(pci_irqp != NULL); 5147 *pci_irqp = cur_irq; 5148 return (ACPI_PSM_SUCCESS); 5149 } 5150 5151 APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Could not find the " 5152 "current irq %d for device %s, instance #%d in ACPI's " 5153 "list of possible irqs for this device. Picking one from " 5154 " the latter list.", cur_irq, ddi_get_name(dip), 5155 ddi_get_instance(dip))); 5156 } 5157 5158 irqlistent = irqlistp; 5159 min_share = 255; 5160 5161 while (irqlistent != NULL) { 5162 irqlistent->intr_flags.bustype = BUS_PCI; 5163 5164 for (foundnow = 0, i = 0; i < irqlistent->num_irqs; i++) { 5165 5166 irq = irqlistent->irqs[i]; 5167 5168 if ((irq < 16) && (apic_reserved_irqlist[irq])) 5169 continue; 5170 5171 if (irq == 0) { 5172 /* invalid irq number */ 5173 continue; 5174 } 5175 5176 if ((apic_irq_table[irq] == NULL) || 5177 (apic_irq_table[irq]->airq_dip == dip)) { 5178 chosen_irq = irq; 5179 foundnow = 1; 5180 /* 5181 * If we do not prefer current irq from crs 5182 * or if we do and this irq is the same as 5183 * current irq from crs, this is the one 5184 * to pick. 5185 */ 5186 if (!(apic_prefer_crs) || (irq == cur_irq)) { 5187 done = 1; 5188 break; 5189 } 5190 continue; 5191 } 5192 5193 if (irqlistent->intr_flags.intr_el == INTR_EL_EDGE) 5194 continue; 5195 5196 if (!acpi_intr_compatible(irqlistent->intr_flags, 5197 apic_irq_table[irq]->airq_iflag)) 5198 continue; 5199 5200 if ((apic_irq_table[irq]->airq_share < min_share) || 5201 ((apic_irq_table[irq]->airq_share == min_share) && 5202 (cur_irq == irq) && (apic_prefer_crs))) { 5203 min_share = apic_irq_table[irq]->airq_share; 5204 share_irq = irq; 5205 foundnow = 1; 5206 } 5207 } 5208 5209 /* 5210 * If we found an IRQ in the inner loop this time, save the 5211 * details from the irqlist for later use. 5212 */ 5213 if (foundnow && ((chosen_irq != -1) || (share_irq != -1))) { 5214 /* 5215 * Copy the acpi_prs_private_t and flags from this 5216 * irq list entry, since we found an irq from this 5217 * entry. 5218 */ 5219 acpipsmlnkp->acpi_prs_prv = irqlistent->acpi_prs_prv; 5220 *dipintr_flagp = irqlistent->intr_flags; 5221 } 5222 5223 if (done) 5224 break; 5225 5226 /* Go to the next irqlist entry */ 5227 irqlistent = irqlistent->next; 5228 } 5229 5230 5231 acpi_free_irqlist(irqlistp); 5232 if (chosen_irq != -1) 5233 irq = chosen_irq; 5234 else if (share_irq != -1) 5235 irq = share_irq; 5236 else { 5237 APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: Could not find a " 5238 "suitable irq from the list of possible irqs for device " 5239 "%s, instance #%d in ACPI's list of possible irqs", 5240 ddi_get_name(dip), ddi_get_instance(dip))); 5241 return (ACPI_PSM_FAILURE); 5242 } 5243 5244 APIC_VERBOSE_IRQ((CE_CONT, "!pcplusmp: Setting irq %d for device %s " 5245 "instance #%d\n", irq, ddi_get_name(dip), ddi_get_instance(dip))); 5246 5247 if ((acpi_set_irq_resource(acpipsmlnkp, irq)) == ACPI_PSM_SUCCESS) { 5248 /* 5249 * setting irq was successful, check to make sure CRS 5250 * reflects that. If CRS does not agree with what we 5251 * set, return the irq that was set. 5252 */ 5253 5254 if (acpi_get_current_irq_resource(acpipsmlnkp, &cur_irq, 5255 dipintr_flagp) == ACPI_PSM_SUCCESS) { 5256 5257 if (cur_irq != irq) 5258 APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: " 5259 "IRQ resource set (irqno %d) for device %s " 5260 "instance #%d, differs from current " 5261 "setting irqno %d", 5262 irq, ddi_get_name(dip), 5263 ddi_get_instance(dip), cur_irq)); 5264 } 5265 5266 /* 5267 * return the irq that was set, and not what CRS reports, 5268 * since CRS has been seen to be bogus on some systems 5269 */ 5270 cur_irq = irq; 5271 } else { 5272 APIC_VERBOSE_IRQ((CE_WARN, "!pcplusmp: set resource irq %d " 5273 "failed for device %s instance #%d", 5274 irq, ddi_get_name(dip), ddi_get_instance(dip))); 5275 5276 if (cur_irq == -1) 5277 return (ACPI_PSM_FAILURE); 5278 } 5279 5280 ASSERT(pci_irqp != NULL); 5281 *pci_irqp = cur_irq; 5282 return (ACPI_PSM_SUCCESS); 5283 } 5284