/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright (c) 2017, Joyent, Inc. All rights reserved.
 */

/*
 * To understand how the apix module interacts with the interrupt subsystem
 * read the theory statement in uts/i86pc/os/intr.c.
 */

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 * PSMI 1.7 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_7

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
#include <sys/cyclic.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/reboot.h>
#include <sys/mach_intr.h>
#include <sys/apix.h>
#include <sys/apix_irm_impl.h>

static int apix_probe();
static void apix_init();
static void apix_picinit(void);
static int apix_intr_enter(int, int *);
static void apix_intr_exit(int, int);
static void apix_setspl(int);
static int apix_disable_intr(processorid_t);
static void apix_enable_intr(processorid_t);
static int apix_get_clkvect(int);
static int apix_get_ipivect(int, int);
static void apix_post_cyclic_setup(void *);
static int apix_post_cpu_start();
static int apix_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);

/*
 * Helper functions for apix_intr_ops()
 */
static void apix_redistribute_compute(void);
static int apix_get_pending(apix_vector_t *);
static apix_vector_t *apix_get_req_vector(ddi_intr_handle_impl_t *, ushort_t);
static int apix_get_intr_info(ddi_intr_handle_impl_t *, apic_get_intr_t *);
static char *apix_get_apic_type(void);
static int apix_intx_get_pending(int);
static void apix_intx_set_mask(int irqno);
static void apix_intx_clear_mask(int irqno);
static int apix_intx_get_shared(int irqno);
static void apix_intx_set_shared(int irqno, int delta);
static apix_vector_t *apix_intx_xlate_vector(dev_info_t *, int,
    struct intrspec *);
static int apix_intx_alloc_vector(dev_info_t *, int, struct intrspec *);

extern int apic_clkinit(int);

/* IRM initialization for APIX PSM module */
extern void apix_irm_init(void);

extern int irm_enable;

/*
 * Local static data
 */
static struct psm_ops apix_ops = {
	apix_probe,

	apix_init,
	apix_picinit,
	apix_intr_enter,
	apix_intr_exit,
	apix_setspl,
	apix_addspl,
	apix_delspl,
	apix_disable_intr,
	apix_enable_intr,
	NULL,			/* psm_softlvl_to_irq */
	NULL,			/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apix_get_clkvect,
	NULL,			/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apix_post_cpu_start,
	apic_shutdown,
	apix_get_ipivect,
	apic_send_ipi,

	NULL,			/* psm_translate_irq */
	NULL,			/* psm_notify_error */
	NULL,			/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apix_post_cyclic_setup,
	apic_preshutdown,
	apix_intr_ops,		/* Advanced DDI Interrupt framework */
	apic_state,		/* save, restore apic state for S3 */
	apic_cpu_ops,		/* CPU control interface. */
};

struct psm_ops *psmops = &apix_ops;

static struct psm_info apix_psm_info = {
	PSM_INFO_VER01_7,		/* version */
	PSM_OWN_EXCLUSIVE,		/* ownership */
	&apix_ops,			/* operation */
	APIX_NAME,			/* machine name */
	"apix MPv1.4 compatible",
};

static void *apix_hdlp;

static int apix_is_enabled = 0;

/*
 * Flag to indicate if APIX is to be enabled only for platforms
 * with specific hw feature(s).
 */
int apix_hw_chk_enable = 1;

/*
 * Hw features that are checked for enabling APIX support.
 */
#define	APIX_SUPPORT_X2APIC	0x00000001
uint_t apix_supported_hw = APIX_SUPPORT_X2APIC;

/*
 * apix_lock is used for cpu selection and vector re-binding
 */
lock_t apix_lock;
apix_impl_t *apixs[NCPU];
/*
 * Mapping between device interrupt and the allocated vector. Indexed
 * by major number.
 */
apix_dev_vector_t **apix_dev_vector;
/*
 * Mapping between device major number and cpu id. It gets used
 * when interrupt binding policy round robin with affinity is
 * applied. With that policy, devices with the same major number
 * will be bound to the same CPU.
 */
processorid_t *apix_major_to_cpu;	/* major to cpu mapping */
kmutex_t apix_mutex;	/* for apix_dev_vector & apix_major_to_cpu */

int apix_nipis = 16;	/* Maximum number of IPIs */
/*
 * Maximum number of vectors in a CPU that can be used for interrupt
 * allocation (including IPIs and the reserved vectors).
 */
int apix_cpu_nvectors = APIX_NVECTOR;
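
/*
 * A note on "virtual vectors" (a reading aid, hedged): throughout this
 * file a virtvec is a single integer naming a (CPU, hardware vector)
 * pair. The exact bit layout lives in <sys/apix.h>; judging from the
 * accessors used below, APIX_VIRTVEC_VECTOR() yields the low byte (the
 * hardware vector, cast to uchar_t by callers) and APIX_VIRTVEC_CPU()
 * the remaining upper bits. Assuming that layout, APIX_VIRTVECTOR(2,
 * 0x62) names vector 0x62 on CPU 2, and xv_vector(2, 0x62) presumably
 * resolves it to apixs[2]->x_vectbl[0x62].
 */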

/* gcpu.h */

extern void apic_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp);
extern void apic_change_eoi();

/*
 * This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apix_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apix_hdlp, &apix_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apix_hdlp, &apix_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apix_hdlp, &apix_psm_info, modinfop));
}

static int
apix_probe()
{
	int rval;

	if (apix_enable == 0)
		return (PSM_FAILURE);

	/* check for hw features if specified */
	if (apix_hw_chk_enable) {
		/* check if x2APIC mode is supported */
		if ((apix_supported_hw & APIX_SUPPORT_X2APIC) ==
		    APIX_SUPPORT_X2APIC) {
			if (apic_local_mode() == LOCAL_X2APIC) {
				/* x2APIC mode activated by BIOS, switch ops */
				apic_mode = LOCAL_X2APIC;
				apic_change_ops();
			} else if (!apic_detect_x2apic()) {
				/* x2APIC mode is not supported in the hw */
				apix_enable = 0;
			}
		}
		if (apix_enable == 0)
			return (PSM_FAILURE);
	}

	rval = apic_probe_common(apix_psm_info.p_mach_idstring);
	if (rval == PSM_SUCCESS)
		apix_is_enabled = 1;
	else
		apix_is_enabled = 0;
	return (rval);
}

/*
 * Initialize the data structures needed by pcplusmpx module.
 * Specifically, the data structures used by addspl() and delspl()
 * routines.
 */
static void
apix_softinit()
{
	int i, *iptr;
	apix_impl_t *hdlp;
	int nproc;

	nproc = max(apic_nproc, apic_max_nproc);

	hdlp = kmem_zalloc(nproc * sizeof (apix_impl_t), KM_SLEEP);
	for (i = 0; i < nproc; i++) {
		apixs[i] = &hdlp[i];
		apixs[i]->x_cpuid = i;
		LOCK_INIT_CLEAR(&apixs[i]->x_lock);
	}

	/* cpu 0 is always up (for now) */
	apic_cpus[0].aci_status = APIC_CPU_ONLINE | APIC_CPU_INTR_ENABLE;

	iptr = (int *)&apic_irq_table[0];
	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		apic_level_intr[i] = 0;
		*iptr++ = NULL;
	}
	mutex_init(&airq_mutex, NULL, MUTEX_DEFAULT, NULL);

	apix_dev_vector = kmem_zalloc(sizeof (apix_dev_vector_t *) * devcnt,
	    KM_SLEEP);

	if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) {
		apix_major_to_cpu = kmem_zalloc(sizeof (int) * devcnt,
		    KM_SLEEP);
		for (i = 0; i < devcnt; i++)
			apix_major_to_cpu[i] = IRQ_UNINIT;
	}

	mutex_init(&apix_mutex, NULL, MUTEX_DEFAULT, NULL);
}

static int
apix_get_pending_spl(void)
{
	int cpuid = CPU->cpu_id;

	return (bsrw_insn(apixs[cpuid]->x_intr_pending));
}
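
/*
 * A worked example for the lookup above (illustrative values):
 * x_intr_pending is a bitmask with one bit per pending IPL, and
 * bsrw_insn() is the bit-scan-reverse primitive returning the index of
 * the highest set bit. For a pending mask of 0x0220 (IPLs 5 and 9
 * pending) it returns 9, so the caller always sees the
 * highest-priority pending level first.
 */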

static uintptr_t
apix_get_intr_handler(int cpu, short vec)
{
	apix_vector_t *apix_vector;

	ASSERT(cpu < apic_nproc && vec < APIX_NVECTOR);
	if (cpu >= apic_nproc)
		return (NULL);

	apix_vector = apixs[cpu]->x_vectbl[vec];

	return ((uintptr_t)(apix_vector->v_autovect));
}

static void
apix_init()
{
	extern void (*do_interrupt_common)(struct regs *, trap_trace_rec_t *);

	APIC_VERBOSE(INIT, (CE_CONT, "apix: psm_softinit\n"));

	do_interrupt_common = apix_do_interrupt;
	addintr = apix_add_avintr;
	remintr = apix_rem_avintr;
	get_pending_spl = apix_get_pending_spl;
	get_intr_handler = apix_get_intr_handler;
	psm_get_localapicid = apic_get_localapicid;
	psm_get_ioapicid = apic_get_ioapicid;

	apix_softinit();

#if !defined(__amd64)
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif

	/*
	 * Initialize IRM pool parameters
	 */
	if (irm_enable) {
		int	i;
		int	lowest_irq;
		int	highest_irq;

		/* number of CPUs present */
		apix_irminfo.apix_ncpus = apic_nproc;
		/* total number of entries in all of the IOAPICs present */
		lowest_irq = apic_io_vectbase[0];
		highest_irq = apic_io_vectend[0];
		for (i = 1; i < apic_io_max; i++) {
			if (apic_io_vectbase[i] < lowest_irq)
				lowest_irq = apic_io_vectbase[i];
			if (apic_io_vectend[i] > highest_irq)
				highest_irq = apic_io_vectend[i];
		}
		apix_irminfo.apix_ioapic_max_vectors =
		    highest_irq - lowest_irq + 1;
		/*
		 * Number of available per-CPU vectors excluding
		 * reserved vectors for Dtrace, int80, system-call,
		 * fast-trap, etc.
		 */
		apix_irminfo.apix_per_cpu_vectors = APIX_NAVINTR -
		    APIX_SW_RESERVED_VECTORS;

		/* Number of vectors (pre) allocated (SCI and HPET) */
		apix_irminfo.apix_vectors_allocated = 0;
		if (apic_hpet_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
		if (apic_sci_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
	}
}
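
/*
 * A worked example of the IRM pool sizing above (hypothetical
 * topology): with two IOAPICs routing GSIs 0-23 and 24-55, the loop
 * computes lowest_irq = 0 and highest_irq = 55, so
 * apix_ioapic_max_vectors = 56. Note the pool is sized from the
 * overall span, so holes between the per-IOAPIC GSI ranges are
 * counted as well.
 */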

static void
apix_init_intr()
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
	extern void cmi_cmci_trap(void);

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}

	if (apic_directed_EOI_supported()) {
		/*
		 * Setting the 12th bit in the Spurious Interrupt Vector
		 * Register suppresses broadcast EOIs generated by the local
		 * APIC. The suppression of broadcast EOIs happens only when
		 * interrupts are level-triggered.
		 */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);

	/*
	 * Presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked...so
	 * write a valid vector to LVT entries along with the mask bit
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */

	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */
	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
	}

	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if (!is_x86_feature(x86_featureset, X86FSET_MSR))
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;

				apic_cpcovf_vect = apix_get_ipivect(ipl, -1);
				ASSERT(apic_cpcovf_vect);

				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", apic_cpcovf_vect,
				    NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}

	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}

	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			apic_errvect = apix_get_ipivect(ipl, -1);
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr(NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    apic_errvect, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}

	/* Enable CMCI interrupt */
	if (cmi_enable_cmci) {
		mutex_enter(&cmci_cpu_setup_lock);
		if (cmci_cpu_setup_registered == 0) {
			mutex_enter(&cpu_lock);
			register_cpu_setup_func(cmci_cpu_setup, NULL);
			mutex_exit(&cpu_lock);
			cmci_cpu_setup_registered = 1;
		}
		mutex_exit(&cmci_cpu_setup_lock);

		if (apic_cmci_vect == 0) {
			int ipl = 0x2;
			apic_cmci_vect = apix_get_ipivect(ipl, -1);
			ASSERT(apic_cmci_vect);

			(void) add_avintr(NULL, ipl,
			    (avfunc)cmi_cmci_trap, "apic cmci intr",
			    apic_cmci_vect, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	}

	apic_reg_ops->apic_write_task_reg(0);
}
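
/*
 * A worked example of the LVT count computed above (illustrative
 * register value): APIC_VERS_REG reports 'Max LVT entry' in bits
 * 16-23, so a read of 0x00050014 yields ((0x00050014 >> 16) & 0xFF)
 * + 1 = 6 LVT entries, which is enough for the thermal monitor LVT
 * (the nlvt >= 6 case handled above).
 */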

static void
apix_picinit(void)
{
	int i, j;
	uint_t isr;

	APIC_VERBOSE(INIT, (CE_CONT, "apix: psm_picinit\n"));

	/*
	 * initialize interrupt remapping before apic
	 * hardware initialization
	 */
	apic_intrmap_init(apic_mode);
	if (apic_vt_ops == psm_vt_ops)
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_IIR;

	/*
	 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
	 * bit on without clearing it with EOI. Since softint
	 * uses vector 0x20 to interrupt itself, softint will
	 * not work on this machine. In order to fix this problem
	 * a check is made to verify all the isr bits are clear.
	 * If not, EOIs are issued to clear the bits.
	 */
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}

	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;
	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);
	LOCK_INIT_CLEAR(&apic_mode_switch_lock);

	picsetup();	/* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "apix NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "apix: Unable to add nmi handler");

	apix_init_intr();

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapix_init_intr(IOAPIC_MASK);

	/* setup global IRM pool if applicable */
	if (irm_enable)
		apix_irm_init();
}

static __inline__ void
apix_send_eoi(void)
{
	if (apic_mode == LOCAL_APIC)
		LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
	else
		X2APIC_WRITE(APIC_EOI_REG, 0);
}

/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine, but unlike
 *	pcplusmp, does not mask interrupts. An EOI is given to the interrupt
 *	controller to enable other HW interrupts but interrupts are still
 *	masked by the IF flag.
 *
 *	Return -1 for spurious interrupts
 *
 */
static int
apix_intr_enter(int ipl, int *vectorp)
{
	struct cpu *cpu = CPU;
	uint32_t cpuid = CPU->cpu_id;
	apic_cpus_info_t *cpu_infop;
	uchar_t vector;
	apix_vector_t *vecp;
	int nipl = -1;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
	 */
	vector = *vectorp = (uchar_t)*vectorp + APIC_BASE_VECT;

	cpu_infop = &apic_cpus[cpuid];
	if (vector == APIC_SPUR_INTR) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	vecp = xv_vector(cpuid, vector);
	if (vecp == NULL) {
		if (APIX_IS_FAKE_INTR(vector))
			nipl = apix_rebindinfo.i_pri;
		apix_send_eoi();
		return (nipl);
	}
	nipl = vecp->v_pri;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == (apic_clkvect + APIC_BASE_VECT)) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apix_redistribute_compute();
		}

		apix_send_eoi();

		return (nipl);
	}

	ASSERT(vecp->v_state != APIX_STATE_OBSOLETED);

	/* pre-EOI handling for level-triggered interrupts */
	if (!APIX_IS_DIRECTED_EOI(apix_mul_ioapic_method) &&
	    (vecp->v_type & APIX_TYPE_FIXED) && apic_level_intr[vecp->v_inum])
		apix_level_intr_pre_eoi(vecp->v_inum);

	/* send back EOI */
	apix_send_eoi();

	cpu_infop->aci_current[nipl] = vector;
	if ((nipl > ipl) && (nipl > cpu->cpu_base_spl)) {
		cpu_infop->aci_curipl = (uchar_t)nipl;
		cpu_infop->aci_ISR_in_progress |= 1 << nipl;
	}

#ifdef	DEBUG
	if (vector >= APIX_IPI_MIN)
		return (nipl);	/* skip IPI */

	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(vecp->v_inum);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);
#endif /* DEBUG */

	return (nipl);
}

/*
 * Any changes made to this function must also change X2APIC
 * version of intr_exit.
 */
static void
apix_intr_exit(int prev_ipl, int arg2)
{
	int cpuid = psm_get_cpu_id();
	apic_cpus_info_t *cpu_infop = &apic_cpus[cpuid];
	apix_impl_t *apixp = apixs[cpuid];

	UNREFERENCED_1PARAMETER(arg2);

	cpu_infop->aci_curipl = (uchar_t)prev_ipl;
	/* ISR above current pri could not be in progress */
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1;

	if (apixp->x_obsoletes != NULL) {
		if (APIX_CPU_LOCK_HELD(cpuid))
			return;

		APIX_ENTER_CPU_LOCK(cpuid);
		(void) apix_obsolete_vector(apixp->x_obsoletes);
		APIX_LEAVE_CPU_LOCK(cpuid);
	}
}
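
/*
 * A worked example of the in-progress mask used above and in
 * apix_setspl() below: (2 << prev_ipl) - 1 keeps bits 0..prev_ipl and
 * clears everything higher. For prev_ipl = 4, (2 << 4) - 1 = 0x1f, so
 * any aci_ISR_in_progress bits for IPL 5 and above are cleared,
 * matching the invariant that no ISR above the current priority can
 * still be in progress.
 */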

/*
 * The pcplusmp setspl code uses the TPR to mask all interrupts at or below the
 * given ipl, but apix never uses the TPR and we never mask a subset of the
 * interrupts. They are either all blocked by the IF flag or all can come in.
 *
 * For setspl, we mask all interrupts for XC_HI_PIL (15), otherwise, interrupts
 * can come in if currently enabled by the IF flag. This table shows the state
 * of the IF flag when we leave this function.
 *
 * curr IF |	ipl == 15	ipl != 15
 * --------+---------------------------
 *       0 |	0		0
 *       1 |	0		1
 */
static void
apix_setspl(int ipl)
{
	/*
	 * Interrupts at ipl above this cannot be in progress, so the following
	 * mask is ok.
	 */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;

	if (ipl == XC_HI_PIL)
		cli();
}

int
apix_addspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);

	UNREFERENCED_3PARAMETER(ipl, min_ipl, max_ipl);
	ASSERT(vecp != NULL && LOCK_HELD(&apix_lock));

	if (vecp->v_type == APIX_TYPE_FIXED)
		apix_intx_set_shared(vecp->v_inum, 1);

	/* There are more interrupts, so it's already been enabled */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);

	/*
	 * if apix_picinit() has not been called yet, just return.
	 * At the end of apix_picinit(), we will call setup_io_intr().
	 */
	if (!apic_picinit_called)
		return (PSM_SUCCESS);

	(void) apix_setup_io_intr(vecp);

	return (PSM_SUCCESS);
}

int
apix_delspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);

	UNREFERENCED_3PARAMETER(ipl, min_ipl, max_ipl);
	ASSERT(vecp != NULL && LOCK_HELD(&apix_lock));

	if (vecp->v_type == APIX_TYPE_FIXED)
		apix_intx_set_shared(vecp->v_inum, -1);

	/* There are more interrupts */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);

	if (!apic_picinit_called) {
		cmn_err(CE_WARN, "apix: delete 0x%x before apic init",
		    virtvec);
		return (PSM_SUCCESS);
	}

	apix_disable_vector(vecp);

	return (PSM_SUCCESS);
}
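
/*
 * A note on the v_share accounting above (summarizing the logic, not
 * adding to it): the first apix_addspl() on a vector is what actually
 * programs the interrupt through apix_setup_io_intr(); later sharers
 * only bump the share count. Symmetrically, apix_delspl() leaves the
 * hardware alone until the last sharer goes away, at which point the
 * vector is disabled.
 */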

/*
 * Try and disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */
static int
apix_disable_intr(processorid_t cpun)
{
	apix_impl_t *apixp = apixs[cpun];
	apix_vector_t *vecp, *newp;
	int bindcpu, i, hardbound = 0, errbound = 0, ret, loop, type;

	lock_set(&apix_lock);

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;
	apic_cpus[cpun].aci_curipl = 0;

	/* if this is for SUSPEND operation, skip rebinding */
	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND) {
		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = apixp->x_vectbl[i];
			if (!IS_VECT_ENABLED(vecp))
				continue;

			apix_disable_vector(vecp);
		}
		lock_clear(&apix_lock);
		return (PSM_SUCCESS);
	}

	for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
		vecp = apixp->x_vectbl[i];
		if (!IS_VECT_ENABLED(vecp))
			continue;

		if (vecp->v_flags & APIX_VECT_USER_BOUND) {
			hardbound++;
			continue;
		}
		type = vecp->v_type;

		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		loop = 0;
		do {
			bindcpu = apic_find_cpu(APIC_CPU_INTR_ENABLE);

			if (type != APIX_TYPE_MSI)
				newp = apix_set_cpu(vecp, bindcpu, &ret);
			else
				newp = apix_grp_set_cpu(vecp, bindcpu, &ret);
		} while ((newp == NULL) && (loop++ < apic_nproc));

		if (loop >= apic_nproc) {
			errbound++;
			cmn_err(CE_WARN, "apix: failed to rebind vector %x/%x",
			    vecp->v_cpuid, vecp->v_vector);
		}
	}

	lock_clear(&apix_lock);

	if (hardbound || errbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on %d "
		    "due to user bound interrupts or failed operation",
		    cpun);
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}

/*
 * Bind interrupts to specified CPU
 */
static void
apix_enable_intr(processorid_t cpun)
{
	apix_vector_t *vecp;
	int i, ret;
	processorid_t n;

	lock_set(&apix_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	/* interrupt enabling for system resume */
	if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND) {
		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = xv_vector(cpun, i);
			if (!IS_VECT_ENABLED(vecp))
				continue;

			apix_enable_vector(vecp);
		}
		apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;
	}

	for (n = 0; n < apic_nproc; n++) {
		if (!apic_cpu_in_range(n) || n == cpun ||
		    (apic_cpus[n].aci_status & APIC_CPU_INTR_ENABLE) == 0)
			continue;

		for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
			vecp = xv_vector(n, i);
			if (!IS_VECT_ENABLED(vecp) ||
			    vecp->v_bound_cpuid != cpun)
				continue;

			if (vecp->v_type != APIX_TYPE_MSI)
				(void) apix_set_cpu(vecp, cpun, &ret);
			else
				(void) apix_grp_set_cpu(vecp, cpun, &ret);
		}
	}

	lock_clear(&apix_lock);
}

/*
 * Allocate vector for IPI
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests.
 */
static int
apix_get_ipivect(int ipl, int type)
{
	uchar_t vector;

	if ((vector = apix_alloc_ipi(ipl)) > 0) {
		if (type != -1)
			apic_resv_vector[ipl] = vector;
		return (vector);
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}

static int
apix_get_clkvect(int ipl)
{
	int vector;

	if ((vector = apix_get_ipivect(ipl, -1)) == -1)
		return (-1);

	apic_clkvect = vector - APIC_BASE_VECT;
	APIC_VERBOSE(IPI, (CE_CONT, "apix: clock vector = %x\n",
	    apic_clkvect));
	return (vector);
}
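
/*
 * A worked example of the bias above (illustrative vector value):
 * apic_clkvect is stored with APIC_BASE_VECT (0x20) subtracted because
 * the common interrupt entry point hands apix_intr_enter() a vector
 * that is already biased by -0x20. If apix_get_ipivect() returns
 * hardware vector 0xF0, apic_clkvect becomes 0xD0, and the clock test
 * in apix_intr_enter() compares the re-biased vector against
 * (apic_clkvect + APIC_BASE_VECT) == 0xF0.
 */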

static int
apix_post_cpu_start()
{
	int cpun;
	static int cpus_started = 1;

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On BSP we would have enabled X2APIC, if supported by processor,
	 * in acpi_probe(), but on AP we do it here.
	 *
	 * We enable X2APIC mode only if BSP is running in X2APIC & the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apix_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * cpus, so we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif	/* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);

	return (PSM_SUCCESS);
}

/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
 */
static void
apix_post_cyclic_setup(void *arg)
{
	UNREFERENCED_1PARAMETER(arg);

	cyc_handler_t cyh;
	cyc_time_t cyt;

	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode intr redistribution processing is done in
	 * apic_intr_enter during clk intr processing
	 */
	if (!apic_oneshot)
		return;

	/*
	 * Register a periodical handler for the redistribution processing.
	 * Though we would generally prefer to use the DDI interface for
	 * periodic handler invocation, ddi_periodic_add(9F), we are
	 * unfortunately already holding cpu_lock, which ddi_periodic_add will
	 * attempt to take for us.  Thus, we add our own cyclic directly:
	 */
	cyh.cyh_func = (void (*)(void *))apix_redistribute_compute;
	cyh.cyh_arg = NULL;
	cyh.cyh_level = CY_LOW_LEVEL;

	cyt.cyt_when = 0;
	cyt.cyt_interval = apic_redistribute_sample_interval;

	apic_cyclic_id = cyclic_add(&cyh, &cyt);
}

/*
 * Called the first time we enable x2apic mode on this cpu.
 * Update some of the function pointers to use x2apic routines.
 */
void
x2apic_update_psm()
{
	struct psm_ops *pops = &apix_ops;

	ASSERT(pops != NULL);

	/*
	 * The pcplusmp module's version of x2apic_update_psm makes additional
	 * changes that we do not have to make here. It needs to make those
	 * changes because pcplusmp relies on the TPR register and the means of
	 * addressing that changes when using the local apic versus the x2apic.
	 * It's also worth noting that the apix driver specific functions end
	 * up being apix_foo as opposed to apic_foo and x2apic_foo.
	 */
	pops->psm_send_ipi = x2apic_send_ipi;

	send_dirintf = pops->psm_send_ipi;

	apic_mode = LOCAL_X2APIC;
	apic_change_ops();
}

/*
 * This function provides external interface to the nexus for all
 * functionalities related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
static int
apix_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap;
	apix_vector_t	*vecp, *newvecp;
	struct intrspec *ispec, intr_spec;
	processorid_t target;

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_ALLOC_VECTORS:
		switch (hdlp->ih_type) {
		case DDI_INTR_TYPE_MSI:
			/* allocate MSI vectors */
			*result = apix_alloc_msi(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1,
			    (int)(uintptr_t)hdlp->ih_scratch2);
			break;
		case DDI_INTR_TYPE_MSIX:
			/* allocate MSI-X vectors */
			*result = apix_alloc_msix(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1,
			    (int)(uintptr_t)hdlp->ih_scratch2);
			break;
		case DDI_INTR_TYPE_FIXED:
			/* allocate or share vector for fixed */
			if ((ihdl_plat_t *)hdlp->ih_private == NULL) {
				return (PSM_FAILURE);
			}
			ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
			*result = apix_intx_alloc_vector(dip, hdlp->ih_inum,
			    ispec);
			break;
		default:
			return (PSM_FAILURE);
		}
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apix_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_type);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		/*
		 * Vectors are allocated by ALLOC and freed by FREE.
		 * XLATE finds and returns APIX_VIRTVECTOR(cpu, vector).
		 */
		*result = APIX_INVALID_VECT;
		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp != NULL) {
			*result = APIX_VIRTVECTOR(vecp->v_cpuid,
			    vecp->v_vector);
			break;
		}

		/*
		 * No vector-to-device mapping exists. If this is a FIXED
		 * type interrupt, check whether this IRQ is already mapped
		 * for another device and, if so, return its vector (the
		 * shared IRQ case). Otherwise, return PSM_FAILURE.
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			vecp = apix_intx_xlate_vector(dip, hdlp->ih_inum,
			    ispec);
			*result = (vecp == NULL) ? APIX_INVALID_VECT :
			    APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
		}
		if (*result == APIX_INVALID_VECT)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_GET_PENDING:
		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		*result = apix_get_pending(vecp);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		apix_intx_clear_mask(vecp->v_inum);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		apix_intx_set_mask(vecp->v_inum);
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);

		vecp = apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type);
		if (vecp == NULL)
			return (PSM_FAILURE);

		*result = apix_intx_get_shared(vecp->v_inum);
		break;
	case PSM_INTR_OP_SET_PRI:
		/*
		 * Called prior to adding the interrupt handler or when
		 * an interrupt handler is unassigned.
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			return (PSM_SUCCESS);

		if (apix_get_dev_map(dip, hdlp->ih_inum, hdlp->ih_type) == NULL)
			return (PSM_FAILURE);

		break;
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GRP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		*result = EINVAL;
		target = (int)(intptr_t)hdlp->ih_private;
		if (!apic_cpu_in_range(target)) {
			DDI_INTR_IMPLDBG((CE_WARN,
			    "[grp_]set_cpu: cpu out of range: %d\n", target));
			return (PSM_FAILURE);
		}

		lock_set(&apix_lock);

		vecp = apix_get_req_vector(hdlp, hdlp->ih_flags);
		if (!IS_VECT_ENABLED(vecp)) {
			DDI_INTR_IMPLDBG((CE_WARN,
			    "[grp]_set_cpu: invalid vector 0x%x\n",
			    hdlp->ih_vector));
			lock_clear(&apix_lock);
			return (PSM_FAILURE);
		}

		*result = 0;

		if (intr_op == PSM_INTR_OP_SET_CPU)
			newvecp = apix_set_cpu(vecp, target, result);
		else
			newvecp = apix_grp_set_cpu(vecp, target, result);

		lock_clear(&apix_lock);

		if (newvecp == NULL) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		newvecp->v_bound_cpuid = target;
		hdlp->ih_vector = APIX_VIRTVECTOR(newvecp->v_cpuid,
		    newvecp->v_vector);
		break;

	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to a apic_get_intr_t.
		 */
		if (apix_get_intr_info(hdlp, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;

	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and,
		 * if not, mask off the MSI/X bits in hdlp->ih_type before
		 * returning. If MSI/X is supported, leave ih_type unchanged.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {	/* uninitialized */
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;	/* supported */
			else
				apic_support_msi = -1;	/* not-supported */
		}
		if (apic_support_msi == 1) {
			if (apic_msix_enable)
				*result = hdlp->ih_type;
			else
				*result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX;
		} else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_APIC_TYPE:
		((apic_get_type_t *)(hdlp->ih_private))->avgi_type =
		    apix_get_apic_type();
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_intr =
		    APIX_IPI_MIN;
		((apic_get_type_t *)(hdlp->ih_private))->avgi_num_cpu =
		    apic_nproc;
		hdlp->ih_ver = apic_get_apic_version();
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}

static void
apix_cleanup_busy(void)
{
	int i, j;
	apix_vector_t *vecp;

	for (i = 0; i < apic_nproc; i++) {
		if (!apic_cpu_in_range(i))
			continue;
		apic_cpus[i].aci_busy = 0;
		for (j = APIX_AVINTR_MIN; j < APIX_AVINTR_MAX; j++) {
			if ((vecp = xv_vector(i, j)) != NULL)
				vecp->v_busy = 0;
		}
	}
}
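
/*
 * How the sampling below fits together (a summary, not new behavior):
 * apix_redistribute_compute() runs once per clock tick, either from
 * apix_intr_enter() in periodic timer mode or from the cyclic set up
 * in apix_post_cyclic_setup() in one-shot mode. Each run charges the
 * vector currently in service on every CPU with one "busy" point;
 * every apic_sample_factor_redistribution ticks the window closes and
 * the peak is compared against apic_int_busy_mark to decide whether a
 * redistribution pass would be worthwhile (see the comments in the
 * function).
 */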

static void
apix_redistribute_compute(void)
{
	int	i, j, max_busy;

	if (!apic_enable_dynamic_migration)
		return;

	if (++apic_nticks == apic_sample_factor_redistribution) {
		/*
		 * Time to call apic_intr_redistribute().
		 * reset apic_nticks. This will cause max_busy
		 * to be calculated below and if it is more than
		 * apic_int_busy, we will do the whole thing
		 */
		apic_nticks = 0;
	}
	max_busy = 0;
	for (i = 0; i < apic_nproc; i++) {
		if (!apic_cpu_in_range(i))
			continue;
		/*
		 * Check if curipl is non zero & if ISR is in
		 * progress
		 */
		if (((j = apic_cpus[i].aci_curipl) != 0) &&
		    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {

			int	vect;
			apic_cpus[i].aci_busy++;
			vect = apic_cpus[i].aci_current[j];
			apixs[i]->x_vectbl[vect]->v_busy++;
		}

		if (!apic_nticks &&
		    (apic_cpus[i].aci_busy > max_busy))
			max_busy = apic_cpus[i].aci_busy;
	}
	if (!apic_nticks) {
		if (max_busy > apic_int_busy_mark) {
			/*
			 * We could make the following check be
			 * skipped > 1 in which case, we get a
			 * redistribution at half the busy mark (due to
			 * double interval). Need to be able to collect
			 * more empirical data to decide if that is a
			 * good strategy. Punt for now.
			 */
			apix_cleanup_busy();
			apic_skipped_redistribute = 0;
		} else
			apic_skipped_redistribute++;
	}
}

/*
 * intr_ops() service routines
 */

static int
apix_get_pending(apix_vector_t *vecp)
{
	int bit, index, irr, pending;

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(vecp->v_cpuid);

	index = vecp->v_vector / 32;
	bit = vecp->v_vector % 32;
	irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && vecp->v_type == APIX_TYPE_FIXED)
		pending = apix_intx_get_pending(vecp->v_inum);

	return (pending);
}

static apix_vector_t *
apix_get_req_vector(ddi_intr_handle_impl_t *hdlp, ushort_t flags)
{
	apix_vector_t *vecp;
	processorid_t cpuid;
	int32_t virt_vec = 0;

	switch (flags & PSMGI_INTRBY_FLAGS) {
	case PSMGI_INTRBY_IRQ:
		return (apix_intx_get_vector(hdlp->ih_vector));
	case PSMGI_INTRBY_VEC:
		virt_vec = (virt_vec == 0) ? hdlp->ih_vector : virt_vec;

		cpuid = APIX_VIRTVEC_CPU(virt_vec);
		if (!apic_cpu_in_range(cpuid))
			return (NULL);

		vecp = xv_vector(cpuid, APIX_VIRTVEC_VECTOR(virt_vec));
		break;
	case PSMGI_INTRBY_DEFAULT:
		vecp = apix_get_dev_map(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_type);
		break;
	default:
		return (NULL);
	}

	return (vecp);
}

static int
apix_get_intr_info(ddi_intr_handle_impl_t *hdlp,
    apic_get_intr_t *intr_params_p)
{
	apix_vector_t *vecp;
	struct autovec *av_dev;
	int i;

	vecp = apix_get_req_vector(hdlp, intr_params_p->avgi_req_flags);
	if (IS_VECT_FREE(vecp)) {
		intr_params_p->avgi_num_devs = 0;
		intr_params_p->avgi_cpu_id = 0;
		intr_params_p->avgi_req_flags = 0;
		return (PSM_SUCCESS);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {
		intr_params_p->avgi_cpu_id = vecp->v_cpuid;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
		intr_params_p->avgi_vector = vecp->v_vector;

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = vecp->v_share;

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip. Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = vecp->v_autovect; av_dev;
			    av_dev = av_dev->av_link) {
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			}
			intr_params_p->avgi_num_devs =
			    (uint8_t)MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0) {
			intr_params_p->avgi_dip_list = NULL;

		} else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_NOSLEEP);
			if (intr_params_p->avgi_dip_list == NULL) {
				DDI_INTR_IMPLDBG((CE_WARN,
				    "apix_get_vector_intr_info: no memory"));
				return (PSM_FAILURE);
			}

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips. These will be
			 * ignored.
			 */
			for (i = 0, av_dev = vecp->v_autovect; av_dev;
			    av_dev = av_dev->av_link) {
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
			}
		}
	}

	return (PSM_SUCCESS);
}

static char *
apix_get_apic_type(void)
{
	return (apix_psm_info.p_mach_idstring);
}

apix_vector_t *
apix_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	apix_vector_t *newp = NULL;
	dev_info_t *dip;
	int inum, cap_ptr;
	ddi_acc_handle_t handle;
	ddi_intr_msix_t *msix_p = NULL;
	ushort_t msix_ctrl;
	uintptr_t off;
	uint32_t mask;

	ASSERT(LOCK_HELD(&apix_lock));
	*result = ENXIO;

	/* Fail if this is an MSI intr and is part of a group. */
	if (vecp->v_type == APIX_TYPE_MSI) {
		if (i_ddi_intr_get_current_nintrs(APIX_GET_DIP(vecp)) > 1)
			return (NULL);
		else
			return (apix_grp_set_cpu(vecp, new_cpu, result));
	}

	/*
	 * Mask MSI-X. It's unmasked when MSI-X gets enabled.
	 */
	if (vecp->v_type == APIX_TYPE_MSIX && IS_VECT_ENABLED(vecp)) {
		if ((dip = APIX_GET_DIP(vecp)) == NULL)
			return (NULL);
		inum = vecp->v_devp->dv_inum;

		handle = i_ddi_get_pci_config_handle(dip);
		cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
		msix_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
		if ((msix_ctrl & PCI_MSIX_FUNCTION_MASK) == 0) {
			/*
			 * The function is not masked, so mask the
			 * "inum"th entry in the MSI-X table
			 */
			msix_p = i_ddi_get_msix(dip);
			off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
			    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
			mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
			ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off,
			    mask | 1);
		}
	}

	*result = 0;
	if ((newp = apix_rebind(vecp, new_cpu, 1)) == NULL)
		*result = EIO;

	/* Restore mask bit */
	if (msix_p != NULL)
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, mask);

	return (newp);
}

/*
 * Set cpu for MSIs
 */
apix_vector_t *
apix_grp_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	apix_vector_t *newp, *vp;
	uint32_t orig_cpu = vecp->v_cpuid;
	int orig_vect = vecp->v_vector;
	int i, num_vectors, cap_ptr, msi_mask_off;
	uint32_t msi_pvm;
	ushort_t msi_ctrl;
	ddi_acc_handle_t handle;
	dev_info_t *dip;

	APIC_VERBOSE(INTR, (CE_CONT, "apix_grp_set_cpu: oldcpu: %x, vector: %x,"
	    " newcpu:%x\n", vecp->v_cpuid, vecp->v_vector, new_cpu));

	ASSERT(LOCK_HELD(&apix_lock));

	*result = ENXIO;

	if (vecp->v_type != APIX_TYPE_MSI) {
		DDI_INTR_IMPLDBG((CE_WARN, "set_grp: intr not MSI\n"));
		return (NULL);
	}

	if ((dip = APIX_GET_DIP(vecp)) == NULL)
		return (NULL);

	num_vectors = i_ddi_intr_get_current_nintrs(dip);
	if ((num_vectors < 1) || ((num_vectors - 1) & orig_vect)) {
		APIC_VERBOSE(INTR, (CE_WARN,
		    "set_grp: base vec not part of a grp or not aligned: "
		    "vec:0x%x, num_vec:0x%x\n", orig_vect, num_vectors));
		return (NULL);
	}

	if (vecp->v_inum != apix_get_min_dev_inum(dip, vecp->v_type))
		return (NULL);

	*result = EIO;
	for (i = 1; i < num_vectors; i++) {
		if ((vp = xv_vector(orig_cpu, orig_vect + i)) == NULL)
			return (NULL);
#ifdef	DEBUG
		/*
		 * Sanity check: CPU and dip are the same for all entries.
		 * May be called when the first MSI is being enabled; at
		 * that time add_avintr() has not been called for the
		 * other MSIs.
		 */
		if ((vp->v_share != 0) &&
		    ((APIX_GET_DIP(vp) != dip) ||
		    (vp->v_cpuid != vecp->v_cpuid))) {
			APIC_VERBOSE(INTR, (CE_WARN,
			    "set_grp: cpu or dip for vec 0x%x difft than for "
			    "vec 0x%x\n", orig_vect, orig_vect + i));
			APIC_VERBOSE(INTR, (CE_WARN,
			    "  cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
			    vp->v_cpuid, (void *)dip,
			    (void *)APIX_GET_DIP(vp)));
			return (NULL);
		}
#endif /* DEBUG */
	}

	cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	handle = i_ddi_get_pci_config_handle(dip);
	msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

	/* MSI Per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		if (msi_ctrl & PCI_MSI_64BIT_MASK)
			msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
		else
			msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
		msi_pvm = pci_config_get32(handle, msi_mask_off);
		pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
		APIC_VERBOSE(INTR, (CE_CONT,
		    "set_grp: pvm supported. Mask set to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}

	if ((newp = apix_rebind(vecp, new_cpu, num_vectors)) != NULL)
		*result = 0;

	/* Reenable vectors if per vector masking is supported. */
	if (msi_ctrl & PCI_MSI_PVM_MASK) {
		pci_config_put32(handle, msi_mask_off, msi_pvm);
		APIC_VERBOSE(INTR, (CE_CONT,
		    "set_grp: pvm supported. Mask restored to 0x%x\n",
		    pci_config_get32(handle, msi_mask_off)));
	}

	return (newp);
}
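
/*
 * A worked example of the alignment check near the top of
 * apix_grp_set_cpu() (illustrative values): PCI MSI grants a device a
 * power-of-two block of consecutive vectors, so for num_vectors == 4
 * a valid base vector must satisfy (orig_vect & 3) == 0. A base of
 * 0x60 passes because ((4 - 1) & 0x60) == 0, while 0x62 fails and the
 * rebind is rejected before any hardware is touched.
 */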

void
apix_intx_set_vector(int irqno, uint32_t cpuid, uchar_t vector)
{
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	irqp->airq_cpu = cpuid;
	irqp->airq_vector = vector;
	apic_record_rdt_entry(irqp, irqno);
	mutex_exit(&airq_mutex);
}

apix_vector_t *
apix_intx_get_vector(int irqno)
{
	apic_irq_t *irqp;
	uint32_t cpuid;
	uchar_t vector;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno & 0xff];
	if (IS_IRQ_FREE(irqp) || (irqp->airq_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (NULL);
	}
	cpuid = irqp->airq_cpu;
	vector = irqp->airq_vector;
	mutex_exit(&airq_mutex);

	return (xv_vector(cpuid, vector));
}

/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
 */
void
apix_intx_enable(int irqno)
{
	uchar_t ioapicindex, intin;
	apic_irq_t *irqp = apic_irq_table[irqno];
	ioapic_rdt_t irdt;
	apic_cpus_info_t *cpu_infop;
	apix_vector_t *vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);

	ASSERT(LOCK_HELD(&apic_ioapic_lock) && !IS_IRQ_FREE(irqp));

	ioapicindex = irqp->airq_ioapicindex;
	intin = irqp->airq_intin_no;
	cpu_infop = &apic_cpus[irqp->airq_cpu];

	irdt.ir_lo = AV_PDEST | AV_FIXED | irqp->airq_rdt_entry;
	irdt.ir_hi = cpu_infop->aci_local_id;

	apic_vt_ops->apic_intrmap_alloc_entry(&vecp->v_intrmap_private, NULL,
	    vecp->v_type, 1, ioapicindex);
	apic_vt_ops->apic_intrmap_map_entry(vecp->v_intrmap_private,
	    (void *)&irdt, vecp->v_type, 1);
	apic_vt_ops->apic_intrmap_record_rdt(vecp->v_intrmap_private, &irdt);

	/* write RDT entry high dword - destination */
	WRITE_IOAPIC_RDT_ENTRY_HIGH_DWORD(ioapicindex, intin,
	    irdt.ir_hi);

	/* Write the vector, trigger, and polarity portion of the RDT */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin, irdt.ir_lo);

	vecp->v_state = APIX_STATE_ENABLED;

	APIC_VERBOSE_IOAPIC((CE_CONT, "apix_intx_enable: ioapic 0x%x"
	    " intin 0x%x rdt_low 0x%x rdt_high 0x%x\n",
	    ioapicindex, intin, irdt.ir_lo, irdt.ir_hi));
}
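
/*
 * A note on the write ordering above (rationale, hedged): the RDT
 * entry is programmed high dword first so that the destination field
 * is valid before the low dword write can clear the mask bit; writing
 * in the opposite order could briefly leave an unmasked entry aimed
 * at a stale destination.
 */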

/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
 */
void
apix_intx_disable(int irqno)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
	int ioapicindex, intin;

	ASSERT(LOCK_HELD(&apic_ioapic_lock) && !IS_IRQ_FREE(irqp));
	/*
	 * The assumption here is that this is safe, even for
	 * systems with IOAPICs that suffer from the hardware
	 * erratum because all devices have been quiesced before
	 * they unregister their interrupt handlers. If that
	 * assumption turns out to be false, this mask operation
	 * can induce the same erratum result we're trying to
	 * avoid.
	 */
	ioapicindex = irqp->airq_ioapicindex;
	intin = irqp->airq_intin_no;
	ioapic_write(ioapicindex, APIC_RDT_CMD + 2 * intin, AV_MASK);

	APIC_VERBOSE_IOAPIC((CE_CONT, "apix_intx_disable: ioapic 0x%x"
	    " intin 0x%x\n", ioapicindex, intin));
}

void
apix_intx_free(int irqno)
{
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];

	if (IS_IRQ_FREE(irqp)) {
		mutex_exit(&airq_mutex);
		return;
	}

	irqp->airq_mps_intr_index = FREE_INDEX;
	irqp->airq_cpu = IRQ_UNINIT;
	irqp->airq_vector = APIX_INVALID_VECT;
	mutex_exit(&airq_mutex);
}

#ifdef	DEBUG
int apix_intr_deliver_timeouts = 0;
int apix_intr_rirr_timeouts = 0;
int apix_intr_rirr_reset_failure = 0;
#endif
int apix_max_reps_irr_pending = 10;

#define	GET_RDT_BITS(ioapic, intin, bits)	\
	(READ_IOAPIC_RDT_ENTRY_LOW_DWORD((ioapic), (intin)) & (bits))
#define	APIX_CHECK_IRR_DELAY	drv_usectohz(5000)

int
apix_intx_rebind(int irqno, processorid_t cpuid, uchar_t vector)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
	ulong_t iflag;
	int waited, ioapic_ix, intin_no, level, repeats, rdt_entry, masked;

	ASSERT(irqp != NULL);

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	ioapic_ix = irqp->airq_ioapicindex;
	intin_no = irqp->airq_intin_no;
	level = apic_level_intr[irqno];

	/*
	 * Wait for the delivery status bit to be cleared. This should
	 * be a very small amount of time.
	 */
	repeats = 0;
	do {
		repeats++;

		for (waited = 0; waited < apic_max_reps_clear_pending;
		    waited++) {
			if (GET_RDT_BITS(ioapic_ix, intin_no, AV_PENDING) == 0)
				break;
		}
		if (!level)
			break;

		/*
		 * Mask the RDT entry for level-triggered interrupts.
		 */
		irqp->airq_rdt_entry |= AV_MASK;
		rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
		    intin_no);
		if ((masked = (rdt_entry & AV_MASK)) == 0) {
			/* Mask it */
			WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix, intin_no,
			    AV_MASK | rdt_entry);
		}

		/*
		 * If there was a race and an interrupt was injected
		 * just before we masked, check for that case here.
		 * Then, unmask the RDT entry and try again. If we're
		 * on our last try, don't unmask (because we want the
		 * RDT entry to remain masked for the rest of the
		 * function).
		 */
		rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
		    intin_no);
		if ((masked == 0) && ((rdt_entry & AV_PENDING) != 0) &&
		    (repeats < apic_max_reps_clear_pending)) {
			/* Unmask it */
			WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic_ix,
			    intin_no, rdt_entry & ~AV_MASK);
			irqp->airq_rdt_entry &= ~AV_MASK;
		}
	} while ((rdt_entry & AV_PENDING) &&
	    (repeats < apic_max_reps_clear_pending));

#ifdef DEBUG
	if (GET_RDT_BITS(ioapic_ix, intin_no, AV_PENDING) != 0)
		apix_intr_deliver_timeouts++;
#endif

	if (!level || !APIX_IS_MASK_RDT(apix_mul_ioapic_method))
		goto done;

	/*
	 * wait for remote IRR to be cleared for level-triggered
	 * interrupts
	 */
	repeats = 0;
	do {
		repeats++;

		for (waited = 0; waited < apic_max_reps_clear_pending;
		    waited++) {
			if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR)
			    == 0)
				break;
		}

		if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR) != 0) {
			lock_clear(&apic_ioapic_lock);
			intr_restore(iflag);

			delay(APIX_CHECK_IRR_DELAY);

			iflag = intr_clear();
			lock_set(&apic_ioapic_lock);
		}
	} while (repeats < apix_max_reps_irr_pending);

	if (repeats >= apix_max_reps_irr_pending) {
#ifdef DEBUG
		apix_intr_rirr_timeouts++;
#endif

		/*
		 * If we waited and the Remote IRR bit is still not cleared,
		 * AND if we've invoked the timeout APIC_REPROGRAM_MAX_TIMEOUTS
		 * times for this interrupt, try the last-ditch workaround:
		 */
		if (GET_RDT_BITS(ioapic_ix, intin_no, AV_REMOTE_IRR) != 0) {
			/*
			 * Trying to clear the bit through normal
			 * channels has failed. So as a last-ditch
			 * effort, try to set the trigger mode to
			 * edge, then to level. This has been
			 * observed to work on many systems.
			 */
static int
apix_intx_get_pending(int irqno)
{
	apic_irq_t *irqp;
	int intin, ioapicindex, pending;
	ulong_t iflag;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	if (IS_IRQ_FREE(irqp)) {
		mutex_exit(&airq_mutex);
		return (0);
	}

	/* check IO-APIC delivery status */
	intin = irqp->airq_intin_no;
	ioapicindex = irqp->airq_ioapicindex;
	mutex_exit(&airq_mutex);

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin) &
	    AV_PENDING) ? 1 : 0;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	return (pending);
}

/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apix_intx_set_mask(int irqno)
{
	int intin, ioapicindex, rdt_entry;
	ulong_t iflag;
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];

	ASSERT(irqp->airq_mps_intr_index != FREE_INDEX);

	intin = irqp->airq_intin_no;
	ioapicindex = irqp->airq_ioapicindex;
	mutex_exit(&airq_mutex);

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin);

	/* set the mask bit */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}

/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apix_intx_clear_mask(int irqno)
{
	int intin, ioapicindex, rdt_entry;
	ulong_t iflag;
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];

	ASSERT(irqp->airq_mps_intr_index != FREE_INDEX);

	intin = irqp->airq_intin_no;
	ioapicindex = irqp->airq_ioapicindex;
	mutex_exit(&airq_mutex);

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin);

	/* clear the mask bit */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}
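/*
 * Editorial note: apix_intx_set_mask() and apix_intx_clear_mask() above
 * share one read-modify-write shape on the low RDT dword. A condensed
 * sketch follows, assuming the caller already holds apic_ioapic_lock
 * with interrupts disabled; the helper name rdt_update_mask() is
 * hypothetical and the block is illustrative only (not compiled).
 */
#if 0
static void
rdt_update_mask(int ioapicindex, int intin, boolean_t mask)
{
	int rdt_entry;

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin);
	if (mask)
		rdt_entry |= AV_MASK;	/* stop further delivery */
	else
		rdt_entry &= ~AV_MASK;	/* allow delivery again */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapicindex, intin, rdt_entry);
}
#endif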
/*
 * For a level-triggered interrupt, mask the IRQ line. Masking means
 * that new interrupts will not be delivered; an interrupt already
 * accepted by a local APIC is not affected.
 */
void
apix_level_intr_pre_eoi(int irq)
{
	apic_irq_t *irqp = apic_irq_table[irq];
	int apic_ix, intin_ix;

	if (irqp == NULL)
		return;

	ASSERT(apic_level_intr[irq] == TRIGGER_MODE_LEVEL);

	lock_set(&apic_ioapic_lock);

	intin_ix = irqp->airq_intin_no;
	apic_ix = irqp->airq_ioapicindex;

	if (irqp->airq_cpu != CPU->cpu_id) {
		if (!APIX_IS_MASK_RDT(apix_mul_ioapic_method))
			ioapic_write_eoi(apic_ix, irqp->airq_vector);
		lock_clear(&apic_ioapic_lock);
		return;
	}

	if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_IOXAPIC) {
		/*
		 * This is an IOxAPIC and there is an EOI register:
		 * Change the vector to a reserved, unused vector, so that
		 * the EOI from the local APIC won't clear the remote IRR
		 * for this level-triggered interrupt. Instead, we'll
		 * manually clear it in apix_post_hardint() after ISR
		 * handling.
		 */
		WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
		    (irqp->airq_rdt_entry & (~0xff)) | APIX_RESV_VECTOR);
	} else {
		WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
		    AV_MASK | irqp->airq_rdt_entry);
	}

	lock_clear(&apic_ioapic_lock);
}

/*
 * For a level-triggered interrupt, unmask the IRQ line
 * or restore the original vector number.
 */
void
apix_level_intr_post_dispatch(int irq)
{
	apic_irq_t *irqp = apic_irq_table[irq];
	int apic_ix, intin_ix;

	if (irqp == NULL)
		return;

	lock_set(&apic_ioapic_lock);

	intin_ix = irqp->airq_intin_no;
	apic_ix = irqp->airq_ioapicindex;

	if (APIX_IS_DIRECTED_EOI(apix_mul_ioapic_method)) {
		/*
		 * We already sent an EOI back to the local APIC.
		 * Send an EOI to the IO-APIC as well.
		 */
		ioapic_write_eoi(apic_ix, irqp->airq_vector);
	} else {
		/* clear the mask or restore the vector */
		WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_ix,
		    irqp->airq_rdt_entry);

		/* send an EOI to the IOxAPIC */
		if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_IOXAPIC)
			ioapic_write_eoi(apic_ix, irqp->airq_vector);
	}

	lock_clear(&apic_ioapic_lock);
}

static int
apix_intx_get_shared(int irqno)
{
	apic_irq_t *irqp;
	int share;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	if (IS_IRQ_FREE(irqp) || (irqp->airq_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (0);
	}
	share = irqp->airq_share;
	mutex_exit(&airq_mutex);

	return (share);
}

static void
apix_intx_set_shared(int irqno, int delta)
{
	apic_irq_t *irqp;

	mutex_enter(&airq_mutex);
	irqp = apic_irq_table[irqno];
	if (IS_IRQ_FREE(irqp)) {
		mutex_exit(&airq_mutex);
		return;
	}
	irqp->airq_share += delta;
	mutex_exit(&airq_mutex);
}
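/*
 * Editorial note: apix_level_intr_pre_eoi() and
 * apix_level_intr_post_dispatch() above bracket the handler for a
 * level-triggered interrupt. A sketch of that ordering follows;
 * run_level_handler() and call_device_isr() are hypothetical stand-ins
 * for the real dispatch path, and the block is illustrative only
 * (not compiled).
 */
#if 0
static void
run_level_handler(int irq)
{
	apix_level_intr_pre_eoi(irq);	/* mask RDT or park on resv vector */
	call_device_isr(irq);		/* device ISR; line may re-assert */
	apix_level_intr_post_dispatch(irq); /* restore RDT, EOI the IO-APIC */
}
#endif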
/*
 * Set up the IRQ table. Return the IRQ number, or -1 on failure.
 */
static int
apix_intx_setup(dev_info_t *dip, int inum, int irqno,
    struct apic_io_intr *intrp, struct intrspec *ispec, iflag_t *iflagp)
{
	int origirq = ispec->intrspec_vec;
	int newirq;
	short intr_index;
	uchar_t ipin, ioapic, ioapicindex;
	apic_irq_t *irqp;

	UNREFERENCED_1PARAMETER(inum);

	if (intrp != NULL) {
		intr_index = (short)(intrp - apic_io_intrp);
		ioapic = intrp->intr_destid;
		ipin = intrp->intr_destintin;

		/* Find ioapicindex. If destid was ALL, we will exit with 0. */
		for (ioapicindex = apic_io_max - 1; ioapicindex; ioapicindex--)
			if (apic_io_id[ioapicindex] == ioapic)
				break;
		ASSERT((ioapic == apic_io_id[ioapicindex]) ||
		    (ioapic == INTR_ALL_APIC));

		/* check whether this intin# has been used by another irqno */
		if ((newirq = apic_find_intin(ioapicindex, ipin)) != -1)
			return (newirq);

	} else if (iflagp != NULL) {	/* ACPI */
		intr_index = ACPI_INDEX;
		ioapicindex = acpi_find_ioapic(irqno);
		ASSERT(ioapicindex != 0xFF);
		ioapic = apic_io_id[ioapicindex];
		ipin = irqno - apic_io_vectbase[ioapicindex];

		if (apic_irq_table[irqno] &&
		    apic_irq_table[irqno]->airq_mps_intr_index == ACPI_INDEX) {
			ASSERT(apic_irq_table[irqno]->airq_intin_no == ipin &&
			    apic_irq_table[irqno]->airq_ioapicindex ==
			    ioapicindex);
			return (irqno);
		}

	} else {	/* default configuration */
		intr_index = DEFAULT_INDEX;
		ioapicindex = 0;
		ioapic = apic_io_id[ioapicindex];
		ipin = (uchar_t)irqno;
	}

	/* allocate a new IRQ no */
	if ((irqp = apic_irq_table[irqno]) == NULL) {
		irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
		apic_irq_table[irqno] = irqp;
	} else {
		if (irqp->airq_mps_intr_index != FREE_INDEX) {
			newirq = apic_allocate_irq(apic_first_avail_irq);
			if (newirq == -1) {
				return (-1);
			}
			irqno = newirq;
			irqp = apic_irq_table[irqno];
			ASSERT(irqp != NULL);
		}
	}
	apic_max_device_irq = max(irqno, apic_max_device_irq);
	apic_min_device_irq = min(irqno, apic_min_device_irq);

	irqp->airq_mps_intr_index = intr_index;
	irqp->airq_ioapicindex = ioapicindex;
	irqp->airq_intin_no = ipin;
	irqp->airq_dip = dip;
	irqp->airq_origirq = (uchar_t)origirq;
	if (iflagp != NULL)
		irqp->airq_iflag = *iflagp;
	irqp->airq_cpu = IRQ_UNINIT;
	irqp->airq_vector = 0;

	return (irqno);
}
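/*
 * Editorial note: the allocation step of apix_intx_setup() above has
 * three outcomes for a requested irqno: populate an empty slot, reuse a
 * free entry, or, if the slot is occupied, allocate a different IRQ via
 * apic_allocate_irq(). That decision is sketched in isolation below;
 * pick_irq_slot() is a hypothetical name and the block is illustrative
 * only (not compiled).
 */
#if 0
static int
pick_irq_slot(int irqno)
{
	apic_irq_t *irqp;

	if ((irqp = apic_irq_table[irqno]) == NULL) {
		/* empty slot: allocate a table entry for this irqno */
		irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
		apic_irq_table[irqno] = irqp;
	} else if (irqp->airq_mps_intr_index != FREE_INDEX) {
		/* slot in use: fall back to another free IRQ number */
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) == -1)
			return (-1);
	}
	return (irqno);	/* caller fills in apic_irq_table[irqno] */
}
#endif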
/*
 * Set up the IRQ table for non-PCI devices. Return the IRQ number,
 * or -1 on error.
 */
static int
apix_intx_setup_nonpci(dev_info_t *dip, int inum, int bustype,
    struct intrspec *ispec)
{
	int irqno = ispec->intrspec_vec;
	int newirq, i;
	iflag_t intr_flag;
	ACPI_SUBTABLE_HEADER *hp;
	ACPI_MADT_INTERRUPT_OVERRIDE *isop;
	struct apic_io_intr *intrp;

	if (!apic_enable_acpi || apic_use_acpi_madt_only) {
		int busid;

		if (bustype == 0)
			bustype = eisa_level_intr_mask ? BUS_EISA : BUS_ISA;

		/* loop checking BUS_ISA/BUS_EISA */
		for (i = 0; i < 2; i++) {
			if (((busid = apic_find_bus_id(bustype)) != -1) &&
			    ((intrp = apic_find_io_intr_w_busid(irqno, busid))
			    != NULL)) {
				return (apix_intx_setup(dip, inum, irqno,
				    intrp, ispec, NULL));
			}
			bustype = (bustype == BUS_EISA) ? BUS_ISA : BUS_EISA;
		}

		/* not found; caller falls back to the default configuration */
		return (-1);
	}

	/* search the interrupt source override (ISO) entries first */
	if (acpi_iso_cnt != 0) {
		hp = (ACPI_SUBTABLE_HEADER *)acpi_isop;
		i = 0;
		while (i < acpi_iso_cnt) {
			if (hp->Type == ACPI_MADT_TYPE_INTERRUPT_OVERRIDE) {
				isop = (ACPI_MADT_INTERRUPT_OVERRIDE *)hp;
				if (isop->Bus == 0 &&
				    isop->SourceIrq == irqno) {
					newirq = isop->GlobalIrq;
					intr_flag.intr_po = isop->IntiFlags &
					    ACPI_MADT_POLARITY_MASK;
					intr_flag.intr_el = (isop->IntiFlags &
					    ACPI_MADT_TRIGGER_MASK) >> 2;
					intr_flag.bustype = BUS_ISA;

					return (apix_intx_setup(dip, inum,
					    newirq, NULL, ispec, &intr_flag));
				}
				i++;
			}
			hp = (ACPI_SUBTABLE_HEADER *)(((char *)hp) +
			    hp->Length);
		}
	}
	intr_flag.intr_po = INTR_PO_ACTIVE_HIGH;
	intr_flag.intr_el = INTR_EL_EDGE;
	intr_flag.bustype = BUS_ISA;
	return (apix_intx_setup(dip, inum, irqno, NULL, ispec, &intr_flag));
}

/*
 * Set up the IRQ table for PCI devices. Return the IRQ number,
 * or -1 on error.
 */
static int
apix_intx_setup_pci(dev_info_t *dip, int inum, int bustype,
    struct intrspec *ispec)
{
	int busid, devid, pci_irq;
	ddi_acc_handle_t cfg_handle;
	uchar_t ipin;
	iflag_t intr_flag;
	struct apic_io_intr *intrp;

	if (acpica_get_bdf(dip, &busid, &devid, NULL) != 0)
		return (-1);

	if (busid == 0 && apic_pci_bus_total == 1)
		busid = (int)apic_single_pci_busid;

	if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS)
		return (-1);
	ipin = pci_config_get8(cfg_handle, PCI_CONF_IPIN) - PCI_INTA;
	pci_config_teardown(&cfg_handle);

	if (apic_enable_acpi && !apic_use_acpi_madt_only) {	/* ACPI */
		if (apic_acpi_translate_pci_irq(dip, busid, devid,
		    ipin, &pci_irq, &intr_flag) != ACPI_PSM_SUCCESS)
			return (-1);

		intr_flag.bustype = (uchar_t)bustype;
		return (apix_intx_setup(dip, inum, pci_irq, NULL, ispec,
		    &intr_flag));
	}

	/* MP configuration table */
	pci_irq = ((devid & 0x1f) << 2) | (ipin & 0x3);
	if ((intrp = apic_find_io_intr_w_busid(pci_irq, busid)) == NULL) {
		pci_irq = apic_handle_pci_pci_bridge(dip, devid, ipin, &intrp);
		if (pci_irq == -1)
			return (-1);
	}

	return (apix_intx_setup(dip, inum, pci_irq, intrp, ispec, NULL));
}
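/*
 * Editorial note: under the MP configuration table,
 * apix_intx_setup_pci() above derives its IRQ lookup key by packing the
 * PCI device number and interrupt pin as ((devid & 0x1f) << 2) |
 * (ipin & 0x3). For example, device 3 asserting INTB (ipin 1) yields
 * (3 << 2) | 1 = 0xd. mp_pci_irq_key() is a hypothetical name and the
 * block is illustrative only (not compiled).
 */
#if 0
static int
mp_pci_irq_key(int devid, uchar_t ipin)
{
	/* low 2 bits: INTA..INTD pin; next 5 bits: device number */
	return (((devid & 0x1f) << 2) | (ipin & 0x3));
}
#endif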
/*
 * Translate and return the IRQ number.
 */
static int
apix_intx_xlate_irq(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int newirq, irqno = ispec->intrspec_vec;
	int parent_is_pci_or_pciex = 0, child_is_pciex = 0;
	int bustype = 0, dev_len;
	char dev_type[16];

	if (apic_defconf) {
		mutex_enter(&airq_mutex);
		goto defconf;
	}

	if ((dip == NULL) || (!apic_irq_translate && !apic_enable_acpi)) {
		mutex_enter(&airq_mutex);
		goto nonpci;
	}

	/*
	 * Use ddi_getlongprop_buf() instead of ddi_prop_lookup_string()
	 * to avoid an extra buffer allocation.
	 */
	dev_len = sizeof (dev_type);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ddi_get_parent(dip),
	    DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
	    &dev_len) == DDI_PROP_SUCCESS) {
		if ((strcmp(dev_type, "pci") == 0) ||
		    (strcmp(dev_type, "pciex") == 0))
			parent_is_pci_or_pciex = 1;
	}

	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "compatible", (caddr_t)dev_type,
	    &dev_len) == DDI_PROP_SUCCESS) {
		if (strstr(dev_type, "pciex"))
			child_is_pciex = 1;
	}

	mutex_enter(&airq_mutex);

	if (parent_is_pci_or_pciex) {
		bustype = child_is_pciex ? BUS_PCIE : BUS_PCI;
		newirq = apix_intx_setup_pci(dip, inum, bustype, ispec);
		if (newirq != -1)
			goto done;
		bustype = 0;
	} else if (strcmp(dev_type, "isa") == 0)
		bustype = BUS_ISA;
	else if (strcmp(dev_type, "eisa") == 0)
		bustype = BUS_EISA;

nonpci:
	newirq = apix_intx_setup_nonpci(dip, inum, bustype, ispec);
	if (newirq != -1)
		goto done;

defconf:
	newirq = apix_intx_setup(dip, inum, irqno, NULL, ispec, NULL);
	if (newirq == -1) {
		mutex_exit(&airq_mutex);
		return (-1);
	}
done:
	ASSERT(apic_irq_table[newirq]);
	mutex_exit(&airq_mutex);
	return (newirq);
}

static int
apix_intx_alloc_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int irqno;
	apix_vector_t *vecp;

	if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
		return (0);

	if ((vecp = apix_alloc_intx(dip, inum, irqno)) == NULL)
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apix_intx_alloc_vector: dip=0x%p name=%s "
	    "irqno=0x%x cpuid=%d vector=0x%x\n",
	    (void *)dip, ddi_driver_name(dip), irqno,
	    vecp->v_cpuid, vecp->v_vector));

	return (1);
}

/*
 * Return the vector if the translated IRQ for this device has a vector
 * mapping set up. If no IRQ setup exists, or no vector is allocated to
 * it, return NULL.
 */
static apix_vector_t *
apix_intx_xlate_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int irqno;
	apix_vector_t *vecp;

	/* get the IRQ number */
	if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
		return (NULL);

	/* get the vector number if a vector is allocated to this irqno */
	vecp = apix_intx_get_vector(irqno);

	return (vecp);
}

/* stub function */
int
apix_loaded(void)
{
	return (apix_is_enabled);
}
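/*
 * Editorial note: apix_intx_alloc_vector() and apix_intx_xlate_vector()
 * above form the usual fixed-interrupt path: translate the intrspec to
 * an IRQ, bind a vector, then look the vector back up. A sketch of a
 * consumer's view follows; setup_fixed_intr() is a hypothetical example
 * caller and the block is illustrative only (not compiled).
 */
#if 0
static apix_vector_t *
setup_fixed_intr(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	/* translate the intrspec to an IRQ and bind a vector to it */
	if (apix_intx_alloc_vector(dip, inum, ispec) == 0)
		return (NULL);

	/* retrieve the vector that was just bound to the IRQ */
	return (apix_intx_xlate_vector(dip, inum, ispec));
}
#endif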